/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>
void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u16 tag, u8 port, bool secure)
{
u8 rem = size % 4;
u32 reg;
int i;
size -= rem;
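/*
* 0x180 is the IMEM control register: bit 24 enables auto-increment on
* writes and bit 28 requests a secure upload (per the falcon IMEMC
* layout -- treat the exact bit meanings as an informed assumption).
*/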
reg = start | BIT(24) | (secure ? BIT(28) : 0);
nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
for (i = 0; i < size / 4; i++) {
/* write new tag every 256B */
if ((i & 0x3f) == 0)
nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
}
/*
* If size is not a multiple of 4, mask the last word to ensure garbage
* does not get written
*/
if (rem) {
u32 extra = ((u32 *)data)[i];
/* write new tag every 256B */
if ((i & 0x3f) == 0)
nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
extra & (BIT(rem * 8) - 1));
++i;
}
/* code must be padded to 0x40 words */
for (; i & 0x3f; i++)
nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}
void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u8 port)
{
u8 rem = size % 4;
int i;
size -= rem;
nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
for (i = 0; i < size / 4; i++)
nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);
/*
* If size is not a multiple of 4, mask the last word to ensure garbage
* does not get written
*/
if (rem) {
u32 extra = ((u32 *)data)[i];
nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
extra & (BIT(rem * 8) - 1));
}
}
void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
u32 reg = nvkm_falcon_rd32(falcon, 0x100);
if (reg & BIT(6))
nvkm_falcon_wr32(falcon, 0x130, 0x2);
else
nvkm_falcon_wr32(falcon, 0x100, 0x2);
}
/* linux-master: drivers/gpu/drm/nouveau/nvkm/falcon/v1.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/user.h>
static u64
nvif_userc361_time(struct nvif_user *user)
{
u32 hi, lo;
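/* Re-read hi to catch the low word wrapping between the two 32-bit reads. */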
do {
hi = nvif_rd32(&user->object, 0x084);
lo = nvif_rd32(&user->object, 0x080);
} while (hi != nvif_rd32(&user->object, 0x084));
return ((u64)hi << 32 | lo);
}
static void
nvif_userc361_doorbell(struct nvif_user *user, u32 token)
{
nvif_wr32(&user->object, 0x90, token);
}
const struct nvif_user_func
nvif_userc361 = {
.doorbell = nvif_userc361_doorbell,
.time = nvif_userc361_time,
};
/* linux-master: drivers/gpu/drm/nouveau/nvif/userc361.c */
/*
* Copyright 2020 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/timer.h>
#include <nvif/device.h>
s64
nvif_timer_wait_test(struct nvif_timer_wait *wait)
{
u64 time = nvif_device_time(wait->device);
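/*
* Latch the start time on the first call; if the timer value has not
* advanced after 16 consecutive reads, assume the timer is stuck and
* time out rather than spinning forever.
*/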
if (wait->reads == 0) {
wait->time0 = time;
wait->time1 = time;
}
if (wait->time1 == time) {
if (WARN_ON(wait->reads++ == 16))
return -ETIMEDOUT;
} else {
wait->time1 = time;
wait->reads = 1;
}
if (wait->time1 - wait->time0 > wait->limit)
return -ETIMEDOUT;
return wait->time1 - wait->time0;
}
void
nvif_timer_wait_init(struct nvif_device *device, u64 nsec,
struct nvif_timer_wait *wait)
{
wait->device = device;
wait->limit = nsec;
wait->reads = 0;
}
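/*
* Illustrative sketch only (not part of the original file): the two
* helpers above are meant to be paired in a poll loop like the one
* below.  The "ready" callback is hypothetical; in-tree users get this
* pattern via the nvif_nsec()/nvif_usec()/nvif_msec() macros from
* <nvif/timer.h>.
*/
#if 0
static int
poll_example(struct nvif_device *device, bool (*ready)(void *), void *priv)
{
struct nvif_timer_wait wait;
s64 taken;
nvif_timer_wait_init(device, 2000000, &wait); /* 2ms budget, in nsec */
do {
if (ready(priv))
return 0;
} while ((taken = nvif_timer_wait_test(&wait)) >= 0);
return (int)taken; /* -ETIMEDOUT */
}
#endif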
/* linux-master: drivers/gpu/drm/nouveau/nvif/timer.c */
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <nvif/object.h>
#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/ioctl.h>
int
nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
{
struct nvif_client *client = object->client;
union {
struct nvif_ioctl_v0 v0;
} *args = data;
if (size >= sizeof(*args) && args->v0.version == 0) {
if (object != &client->object)
args->v0.object = nvif_handle(object);
else
args->v0.object = 0;
args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
} else
return -ENOSYS;
return client->driver->ioctl(client->object.priv, data, size, hack);
}
void
nvif_object_sclass_put(struct nvif_sclass **psclass)
{
kfree(*psclass);
*psclass = NULL;
}
int
nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_sclass_v0 sclass;
} *args = NULL;
int ret, cnt = 0, i;
u32 size;
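/*
* The class list's size isn't known up front: issue the query with the
* current count and, if the kernel reports more classes than were
* allocated for, grow the buffer and retry.
*/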
while (1) {
size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
if (!(args = kmalloc(size, GFP_KERNEL)))
return -ENOMEM;
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
args->sclass.version = 0;
args->sclass.count = cnt;
ret = nvif_object_ioctl(object, args, size, NULL);
if (ret == 0 && args->sclass.count <= cnt)
break;
cnt = args->sclass.count;
kfree(args);
if (ret != 0)
return ret;
}
*psclass = kcalloc(args->sclass.count, sizeof(**psclass), GFP_KERNEL);
if (*psclass) {
for (i = 0; i < args->sclass.count; i++) {
(*psclass)[i].oclass = args->sclass.oclass[i].oclass;
(*psclass)[i].minver = args->sclass.oclass[i].minver;
(*psclass)[i].maxver = args->sclass.oclass[i].maxver;
}
ret = args->sclass.count;
} else {
ret = -ENOMEM;
}
kfree(args);
return ret;
}
u32
nvif_object_rd(struct nvif_object *object, int size, u64 addr)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_rd_v0 rd;
} args = {
.ioctl.type = NVIF_IOCTL_V0_RD,
.rd.size = size,
.rd.addr = addr,
};
int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
if (ret) {
/*XXX: warn? */
return 0;
}
return args.rd.data;
}
void
nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_wr_v0 wr;
} args = {
.ioctl.type = NVIF_IOCTL_V0_WR,
.wr.size = size,
.wr.addr = addr,
.wr.data = data,
};
int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
if (ret) {
/*XXX: warn? */
}
}
int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_mthd_v0 mthd;
} *args;
u8 stack[128];
int ret;
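/* Small requests use the on-stack buffer; larger ones fall back to kmalloc. */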
if (sizeof(*args) + size > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
return -ENOMEM;
} else {
args = (void *)stack;
}
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_MTHD;
args->mthd.version = 0;
args->mthd.method = mthd;
memcpy(args->mthd.data, data, size);
ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
memcpy(data, args->mthd.data, size);
if (args != (void *)stack)
kfree(args);
return ret;
}
void
nvif_object_unmap_handle(struct nvif_object *object)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_unmap unmap;
} args = {
.ioctl.type = NVIF_IOCTL_V0_UNMAP,
};
nvif_object_ioctl(object, &args, sizeof(args), NULL);
}
int
nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
u64 *handle, u64 *length)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_map_v0 map;
} *args;
u32 argn = sizeof(*args) + argc;
int ret, maptype;
if (!(args = kzalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->ioctl.type = NVIF_IOCTL_V0_MAP;
memcpy(args->map.data, argv, argc);
ret = nvif_object_ioctl(object, args, argn, NULL);
*handle = args->map.handle;
*length = args->map.length;
maptype = args->map.type;
kfree(args);
return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
}
void
nvif_object_unmap(struct nvif_object *object)
{
struct nvif_client *client = object->client;
if (object->map.ptr) {
if (object->map.size) {
client->driver->unmap(client, object->map.ptr,
object->map.size);
object->map.size = 0;
}
object->map.ptr = NULL;
nvif_object_unmap_handle(object);
}
}
int
nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
{
struct nvif_client *client = object->client;
u64 handle, length;
int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
if (ret >= 0) {
if (ret) {
object->map.ptr = client->driver->map(client,
handle,
length);
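/* The comma expression presets ret = -ENOMEM for the map-failure path. */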
if (ret = -ENOMEM, object->map.ptr) {
object->map.size = length;
return 0;
}
} else {
object->map.ptr = (void *)(unsigned long)handle;
return 0;
}
nvif_object_unmap_handle(object);
}
return ret;
}
void
nvif_object_dtor(struct nvif_object *object)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_del del;
} args = {
.ioctl.type = NVIF_IOCTL_V0_DEL,
};
if (!nvif_object_constructed(object))
return;
nvif_object_unmap(object);
nvif_object_ioctl(object, &args, sizeof(args), NULL);
object->client = NULL;
}
int
nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
s32 oclass, void *data, u32 size, struct nvif_object *object)
{
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_new_v0 new;
} *args;
int ret = 0;
object->client = NULL;
object->name = name ? name : "nvifObject";
object->handle = handle;
object->oclass = oclass;
object->map.ptr = NULL;
object->map.size = 0;
if (parent) {
if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
nvif_object_dtor(object);
return -ENOMEM;
}
object->parent = parent->parent;
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_NEW;
args->new.version = 0;
args->new.route = parent->client->route;
args->new.token = nvif_handle(object);
args->new.object = nvif_handle(object);
args->new.handle = handle;
args->new.oclass = oclass;
memcpy(args->new.data, data, size);
ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
&object->priv);
memcpy(data, args->new.data, size);
kfree(args);
if (ret == 0)
object->client = parent->client;
}
if (ret)
nvif_object_dtor(object);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/object.c */
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/vmm.h>
#include <nvif/mem.h>
#include <nvif/if000c.h>
int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
&(struct nvif_vmm_unmap_v0) { .addr = addr },
sizeof(struct nvif_vmm_unmap_v0));
}
int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *mem, u64 offset)
{
struct nvif_vmm_map_v0 *args;
u8 stack[48];
int ret;
if (sizeof(*args) + argc > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
return -ENOMEM;
} else {
args = (void *)stack;
}
args->version = 0;
args->addr = addr;
args->size = size;
args->memory = nvif_handle(&mem->object);
args->offset = offset;
memcpy(args->data, argv, argc);
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
args, sizeof(*args) + argc);
if (args != (void *)stack)
kfree(args);
return ret;
}
void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
if (vma->size) {
WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
&(struct nvif_vmm_put_v0) {
.addr = vma->addr,
}, sizeof(struct nvif_vmm_put_v0)));
vma->size = 0;
}
}
int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
struct nvif_vmm_get_v0 args;
int ret;
args.version = vma->size = 0;
args.sparse = sparse;
args.page = page;
args.align = align;
args.size = size;
switch (type) {
case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
default:
WARN_ON(1);
return -EINVAL;
}
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
&args, sizeof(args));
if (ret == 0) {
vma->addr = args.addr;
vma->size = args.size;
}
return ret;
}
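/*
* Illustrative sketch only (not part of the original file): the usual
* get/map/unmap/put lifecycle built from the helpers above.  "vmm" and
* "mem" are assumed to have been constructed elsewhere; real callers
* often pass backend-specific map arguments instead of NULL/0.
*/
#if 0
static int
vmm_map_example(struct nvif_vmm *vmm, struct nvif_mem *mem)
{
struct nvif_vma vma;
int ret;
ret = nvif_vmm_get(vmm, LAZY, false, mem->page, 0, mem->size, &vma);
if (ret)
return ret;
ret = nvif_vmm_map(vmm, vma.addr, mem->size, NULL, 0, mem, 0);
if (ret == 0) {
/* ... use the mapping ... */
ret = nvif_vmm_unmap(vmm, vma.addr);
}
nvif_vmm_put(vmm, &vma);
return ret;
}
#endif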
int
nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_GET,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_PUT,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_MAP,
.addr = addr,
.size = size,
.shift = shift,
.memory = nvif_handle(&mem->object),
.offset = offset,
.argv = (u64)(uintptr_t)argv,
.argc = argc,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_UNMAP,
.addr = addr,
.size = size,
.shift = shift,
.sparse = sparse,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_SPARSE,
.addr = addr,
.size = size,
.ref = ref,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
void
nvif_vmm_dtor(struct nvif_vmm *vmm)
{
kfree(vmm->page);
nvif_object_dtor(&vmm->object);
}
int
nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
int ret = -ENOSYS, i;
vmm->object.client = NULL;
vmm->page = NULL;
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
args->addr = addr;
args->size = size;
switch (type) {
case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
default:
WARN_ON(1);
ret = -EINVAL;
goto done; /* free args instead of leaking it on an invalid type */
}
memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
oclass, args, argn, &vmm->object);
if (ret)
goto done;
vmm->start = args->addr;
vmm->limit = args->size;
vmm->page_nr = args->page_nr;
vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
GFP_KERNEL);
if (!vmm->page) {
ret = -ENOMEM;
goto done;
}
for (i = 0; i < vmm->page_nr; i++) {
struct nvif_vmm_page_v0 args = { .index = i };
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
&args, sizeof(args));
if (ret)
break;
vmm->page[i].shift = args.shift;
vmm->page[i].sparse = args.sparse;
vmm->page[i].vram = args.vram;
vmm->page[i].host = args.host;
vmm->page[i].comp = args.comp;
}
done:
if (ret)
nvif_vmm_dtor(vmm);
kfree(args);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/vmm.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/user.h>
#include <nvif/device.h>
#include <nvif/class.h>
void
nvif_user_dtor(struct nvif_device *device)
{
if (device->user.func) {
nvif_object_dtor(&device->user.object);
device->user.func = NULL;
}
}
int
nvif_user_ctor(struct nvif_device *device, const char *name)
{
struct {
s32 oclass;
int version;
const struct nvif_user_func *func;
} users[] = {
{ AMPERE_USERMODE_A, -1, &nvif_userc361 },
{ TURING_USERMODE_A, -1, &nvif_userc361 },
{ VOLTA_USERMODE_A, -1, &nvif_userc361 },
{}
};
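/* users[] is ordered newest-first; nvif_mclass() picks the first class the device supports. */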
int cid, ret;
if (device->user.func)
return 0;
cid = nvif_mclass(&device->object, users);
if (cid < 0)
return cid;
ret = nvif_object_ctor(&device->object, name ? name : "nvifUsermode",
0, users[cid].oclass, NULL, 0,
&device->user.object);
if (ret)
return ret;
nvif_object_map(&device->user.object, NULL, 0);
device->user.func = users[cid].func;
return 0;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/user.c */
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/mem.h>
#include <nvif/client.h>
#include <nvif/if000a.h>
int
nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size,
struct nvif_mem *mem)
{
int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type,
0, size, NULL, 0, mem);
if (ret == 0) {
ret = nvif_object_map(&mem->object, NULL, 0);
if (ret)
nvif_mem_dtor(mem);
}
return ret;
}
void
nvif_mem_dtor(struct nvif_mem *mem)
{
nvif_object_dtor(&mem->object);
}
int
nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass,
int type, u8 page, u64 size, void *argv, u32 argc,
struct nvif_mem *mem)
{
struct nvif_mem_v0 *args;
u8 stack[128];
int ret;
mem->object.client = NULL;
if (type < 0)
return -EINVAL;
if (sizeof(*args) + argc > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
return -ENOMEM;
} else {
args = (void *)stack;
}
args->version = 0;
args->type = type;
args->page = page;
args->size = size;
memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass,
args, sizeof(*args) + argc, &mem->object);
if (ret == 0) {
mem->type = mmu->type[type].type;
mem->page = args->page;
mem->addr = args->addr;
mem->size = args->size;
}
if (args != (void *)stack)
kfree(args);
return ret;
}
int
nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type,
u8 page, u64 size, void *argv, u32 argc, struct nvif_mem *mem)
{
int ret = -EINVAL, i;
mem->object.client = NULL;
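/*
* Scan the MMU's memory types for one whose capability flags include
* every requested bit; the loop stops at the first type that
* constructs successfully (ret becomes zero).
*/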
for (i = 0; ret && i < mmu->type_nr; i++) {
if ((mmu->type[i].type & type) == type) {
ret = nvif_mem_ctor_type(mmu, name, oclass, i, page,
size, argv, argc, mem);
}
}
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/mem.c */
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/conn.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0011.h>
int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
struct {
struct nvif_event_v0 base;
struct nvif_conn_event_v0 conn;
} args;
int ret;
args.conn.version = 0;
args.conn.types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
func, true, &args.base, sizeof(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
int
nvif_conn_hpd_status(struct nvif_conn *conn)
{
struct nvif_conn_hpd_status_v0 args;
int ret;
args.version = 0;
ret = nvif_mthd(&conn->object, NVIF_CONN_V0_HPD_STATUS, &args, sizeof(args));
NVIF_ERRON(ret, &conn->object, "[HPD_STATUS] support:%d present:%d",
args.support, args.present);
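/* Encodes the result as !!support + !!present: 0 = no HPD support, 1 = supported but no sink, 2 = sink present. */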
return ret ? ret : !!args.support + !!args.present;
}
void
nvif_conn_dtor(struct nvif_conn *conn)
{
nvif_object_dtor(&conn->object);
}
int
nvif_conn_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_conn *conn)
{
struct nvif_conn_v0 args;
int ret;
args.version = 0;
args.id = id;
ret = nvif_object_ctor(&disp->object, name ?: "nvifConn", id, NVIF_CLASS_CONN,
&args, sizeof(args), &conn->object);
NVIF_ERRON(ret, &disp->object, "[NEW conn id:%d]", id);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/conn.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/disp.h>
#include <nvif/device.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0010.h>
void
nvif_disp_dtor(struct nvif_disp *disp)
{
nvif_object_dtor(&disp->object);
}
int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
{ GV100_DISP, 0 },
{ GP102_DISP, 0 },
{ GP100_DISP, 0 },
{ GM200_DISP, 0 },
{ GM107_DISP, 0 },
{ GK110_DISP, 0 },
{ GK104_DISP, 0 },
{ GF110_DISP, 0 },
{ GT214_DISP, 0 },
{ GT206_DISP, 0 },
{ GT200_DISP, 0 },
{ G82_DISP, 0 },
{ NV50_DISP, 0 },
{ NV04_DISP, 0 },
{}
};
struct nvif_disp_v0 args;
int cid, ret;
cid = nvif_sclass(&device->object, disps, oclass);
disp->object.client = NULL;
if (cid < 0) {
NVIF_ERRON(cid, &device->object, "[NEW disp%04x] not supported", oclass);
return cid;
}
args.version = 0;
ret = nvif_object_ctor(&device->object, name ?: "nvifDisp", 0,
disps[cid].oclass, &args, sizeof(args), &disp->object);
NVIF_ERRON(ret, &device->object, "[NEW disp%04x]", disps[cid].oclass);
if (ret)
return ret;
NVIF_DEBUG(&disp->object, "[NEW] conn_mask:%08x outp_mask:%08x head_mask:%08x",
args.conn_mask, args.outp_mask, args.head_mask);
disp->conn_mask = args.conn_mask;
disp->outp_mask = args.outp_mask;
disp->head_mask = args.head_mask;
return 0;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/disp.c */
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/if0000.h>
int
nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
{
return client->driver->ioctl(client->object.priv, data, size, NULL);
}
int
nvif_client_suspend(struct nvif_client *client)
{
return client->driver->suspend(client->object.priv);
}
int
nvif_client_resume(struct nvif_client *client)
{
return client->driver->resume(client->object.priv);
}
void
nvif_client_dtor(struct nvif_client *client)
{
nvif_object_dtor(&client->object);
if (client->driver) {
if (client->driver->fini)
client->driver->fini(client->object.priv);
client->driver = NULL;
}
}
int
nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
struct nvif_client *client)
{
struct nvif_client_v0 args = { .device = device };
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_nop_v0 nop;
} nop = {};
int ret;
strscpy(args.name, name ? name : "nvifClient", sizeof(args.name));
ret = nvif_object_ctor(parent != client ? &parent->object : NULL,
name ? name : "nvifClient", 0,
NVIF_CLASS_CLIENT, &args, sizeof(args),
&client->object);
if (ret)
return ret;
client->object.client = client;
client->object.handle = ~0;
client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->driver = parent->driver;
if (ret == 0) {
ret = nvif_client_ioctl(client, &nop, sizeof(nop));
client->version = nop.nop.version;
}
if (ret)
nvif_client_dtor(client);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/client.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/fifo.h>
static int
nvif_fifo_runlists(struct nvif_device *device)
{
struct nvif_object *object = &device->object;
struct {
struct nv_device_info_v1 m;
struct {
struct nv_device_info_v1_data runlists;
struct nv_device_info_v1_data runlist[64];
} v;
} *a;
int ret, i;
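/* Query the runlist mask and per-runlist engine info once, then cache it. */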
if (device->runlist)
return 0;
if (!(a = kmalloc(sizeof(*a), GFP_KERNEL)))
return -ENOMEM;
a->m.version = 1;
a->m.count = sizeof(a->v) / sizeof(a->v.runlists);
a->v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS;
for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) {
a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES;
a->v.runlist[i].data = i;
}
ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a));
if (ret)
goto done;
device->runlists = fls64(a->v.runlists.data);
device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),
GFP_KERNEL);
if (!device->runlist) {
ret = -ENOMEM;
goto done;
}
for (i = 0; i < device->runlists; i++) {
if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID)
device->runlist[i].engines = a->v.runlist[i].data;
}
done:
kfree(a);
return ret;
}
u64
nvif_fifo_runlist(struct nvif_device *device, u64 engine)
{
u64 runm = 0;
int ret, i;
if ((ret = nvif_fifo_runlists(device)))
return runm;
for (i = 0; i < device->runlists; i++) {
if (device->runlist[i].engines & engine)
runm |= BIT_ULL(i);
}
return runm;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/fifo.c */
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/head.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if0013.h>
int
nvif_head_vblank_event_ctor(struct nvif_head *head, const char *name, nvif_event_func func,
bool wait, struct nvif_event *event)
{
int ret = nvif_event_ctor(&head->object, name ?: "nvifHeadVBlank", nvif_head_id(head),
func, wait, NULL, 0, event);
NVIF_ERRON(ret, &head->object, "[NEW EVENT:VBLANK]");
return ret;
}
void
nvif_head_dtor(struct nvif_head *head)
{
nvif_object_dtor(&head->object);
}
int
nvif_head_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_head *head)
{
struct nvif_head_v0 args;
int ret;
args.version = 0;
args.id = id;
ret = nvif_object_ctor(&disp->object, name ? name : "nvifHead", id, NVIF_CLASS_HEAD,
&args, sizeof(args), &head->object);
NVIF_ERRON(ret, &disp->object, "[NEW head id:%d]", args.id);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/head.c */
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/event.h>
#include <nvif/printf.h>
#include <nvif/class.h>
#include <nvif/if000e.h>
int
nvif_event_block(struct nvif_event *event)
{
if (nvif_event_constructed(event)) {
int ret = nvif_mthd(&event->object, NVIF_EVENT_V0_BLOCK, NULL, 0);
NVIF_ERRON(ret, &event->object, "[BLOCK]");
return ret;
}
return 0;
}
int
nvif_event_allow(struct nvif_event *event)
{
if (nvif_event_constructed(event)) {
int ret = nvif_mthd(&event->object, NVIF_EVENT_V0_ALLOW, NULL, 0);
NVIF_ERRON(ret, &event->object, "[ALLOW]");
return ret;
}
return 0;
}
void
nvif_event_dtor(struct nvif_event *event)
{
nvif_object_dtor(&event->object);
}
int
nvif_event_ctor_(struct nvif_object *parent, const char *name, u32 handle, nvif_event_func func,
bool wait, struct nvif_event_v0 *args, u32 argc, bool warn,
struct nvif_event *event)
{
struct nvif_event_v0 _args;
int ret;
if (!args) {
args = &_args;
argc = sizeof(_args);
}
args->version = 0;
args->wait = wait;
ret = nvif_object_ctor(parent, name ?: "nvifEvent", handle,
NVIF_CLASS_EVENT, args, argc, &event->object);
NVIF_ERRON(ret && warn, parent, "[NEW EVENT wait:%d size:%zd]",
args->wait, argc - sizeof(*args));
if (ret)
return ret;
event->func = func;
return 0;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/event.c */
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <nvif/device.h>
u64
nvif_device_time(struct nvif_device *device)
{
if (!device->user.func) {
struct nv_device_time_v0 args = {};
int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
&args, sizeof(args));
WARN_ON_ONCE(ret != 0);
return args.time;
}
return device->user.func->time(&device->user);
}
void
nvif_device_dtor(struct nvif_device *device)
{
nvif_user_dtor(device);
kfree(device->runlist);
device->runlist = NULL;
nvif_object_dtor(&device->object);
}
int
nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
s32 oclass, void *data, u32 size, struct nvif_device *device)
{
int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
oclass, data, size, &device->object);
device->runlist = NULL;
device->user.func = NULL;
if (ret == 0) {
device->info.version = 0;
ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
&device->info, sizeof(device->info));
}
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/device.c */
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/outp.h>
#include <nvif/disp.h>
#include <nvif/printf.h>
#include <nvif/class.h>
int
nvif_outp_dp_mst_vcpi(struct nvif_outp *outp, int head,
u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn)
{
struct nvif_outp_dp_mst_vcpi_v0 args;
int ret;
args.version = 0;
args.head = head;
args.start_slot = start_slot;
args.num_slots = num_slots;
args.pbn = pbn;
args.aligned_pbn = aligned_pbn;
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_MST_VCPI, &args, sizeof(args));
NVIF_ERRON(ret, &outp->object,
"[DP_MST_VCPI head:%d start_slot:%02x num_slots:%02x pbn:%04x aligned_pbn:%04x]",
args.head, args.start_slot, args.num_slots, args.pbn, args.aligned_pbn);
return ret;
}
int
nvif_outp_dp_retrain(struct nvif_outp *outp)
{
int ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_RETRAIN, NULL, 0);
NVIF_ERRON(ret, &outp->object, "[DP_RETRAIN]");
return ret;
}
int
nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
{
struct nvif_outp_dp_aux_pwr_v0 args;
int ret;
args.version = 0;
args.state = enable;
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_AUX_PWR, &args, sizeof(args));
NVIF_ERRON(ret, &outp->object, "[DP_AUX_PWR state:%d]", args.state);
return ret;
}
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
struct {
struct nvif_outp_hda_eld_v0 mthd;
u8 data[128];
} args;
int ret;
if (WARN_ON(size > ARRAY_SIZE(args.data)))
return -EINVAL;
args.mthd.version = 0;
args.mthd.head = head;
memcpy(args.data, data, size);
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
int
nvif_outp_infoframe(struct nvif_outp *outp, u8 type, struct nvif_outp_infoframe_v0 *args, u32 size)
{
int ret;
args->type = type;
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_INFOFRAME, args, sizeof(*args) + size);
NVIF_ERRON(ret, &outp->object, "[INFOFRAME type:%d size:%d]", type, size);
return ret;
}
void
nvif_outp_release(struct nvif_outp *outp)
{
int ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_RELEASE, NULL, 0);
NVIF_ERRON(ret, &outp->object, "[RELEASE]");
outp->or.id = -1;
}
static inline int
nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0 *args)
{
int ret;
args->version = 0;
args->proto = proto;
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_ACQUIRE, args, sizeof(*args));
if (ret)
return ret;
outp->or.id = args->or;
outp->or.link = args->link;
return 0;
}
int
nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
int link_nr, int link_bw, bool hda, bool mst)
{
struct nvif_outp_acquire_v0 args;
int ret;
args.dp.link_nr = link_nr;
args.dp.link_bw = link_bw;
args.dp.hda = hda;
args.dp.mst = mst;
memcpy(args.dp.dpcd, dpcd, sizeof(args.dp.dpcd));
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_DP, &args);
NVIF_ERRON(ret, &outp->object,
"[ACQUIRE proto:DP link_nr:%d link_bw:%02x hda:%d mst:%d] or:%d link:%d",
args.dp.link_nr, args.dp.link_bw, args.dp.hda, args.dp.mst, args.or, args.link);
return ret;
}
int
nvif_outp_acquire_lvds(struct nvif_outp *outp, bool dual, bool bpc8)
{
struct nvif_outp_acquire_v0 args;
int ret;
args.lvds.dual = dual;
args.lvds.bpc8 = bpc8;
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_LVDS, &args);
NVIF_ERRON(ret, &outp->object,
"[ACQUIRE proto:LVDS dual:%d 8bpc:%d] or:%d link:%d",
args.lvds.dual, args.lvds.bpc8, args.or, args.link);
return ret;
}
int
nvif_outp_acquire_tmds(struct nvif_outp *outp, int head,
bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda)
{
struct nvif_outp_acquire_v0 args;
int ret;
args.tmds.head = head;
args.tmds.hdmi = hdmi;
args.tmds.hdmi_max_ac_packet = max_ac_packet;
args.tmds.hdmi_rekey = rekey;
args.tmds.hdmi_scdc = scdc;
args.tmds.hdmi_hda = hda;
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_TMDS, &args);
NVIF_ERRON(ret, &outp->object,
"[ACQUIRE proto:TMDS head:%d hdmi:%d max_ac_packet:%d rekey:%d scdc:%d hda:%d]"
" or:%d link:%d", args.tmds.head, args.tmds.hdmi, args.tmds.hdmi_max_ac_packet,
args.tmds.hdmi_rekey, args.tmds.hdmi_scdc, args.tmds.hdmi_hda,
args.or, args.link);
return ret;
}
int
nvif_outp_acquire_rgb_crt(struct nvif_outp *outp)
{
struct nvif_outp_acquire_v0 args;
int ret;
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_RGB_CRT, &args);
NVIF_ERRON(ret, &outp->object, "[ACQUIRE proto:RGB_CRT] or:%d", args.or);
return ret;
}
int
nvif_outp_load_detect(struct nvif_outp *outp, u32 loadval)
{
struct nvif_outp_load_detect_v0 args;
int ret;
args.version = 0;
args.data = loadval;
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_LOAD_DETECT, &args, sizeof(args));
NVIF_ERRON(ret, &outp->object, "[LOAD_DETECT data:%08x] load:%02x", args.data, args.load);
return ret < 0 ? ret : args.load;
}
void
nvif_outp_dtor(struct nvif_outp *outp)
{
nvif_object_dtor(&outp->object);
}
int
nvif_outp_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_outp *outp)
{
struct nvif_outp_v0 args;
int ret;
args.version = 0;
args.id = id;
ret = nvif_object_ctor(&disp->object, name ?: "nvifOutp", id, NVIF_CLASS_OUTP,
&args, sizeof(args), &outp->object);
NVIF_ERRON(ret, &disp->object, "[NEW outp id:%d]", id);
if (ret)
return ret;
outp->or.id = -1;
return 0;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/outp.c */
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/mmu.h>
#include <nvif/class.h>
#include <nvif/if0008.h>
void
nvif_mmu_dtor(struct nvif_mmu *mmu)
{
if (!nvif_object_constructed(&mmu->object))
return;
kfree(mmu->kind);
kfree(mmu->type);
kfree(mmu->heap);
nvif_object_dtor(&mmu->object);
}
int
nvif_mmu_ctor(struct nvif_object *parent, const char *name, s32 oclass,
struct nvif_mmu *mmu)
{
static const struct nvif_mclass mems[] = {
{ NVIF_CLASS_MEM_GF100, -1 },
{ NVIF_CLASS_MEM_NV50 , -1 },
{ NVIF_CLASS_MEM_NV04 , -1 },
{}
};
struct nvif_mmu_v0 args;
int ret, i;
args.version = 0;
mmu->heap = NULL;
mmu->type = NULL;
mmu->kind = NULL;
ret = nvif_object_ctor(parent, name ? name : "nvifMmu", 0, oclass,
&args, sizeof(args), &mmu->object);
if (ret)
goto done;
mmu->dmabits = args.dmabits;
mmu->heap_nr = args.heap_nr;
mmu->type_nr = args.type_nr;
mmu->kind_nr = args.kind_nr;
ret = nvif_mclass(&mmu->object, mems);
if (ret < 0)
goto done;
mmu->mem = mems[ret].oclass;
mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),
GFP_KERNEL);
mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),
GFP_KERNEL);
if (ret = -ENOMEM, !mmu->heap || !mmu->type)
goto done;
mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),
GFP_KERNEL);
if (!mmu->kind && mmu->kind_nr)
goto done;
for (i = 0; i < mmu->heap_nr; i++) {
struct nvif_mmu_heap_v0 args = { .index = i };
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
&args, sizeof(args));
if (ret)
goto done;
mmu->heap[i].size = args.size;
}
for (i = 0; i < mmu->type_nr; i++) {
struct nvif_mmu_type_v0 args = { .index = i };
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
&args, sizeof(args));
if (ret)
goto done;
mmu->type[i].type = 0;
if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
if (args.kind ) mmu->type[i].type |= NVIF_MEM_KIND;
if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
mmu->type[i].heap = args.heap;
}
if (mmu->kind_nr) {
struct nvif_mmu_kind_v0 *kind;
size_t argc = struct_size(kind, data, mmu->kind_nr);
if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
goto done;
kind->version = 0;
kind->count = mmu->kind_nr;
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
kind, argc);
if (ret == 0)
memcpy(mmu->kind, kind->data, kind->count);
mmu->kind_inv = kind->kind_inv;
kfree(kind);
}
done:
if (ret)
nvif_mmu_dtor(mmu);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/mmu.c */
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <nvif/driver.h>
#include <nvif/client.h>
static const struct nvif_driver *
nvif_driver[] = {
#ifdef __KERNEL__
&nvif_driver_nvkm,
#else
&nvif_driver_drm,
&nvif_driver_lib,
&nvif_driver_null,
#endif
NULL
};
int
nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *client)
{
int ret = -EINVAL, i;
for (i = 0; (client->driver = nvif_driver[i]); i++) {
if (!drv || !strcmp(client->driver->name, drv)) {
ret = client->driver->init(name, device, cfg, dbg,
&client->object.priv);
if (ret == 0)
break;
client->driver->fini(client->object.priv);
}
}
if (ret == 0)
ret = nvif_client_ctor(client, name, device, client);
return ret;
}
/* linux-master: drivers/gpu/drm/nouveau/nvif/driver.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/class.h>
#include <nvif/push507c.h>
#include <nvhw/class/cl907d.h>
#include <nouveau_bo.h>
static int
sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, SOR_SET_CONTROL(or), ctrl);
return 0;
}
static void
sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
{
struct nouveau_bo *bo = disp->sync;
const int off = or * 2;
outp->caps.dp_interlace =
NVBO_RV32(bo, off, NV907D_CORE_NOTIFIER_3, CAPABILITIES_CAP_SOR0_20, DP_INTERLACE);
}
const struct nv50_outp_func
sor907d = {
.ctrl = sor907d_ctrl,
.get_caps = sor907d_get_caps,
};
/* linux-master: drivers/gpu/drm/nouveau/dispnv50/sor907d.c */
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
#include <nvif/class.h>
#include <nvif/pushc37b.h>
#include <nvif/timer.h>
#include <nvhw/class/clc37d.h>
#include <nouveau_bo.h>
int
corec37d_wndw_owner(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
if ((ret = PUSH_WAIT(push, windows * 2)))
return ret;
for (i = 0; i < windows; i++) {
PUSH_MTHD(push, NVC37D, WINDOW_SET_CONTROL(i),
NVDEF(NVC37D, WINDOW_SET_CONTROL, OWNER, HEAD(i >> 1)));
}
return 0;
}
int
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
return ret;
if (ntfy) {
PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
NVDEF(NVC37D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
NVVAL(NVC37D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 4) |
NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
}
PUSH_MTHD(push, NVC37D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
PUSH_MTHD(push, NVC37D, UPDATE, 0x00000001 |
NVDEF(NVC37D, UPDATE, SPECIAL_HANDLING, NONE) |
NVDEF(NVC37D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
if (ntfy) {
PUSH_MTHD(push, NVC37D, SET_NOTIFIER_CONTROL,
NVDEF(NVC37D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
}
return PUSH_KICK(push);
}
int
corec37d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
if (NVBO_TD32(bo, offset, NV_DISP_NOTIFIER, _0, STATUS, ==, FINISHED))
break;
usleep_range(1, 2);
);
return time < 0 ? time : 0;
}
void
corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
{
NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _0,
NVDEF(NV_DISP_NOTIFIER, _0, STATUS, NOT_BEGUN));
NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _1, 0);
NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _2, 0);
NVBO_WR32(bo, offset, NV_DISP_NOTIFIER, _3, 0);
}
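/* Unlike earlier display classes, which have the core channel write its
 * capabilities into the sync notifier, GV100 exposes them through a
 * separate GV100_DISP_CAPS object that is simply mapped and read directly
 * (see sorc37d_get_caps()).
 */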
int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
{
int ret;
ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
GV100_DISP_CAPS, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
ret);
return ret;
}
ret = nvif_object_map(&disp->caps, NULL, 0);
if (ret) {
NV_ERROR(drm,
"Failed to map notifier caps region: %d\n",
ret);
return ret;
}
return 0;
}
static int
corec37d_init(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX: hardcoded - should come from the HW capabilities */
int ret, i;
if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
return ret;
PUSH_MTHD(push, NVC37D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, YUV_PACKED422, TRUE),
WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
PUSH_MTHD(push, NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
NVVAL(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_LUT, USAGE_1025) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
NVDEF(NVC37D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
}
core->assign_windows = true;
return PUSH_KICK(push);
}
static const struct nv50_core_func
corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
.head = &headc37d,
.sor = &sorc37d,
#if IS_ENABLED(CONFIG_DEBUG_FS)
.crc = &crcc37d,
#endif
};
int
corec37d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&corec37d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/corec37d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
static const struct nv50_core_func
core827d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
.caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head827d,
.dac = &dac507d,
.sor = &sor507d,
.pior = &pior507d,
};
int
core827d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&core827d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/core827d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
static int
sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
int ret;
if (asyh) {
ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
ctrl |= NVVAL(NV837D, SOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, SOR_SET_CONTROL(or), ctrl);
return 0;
}
static void
sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
{
outp->caps.dp_interlace = true;
}
const struct nv50_outp_func
sor507d = {
.ctrl = sor507d_ctrl,
.get_caps = sor507d_get_caps,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/sor507d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wimm.h"
#include "atom.h"
#include "wndw.h"
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc37b.h>
static int
wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
{
struct nvif_push *push = wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37B, UPDATE, 0x00000001 |
NVVAL(NVC37B, UPDATE, INTERLOCK_WITH_WINDOW,
!!(interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)));
return PUSH_KICK(push);
}
static int
wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37B, SET_POINT_OUT(0),
NVVAL(NVC37B, SET_POINT_OUT, X, asyw->point.x) |
NVVAL(NVC37B, SET_POINT_OUT, Y, asyw->point.y));
return 0;
}
static const struct nv50_wimm_func
wimmc37b = {
.point = wimmc37b_point,
.update = wimmc37b_update,
};
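/* The window immediate ('wimm') channel is a lightweight companion to a
 * window channel, identified by the same window id.  It only ever carries
 * cursor-style position updates (SET_POINT_OUT) and an UPDATE that may
 * interlock with its paired window channel.
 */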
static int
wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
s32 oclass, struct nv50_wndw *wndw)
{
struct nvif_disp_chan_v0 args = {
.id = wndw->id,
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
&oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
return ret;
}
wndw->interlock.wimm = wndw->interlock.data;
wndw->immd = func;
return 0;
}
int
wimmc37b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
{
return wimmc37b_init_(&wimmc37b, drm, oclass, wndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
.caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
#if IS_ENABLED(CONFIG_DEBUG_FS)
.crc = &crc907d,
#endif
.dac = &dac907d,
.sor = &sor907d,
};
int
core917d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&core917d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/core917d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
static int
corec57d_init(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
const u32 windows = 8; /*XXX: hardcoded - should come from the HW capabilities */
int ret, i;
if ((ret = PUSH_WAIT(push, 2 + windows * 5)))
return ret;
PUSH_MTHD(push, NVC57D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
for (i = 0; i < windows; i++) {
PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
PUSH_MTHD(push, NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
NVVAL(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
NVDEF(NVC57D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE));
}
core->assign_windows = true;
return PUSH_KICK(push);
}
static const struct nv50_core_func
corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
.head = &headc57d,
.sor = &sorc37d,
#if IS_ENABLED(CONFIG_DEBUG_FS)
.crc = &crcc57d,
#endif
};
int
corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&corec57d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/corec57d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h>
static int
sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, SOR_SET_CONTROL(or), ctrl);
return 0;
}
static void
sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
{
u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
outp->caps.dp_interlace = !!(tmp & 0x04000000);
}
const struct nv50_outp_func
sorc37d = {
.ctrl = sorc37d_ctrl,
.get_caps = sorc37d_get_caps,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/sorc37d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
#include <nvif/push507c.h>
#include <nvif/timer.h>
#include <nvhw/class/cl907d.h>
#include "nouveau_bo.h"
int
core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
{
struct nv50_core *core = disp->core;
struct nouveau_bo *bo = disp->sync;
s64 time;
int ret;
NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4,
NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE));
ret = core507d_read_caps(disp);
if (ret < 0)
return ret;
time = nvif_msec(core->chan.base.device, 2000ULL,
if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE))
break;
usleep_range(1, 2);
);
if (time < 0)
NV_ERROR(drm, "core caps notifier timeout\n");
return 0;
}
static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
.caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
#if IS_ENABLED(CONFIG_DEBUG_FS)
.crc = &crc907d,
#endif
.dac = &dac907d,
.sor = &sor907d,
};
int
core907d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&core907d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/core907d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "lut.h"
#include "disp.h"
#include <drm/drm_color_mgmt.h>
#include <drm/drm_mode.h>
#include <drm/drm_property.h>
#include <nvif/class.h>
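/* Upload a LUT into the buffer's VRAM allocation.  If no blob is attached
 * to the DRM state, a 1024-entry linear identity ramp is generated
 * instead: (i << 16) >> 10 is simply i * 64, spreading entries 0..1023
 * across 0..65472 of the 16-bit component range.
 */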
u32
nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
void (*load)(struct drm_color_lut *, int, void __iomem *))
{
struct drm_color_lut *in = blob ? blob->data : NULL;
void __iomem *mem = lut->mem[buffer].object.map.ptr;
const u32 addr = lut->mem[buffer].addr;
int i;
if (!in) {
in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
if (!WARN_ON(!in)) {
for (i = 0; i < 1024; i++) {
in[i].red =
in[i].green =
in[i].blue = (i << 16) >> 10;
}
load(in, 1024, mem);
kvfree(in);
}
} else {
load(in, drm_color_lut_size(blob), mem);
}
return addr;
}
void
nv50_lut_fini(struct nv50_lut *lut)
{
int i;
for (i = 0; i < ARRAY_SIZE(lut->mem); i++)
nvif_mem_dtor(&lut->mem[i]);
}
int
nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
struct nv50_lut *lut)
{
const u32 size = disp->disp->object.oclass < GF110_DISP ? 257 : 1025;
int i;
for (i = 0; i < ARRAY_SIZE(lut->mem); i++) {
int ret = nvif_mem_ctor_map(mmu, "kmsLut", NVIF_MEM_VRAM,
size * 8, &lut->mem[i]);
if (ret)
return ret;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/lut.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include "head.h"
#include <nvif/if0014.h>
#include <nvif/push507c.h>
#include <nvif/timer.h>
#include <nvhw/class/cl507d.h>
#include "nouveau_bo.h"
int
core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
return ret;
if (ntfy) {
PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
}
PUSH_MTHD(push, NV507D, UPDATE, interlock[NV50_DISP_INTERLOCK_BASE] |
interlock[NV50_DISP_INTERLOCK_OVLY] |
NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE),
SET_NOTIFIER_CONTROL,
NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
return PUSH_KICK(push);
}
int
core507d_ntfy_wait_done(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
if (NVBO_TD32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, ==, TRUE))
break;
usleep_range(1, 2);
);
return time < 0 ? time : 0;
}
void
core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
{
NVBO_WR32(bo, offset, NV_DISP_CORE_NOTIFIER_1, COMPLETION_0,
NVDEF(NV_DISP_CORE_NOTIFIER_1, COMPLETION_0, DONE, FALSE));
}
int
core507d_read_caps(struct nv50_disp *disp)
{
struct nvif_push *push = disp->core->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
if (ret)
return ret;
PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
return PUSH_KICK(push);
}
int
core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
{
struct nv50_core *core = disp->core;
struct nouveau_bo *bo = disp->sync;
s64 time;
int ret;
NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1,
NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
ret = core507d_read_caps(disp);
if (ret < 0)
return ret;
time = nvif_msec(core->chan.base.device, 2000ULL,
if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
break;
usleep_range(1, 2);
);
if (time < 0)
NV_ERROR(drm, "core caps notifier timeout\n");
return 0;
}
int
core507d_init(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, SET_CONTEXT_DMA_NOTIFIER, core->chan.sync.handle);
return PUSH_KICK(push);
}
static const struct nv50_core_func
core507d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
.caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head507d,
.dac = &dac507d,
.sor = &sor507d,
.pior = &pior507d,
};
int
core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
s32 oclass, struct nv50_core **pcore)
{
struct nvif_disp_chan_v0 args = {};
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core;
int ret;
if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
return -ENOMEM;
core->func = func;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &core->chan);
if (ret) {
NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
return ret;
}
return 0;
}
int
core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
return core507d_new_(&core507d, drm, oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/core507d.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wndw.h"
#include "atom.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57e.h>
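/* Ampere's image_set below mirrors wndwc57e_image_set() except that
 * SET_STORAGE programs only the block height; no MEMORY_LAYOUT value is
 * written here, which appears to be the only difference from the Volta
 * version.
 */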
static int
wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
return ret;
PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
PUSH_MTHD(push, NVC57E, SET_SIZE,
NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
SET_STORAGE,
NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
SET_PARAMS,
NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
SET_PLANAR_STORAGE(0),
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
return 0;
}
static const struct nv50_wndw_func
wndwc67e = {
.acquire = wndwc37e_acquire,
.release = wndwc37e_release,
.sema_set = wndwc37e_sema_set,
.sema_clr = wndwc37e_sema_clr,
.ntfy_set = wndwc37e_ntfy_set,
.ntfy_clr = wndwc37e_ntfy_clr,
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
.ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
.csc = base907c_csc,
.csc_set = wndwc57e_csc_set,
.csc_clr = wndwc57e_csc_clr,
.image_set = wndwc67e_image_set,
.image_clr = wndwc37e_image_clr,
.blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
int
wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
s32 oclass, struct nv50_wndw **pwndw)
{
return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ovly.h"
#include "atom.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <nvif/if0014.h>
#include <nvif/push507c.h>
#include <nvhw/class/cl507e.h>
#include "nouveau_bo.h"
int
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV507E, SET_POINT_IN,
NVVAL(NV507E, SET_POINT_IN, X, asyw->scale.sx) |
NVVAL(NV507E, SET_POINT_IN, Y, asyw->scale.sy),
SET_SIZE_IN,
NVVAL(NV507E, SET_SIZE_IN, WIDTH, asyw->scale.sw) |
NVVAL(NV507E, SET_SIZE_IN, HEIGHT, asyw->scale.sh),
SET_SIZE_OUT,
NVVAL(NV507E, SET_SIZE_OUT, WIDTH, asyw->scale.dw));
return 0;
}
static int
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
return ret;
PUSH_MTHD(push, NV507E, SET_PRESENT_CONTROL,
NVDEF(NV507E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
NVVAL(NV507E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV507E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
PUSH_MTHD(push, NV507E, SET_COMPOSITION_CONTROL,
NVDEF(NV507E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
PUSH_MTHD(push, NV507E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NV507E, SURFACE_SET_SIZE,
NVVAL(NV507E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV507E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE,
NVVAL(NV507E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
NVVAL(NV507E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV507E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SURFACE_SET_PARAMS,
NVVAL(NV507E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVVAL(NV507E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
NVVAL(NV507E, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
NVDEF(NV507E, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
return 0;
}
void
ovly507e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
asyh->ovly.cpp = 0;
}
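/* Overlay planes are validated as strictly non-scaling here, so the
 * SET_SIZE_IN/SET_SIZE_OUT pair programmed by ovly507e_scale_set() is
 * effectively 1:1.  The framebuffer's bytes-per-pixel is recorded in the
 * head state, presumably for bandwidth bookkeeping elsewhere.
 */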
int
ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
const struct drm_framebuffer *fb = asyw->state.fb;
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
asyh->ovly.cpp = fb->format->cpp[0];
return 0;
}
#include "nouveau_bo.h"
static const struct nv50_wndw_func
ovly507e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = base507c_ntfy_reset,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.image_set = ovly507e_image_set,
.image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
.update = base507c_update,
};
static const u32
ovly507e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB1555,
0
};
int
ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
struct nv50_wndw **pwndw)
{
struct nvif_disp_chan_v0 args = {
.id = head,
};
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
"ovly", head, format, BIT(head),
NV50_DISP_INTERLOCK_OVLY, interlock_data,
&wndw);
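	/* Hand the (possibly partially constructed) window back to the
	 * caller even when channel creation fails, so the error path can
	 * still tear it down.
	 */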
if (*pwndw = wndw, ret)
return ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
return ret;
}
wndw->ntfy = NV50_DISP_OVLY_NTFY(wndw->id);
wndw->sema = NV50_DISP_OVLY_SEM0(wndw->id);
wndw->data = 0x00000000;
return 0;
}
int
ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass,
0x00000004 << (head * 8), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/ovly507e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "curs.h"
#include <nvif/class.h>
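/* Cursor classes, ordered newest-first: nvif_mclass() returns the index
 * of the first entry both the kernel and the device support, so the most
 * capable matching class wins.
 */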
int
nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
{
struct {
s32 oclass;
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
{ GF110_DISP_CURSOR, 0, curs907a_new },
{ GT214_DISP_CURSOR, 0, curs507a_new },
{ G82_DISP_CURSOR, 0, curs507a_new },
{ NV50_DISP_CURSOR, 0, curs507a_new },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid;
cid = nvif_mclass(&disp->disp->object, curses);
if (cid < 0) {
NV_ERROR(drm, "No supported cursor immediate class\n");
return cid;
}
return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/curs.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "base.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl827c.h>
static int
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV827C, SET_PRESENT_CONTROL,
NVVAL(NV827C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVVAL(NV827C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV827C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
if (asyw->image.format == NV827C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
SET_CONVERSION,
NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV827C, SET_PROCESSING,
NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
SET_CONVERSION,
NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV827C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
SURFACE_SET_OFFSET(0, 1), 0x00000000,
SURFACE_SET_SIZE(0),
NVVAL(NV827C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV827C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE(0),
NVVAL(NV827C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
NVVAL(NV827C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV827C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SURFACE_SET_PARAMS(0),
NVVAL(NV827C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NV827C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
NVDEF(NV827C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
NVDEF(NV827C, SURFACE_SET_PARAMS, LAYOUT, FRM));
return 0;
}
static const struct nv50_wndw_func
base827c = {
.acquire = base507c_acquire,
.release = base507c_release,
.sema_set = base507c_sema_set,
.sema_clr = base507c_sema_clr,
.ntfy_reset = base507c_ntfy_reset,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.olut_core = 1,
.xlut_set = base507c_xlut_set,
.xlut_clr = base507c_xlut_clr,
.image_set = base827c_image_set,
.image_clr = base507c_image_clr,
.update = base507c_update,
};
int
base827c_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return base507c_new_(&base827c, base507c_format, drm, head, oclass,
0x00000002 << (head * 8), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/base827c.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wndw.h"
#include "atom.h"
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57e.h>
static int
wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
return ret;
PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
PUSH_MTHD(push, NVC57E, SET_SIZE,
NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
SET_STORAGE,
NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NVC57E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SET_PARAMS,
NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
SET_PLANAR_STORAGE(0),
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
return 0;
}
int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
const u32 identity[12] = {
0x00010000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00010000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00010000, 0x00000000,
};
int ret;
if ((ret = PUSH_WAIT(push, 1 + ARRAY_SIZE(identity))))
return ret;
PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, identity, ARRAY_SIZE(identity));
return 0;
}
int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NVC57E, SET_FMT_COEFFICIENT_C00, asyw->csc.matrix, 12);
return 0;
}
int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ILUT, 0x00000000);
return 0;
}
int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC57E, SET_ILUT_CONTROL,
NVVAL(NVC57E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
NVVAL(NVC57E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
NVVAL(NVC57E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode),
SET_CONTEXT_DMA_ILUT, asyw->xlut.handle,
SET_OFFSET_ILUT, asyw->xlut.i.offset >> 8);
return 0;
}
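/* Convert an unsigned U0.16 fixed-point component to an IEEE-754 half
 * float.  The value is normalised by shifting until bit 15 (the implicit
 * leading one) is set, counting the shifts into a negative exponent; the
 * next ten bits become the mantissa and the exponent is biased by 15.
 * Hand-checked example: 0x8000 (0.5) exits the loop with exp == -1 and
 * man == 0, yielding (14 << 10) | 0 == 0x3800, which is 0.5 in FP16.
 */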
static u16
fixedU0_16_FP16(u16 fixed)
{
int sign = 0, exp = 0, man = 0;
if (fixed) {
while (--exp && !(fixed & 0x8000))
fixed <<= 1;
man = ((fixed << 1) & 0xffc0) >> 6;
exp += 15;
}
return (sign << 15) | (exp << 10) | man;
}
static void
wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
{
memset_io(mem, 0x00, 0x20); /* VSS header. */
mem += 0x20;
for (; size--; in++, mem += 0x08) {
u16 r = fixedU0_16_FP16(drm_color_lut_extract(in-> red, 16));
u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
writew(r, mem + 0);
writew(g, mem + 2);
writew(b, mem + 4);
}
/* INTERPOLATE modes require a "next" entry to interpolate with,
* so we replicate the last entry to deal with this for now.
*/
writew(readw(mem - 8), mem + 0);
writew(readw(mem - 6), mem + 2);
writew(readw(mem - 4), mem + 4);
}
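/* Pick the ILUT layout for the given LUT size: 256 entries use DIRECT8,
 * anything else (1024 in practice) uses DIRECT10.  The programmed size
 * counts the VSS header (four 8-byte slots) plus the replicated
 * terminator entry written by wndwc57e_ilut_load() above.
 */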
void
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (!size)
size = 1024;
if (size == 256)
asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8;
else
asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10;
asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE;
asyw->xlut.i.load = wndwc57e_ilut_load;
}
/****************************************************************
* Log2(block height) ----------------------------+ *
* Page Kind ----------------------------------+ | *
* Gob Height/Page Kind Generation ------+ | | *
* Sector layout -------+ | | | *
* Compression ------+ | | | | */
const u64 wndwc57e_modifiers[] = { /* | | | | | */
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static const struct nv50_wndw_func
wndwc57e = {
.acquire = wndwc37e_acquire,
.release = wndwc37e_release,
.sema_set = wndwc37e_sema_set,
.sema_clr = wndwc37e_sema_clr,
.ntfy_set = wndwc37e_ntfy_set,
.ntfy_clr = wndwc37e_ntfy_clr,
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
.ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
.csc = base907c_csc,
.csc_set = wndwc57e_csc_set,
.csc_clr = wndwc57e_csc_clr,
.image_set = wndwc57e_image_set,
.image_clr = wndwc37e_image_clr,
.blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
int
wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
s32 oclass, struct nv50_wndw **pwndw)
{
return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
BIT(index >> 1), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ovly.h"
#include "atom.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl907e.h>
static int
ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
return ret;
PUSH_MTHD(push, NV907E, SET_PRESENT_CONTROL,
NVDEF(NV907E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
NVVAL(NV907E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV907E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
PUSH_MTHD(push, NV907E, SET_COMPOSITION_CONTROL,
NVDEF(NV907E, SET_COMPOSITION_CONTROL, MODE, OPAQUE));
PUSH_MTHD(push, NV907E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NV907E, SURFACE_SET_SIZE,
NVVAL(NV907E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV907E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE,
NVVAL(NV907E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
NVVAL(NV907E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV907E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SURFACE_SET_PARAMS,
NVVAL(NV907E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVVAL(NV907E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
return 0;
}
const struct nv50_wndw_func
ovly907e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = ovly827e_ntfy_reset,
.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
.image_set = ovly907e_image_set,
.image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
.update = base507c_update,
};
static const u32
ovly907e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XBGR16161616F,
0
};
int
ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return ovly507e_new_(&ovly907e, ovly907e_format, drm, head, oclass,
0x00000004 << (head * 4), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/ovly907e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ovly.h"
static const u32
ovly917e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR16161616F,
0
};
int
ovly917e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return ovly507e_new_(&ovly907e, ovly917e_format, drm, head, oclass,
0x00000004 << (head * 4), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/ovly917e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "base.h"
#include "atom.h"
static const u32
base917c_format[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ABGR16161616F,
0
};
int
base917c_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return base507c_new_(&base907c, base917c_format, drm, head, oclass,
0x00000002 << (head * 4), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/base917c.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "base.h"
#include <nvif/class.h>
int
nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
{
struct {
s32 oclass;
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} bases[] = {
{ GK110_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
{ GK104_DISP_BASE_CHANNEL_DMA, 0, base917c_new },
{ GF110_DISP_BASE_CHANNEL_DMA, 0, base907c_new },
{ GT214_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
{ GT200_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
{ G82_DISP_BASE_CHANNEL_DMA, 0, base827c_new },
{ NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid;
cid = nvif_mclass(&disp->disp->object, bases);
if (cid < 0) {
NV_ERROR(drm, "No supported base class\n");
return cid;
}
return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/base.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "base.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl907c.h>
static int
base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 10)))
return ret;
PUSH_MTHD(push, NV907C, SET_PRESENT_CONTROL,
NVVAL(NV907C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVDEF(NV907C, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE) |
NVVAL(NV907C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV907C, SET_CONTEXT_DMAS_ISO(0), asyw->image.handle, 1);
PUSH_MTHD(push, NV907C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
SURFACE_SET_OFFSET(0, 1), 0x00000000,
SURFACE_SET_SIZE(0),
NVVAL(NV907C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV907C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE(0),
NVVAL(NV907C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
NVVAL(NV907C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV907C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SURFACE_SET_PARAMS(0),
NVVAL(NV907C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NV907C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
NVDEF(NV907C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
NVDEF(NV907C, SURFACE_SET_PARAMS, LAYOUT, FRM));
return 0;
}
static int
base907c_xlut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
return ret;
PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
NVDEF(NV907C, SET_BASE_LUT_LO, ENABLE, DISABLE));
PUSH_MTHD(push, NV907C, SET_OUTPUT_LUT_LO,
NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, 0x00000000);
return 0;
}
static int
base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
return ret;
PUSH_MTHD(push, NV907C, SET_BASE_LUT_LO,
NVVAL(NV907C, SET_BASE_LUT_LO, ENABLE, asyw->xlut.i.enable) |
NVVAL(NV907C, SET_BASE_LUT_LO, MODE, asyw->xlut.i.mode),
SET_BASE_LUT_HI, asyw->xlut.i.offset >> 8,
SET_OUTPUT_LUT_LO,
NVDEF(NV907C, SET_OUTPUT_LUT_LO, ENABLE, USE_CORE_LUT));
PUSH_MTHD(push, NV907C, SET_CONTEXT_DMA_LUT, asyw->xlut.handle);
return 0;
}
static void
base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size == 1024)
asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
else
asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
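/* Note (an inference, not stated in the original source): the
* INTERPOLATE_257/1025 mode names count one entry more than the
* 256/1024 user entries, so the interpolator has both endpoints of
* the final segment available.
*/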
asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE;
asyw->xlut.i.load = head907d_olut_load;
}
static inline u32
csc_drm_to_base(u64 in)
{
/* base takes a 19-bit 2's complement value in S3.16 format */
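/*
* A worked example (illustrative, derived from the code below): DRM
* hands us S31.32 fixed point, so 1.5 arrives as 0x0000000180000000.
* Here integer = 1 and fraction = 0x80000000, giving
* (1 << 16) | 0x8000 = 0x18000, i.e. 1.5 in S3.16. Positive values of
* 4.0 or more saturate to the 19-bit maximum, (1 << 18) - 1.
*/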
bool sign = in & BIT_ULL(63);
u32 integer = (in >> 32) & 0x7fffffff;
u32 fraction = in & 0xffffffff;
if (integer >= 4) {
return (1 << 18) - (sign ? 0 : 1);
} else {
u32 ret = (integer << 16) | (fraction >> 16);
if (sign)
ret = -ret;
return ret & GENMASK(18, 0);
}
}
void
base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
const struct drm_color_ctm *ctm)
{
int i, j;
for (j = 0; j < 3; j++) {
for (i = 0; i < 4; i++) {
u32 *val = &asyw->csc.matrix[j * 4 + i];
/* DRM CTMs have no constant-offset column, but the
* HW CSC does, so zero it out. */
if (i == 3) {
*val = 0;
} else {
*val = csc_drm_to_base(ctm->matrix[j * 3 + i]);
}
}
}
}
static int
base907c_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
NVDEF(NV907C, SET_CSC_RED2RED, OWNER, CORE));
return 0;
}
static int
base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV907C, SET_CSC_RED2RED,
NVDEF(NV907C, SET_CSC_RED2RED, OWNER, BASE) |
NVVAL(NV907C, SET_CSC_RED2RED, COEFF, asyw->csc.matrix[0]),
SET_CSC_GRN2RED, &asyw->csc.matrix[1], 11);
return 0;
}
const struct nv50_wndw_func
base907c = {
.acquire = base507c_acquire,
.release = base507c_release,
.sema_set = base507c_sema_set,
.sema_clr = base507c_sema_clr,
.ntfy_reset = base507c_ntfy_reset,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = base907c_ilut,
.csc = base907c_csc,
.csc_set = base907c_csc_set,
.csc_clr = base907c_csc_clr,
.olut_core = true,
.ilut_size = 1024,
.xlut_set = base907c_xlut_set,
.xlut_clr = base907c_xlut_clr,
.image_set = base907c_image_set,
.image_clr = base507c_image_clr,
.update = base507c_update,
};
int
base907c_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return base507c_new_(&base907c, base507c_format, drm, head, oclass,
0x00000002 << (head * 4), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/base907c.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wndw.h"
#include "atom.h"
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc37e.h>
static int
wndwc37e_csc_clr(struct nv50_wndw *wndw)
{
return 0;
}
static int
wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CSC_RED2RED, asyw->csc.matrix, 12);
return 0;
}
static int
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_INPUT_LUT, 0x00000000);
return 0;
}
static int
wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CONTROL_INPUT_LUT,
NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, OUTPUT_MODE, asyw->xlut.i.output_mode) |
NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, RANGE, asyw->xlut.i.range) |
NVVAL(NVC37E, SET_CONTROL_INPUT_LUT, SIZE, asyw->xlut.i.size),
SET_OFFSET_INPUT_LUT, asyw->xlut.i.offset >> 8,
SET_CONTEXT_DMA_INPUT_LUT, asyw->xlut.handle);
return 0;
}
static void
wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
asyw->xlut.i.size = size == 1024 ? NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 :
NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257;
asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY;
asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE;
asyw->xlut.i.load = head907d_olut_load;
}
int
wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 8)))
return ret;
PUSH_MTHD(push, NVC37E, SET_COMPOSITION_CONTROL,
NVDEF(NVC37E, SET_COMPOSITION_CONTROL, COLOR_KEY_SELECT, DISABLE) |
NVVAL(NVC37E, SET_COMPOSITION_CONTROL, DEPTH, asyw->blend.depth),
SET_COMPOSITION_CONSTANT_ALPHA,
NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K1, asyw->blend.k1) |
NVVAL(NVC37E, SET_COMPOSITION_CONSTANT_ALPHA, K2, 0),
SET_COMPOSITION_FACTOR_SELECT,
NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_MATCH_SELECT,
asyw->blend.src_color) |
NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, SRC_COLOR_FACTOR_NO_MATCH_SELECT,
asyw->blend.src_color) |
NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_MATCH_SELECT,
asyw->blend.dst_color) |
NVVAL(NVC37E, SET_COMPOSITION_FACTOR_SELECT, DST_COLOR_FACTOR_NO_MATCH_SELECT,
asyw->blend.dst_color),
SET_KEY_ALPHA,
NVVAL(NVC37E, SET_KEY_ALPHA, MIN, 0x0000) |
NVVAL(NVC37E, SET_KEY_ALPHA, MAX, 0xffff),
SET_KEY_RED_CR,
NVVAL(NVC37E, SET_KEY_RED_CR, MIN, 0x0000) |
NVVAL(NVC37E, SET_KEY_RED_CR, MAX, 0xffff),
SET_KEY_GREEN_Y,
NVVAL(NVC37E, SET_KEY_GREEN_Y, MIN, 0x0000) |
NVVAL(NVC37E, SET_KEY_GREEN_Y, MAX, 0xffff),
SET_KEY_BLUE_CB,
NVVAL(NVC37E, SET_KEY_BLUE_CB, MIN, 0x0000) |
NVVAL(NVC37E, SET_KEY_BLUE_CB, MAX, 0xffff));
return 0;
}
int
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
NVDEF(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), 0x00000000);
return 0;
}
static int
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
return ret;
PUSH_MTHD(push, NVC37E, SET_PRESENT_CONTROL,
NVVAL(NVC37E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
NVVAL(NVC37E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVDEF(NVC37E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
PUSH_MTHD(push, NVC37E, SET_SIZE,
NVVAL(NVC37E, SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NVC37E, SET_SIZE, HEIGHT, asyw->image.h),
SET_STORAGE,
NVVAL(NVC37E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NVC37E, SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SET_PARAMS,
NVVAL(NVC37E, SET_PARAMS, FORMAT, asyw->image.format) |
NVVAL(NVC37E, SET_PARAMS, COLOR_SPACE, asyw->image.colorspace) |
NVDEF(NVC37E, SET_PARAMS, INPUT_RANGE, BYPASS) |
NVDEF(NVC37E, SET_PARAMS, UNDERREPLICATE, DISABLE) |
NVDEF(NVC37E, SET_PARAMS, DE_GAMMA, NONE) |
NVVAL(NVC37E, SET_PARAMS, CSC, asyw->csc.valid) |
NVDEF(NVC37E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
NVDEF(NVC37E, SET_PARAMS, SWAP_UV, DISABLE),
SET_PLANAR_STORAGE(0),
NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NVC37E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
PUSH_MTHD(push, NVC37E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NVC37E, SET_POINT_IN(0),
NVVAL(NVC37E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
NVVAL(NVC37E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
PUSH_MTHD(push, NVC37E, SET_SIZE_IN,
NVVAL(NVC37E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
NVVAL(NVC37E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
PUSH_MTHD(push, NVC37E, SET_SIZE_OUT,
NVVAL(NVC37E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
NVVAL(NVC37E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
return 0;
}
int
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
return 0;
}
int
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle,
SET_NOTIFIER_CONTROL,
NVVAL(NVC37E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
NVVAL(NVC37E, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 4));
return 0;
}
int
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37E, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
return 0;
}
int
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NVC37E, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
SET_SEMAPHORE_RELEASE, asyw->sema.release,
SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
return 0;
}
int
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NVC37E, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
interlock[NV50_DISP_INTERLOCK_CORE],
SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
PUSH_MTHD(push, NVC37E, UPDATE, 0x00000001 |
NVVAL(NVC37E, UPDATE, INTERLOCK_WITH_WIN_IMM,
!!(interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)));
return PUSH_KICK(push);
}
void
wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
}
int
wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
}
static const u32
wndwc37e_format[] = {
DRM_FORMAT_C8,
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ABGR16161616F,
0
};
static const struct nv50_wndw_func
wndwc37e = {
.acquire = wndwc37e_acquire,
.release = wndwc37e_release,
.sema_set = wndwc37e_sema_set,
.sema_clr = wndwc37e_sema_clr,
.ntfy_set = wndwc37e_ntfy_set,
.ntfy_clr = wndwc37e_ntfy_clr,
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc37e_ilut,
.ilut_size = 1024,
.xlut_set = wndwc37e_ilut_set,
.xlut_clr = wndwc37e_ilut_clr,
.csc = base907c_csc,
.csc_set = wndwc37e_csc_set,
.csc_clr = wndwc37e_csc_clr,
.image_set = wndwc37e_image_set,
.image_clr = wndwc37e_image_clr,
.blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
int
wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
enum drm_plane_type type, int index, s32 oclass, u32 heads,
struct nv50_wndw **pwndw)
{
struct nvif_disp_chan_v0 args = {
.id = index,
};
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
ret = nv50_wndw_new_(func, drm->dev, type, "wndw", index,
wndwc37e_format, heads, NV50_DISP_INTERLOCK_WNDW,
BIT(index), &wndw);
if (*pwndw = wndw, ret)
return ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "qndw%04x allocation failed: %d\n", oclass, ret);
return ret;
}
wndw->ntfy = NV50_DISP_WNDW_NTFY(wndw->id);
wndw->sema = NV50_DISP_WNDW_SEM0(wndw->id);
wndw->data = 0x00000000;
return 0;
}
int
wndwc37e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
s32 oclass, struct nv50_wndw **pwndw)
{
return wndwc37e_new_(&wndwc37e, drm, type, index, oclass,
BIT(index >> 1), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ovly.h"
#include "atom.h"
#include <nouveau_bo.h>
#include <nvif/push507c.h>
#include <nvif/timer.h>
#include <nvhw/class/cl827e.h>
static int
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
return ret;
PUSH_MTHD(push, NV827E, SET_PRESENT_CONTROL,
NVDEF(NV827E, SET_PRESENT_CONTROL, BEGIN_MODE, ASAP) |
NVVAL(NV827E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV827E, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
PUSH_MTHD(push, NV827E, SET_COMPOSITION_CONTROL,
NVDEF(NV827E, SET_COMPOSITION_CONTROL, MODE, OPAQUE_SUSPEND_BASE));
PUSH_MTHD(push, NV827E, SURFACE_SET_OFFSET, asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NV827E, SURFACE_SET_SIZE,
NVVAL(NV827E, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV827E, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE,
NVVAL(NV827E, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh) |
NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, (asyw->image.pitch[0] >> 8)) |
NVVAL(NV827E, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV827E, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout),
SURFACE_SET_PARAMS,
NVVAL(NV827E, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVVAL(NV827E, SURFACE_SET_PARAMS, COLOR_SPACE, asyw->image.colorspace));
return 0;
}
int
ovly827e_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
if (NVBO_TD32(bo, offset, NV_DISP_NOTIFICATION_1, _3, STATUS, ==, BEGUN))
break;
usleep_range(1, 2);
);
return time < 0 ? time : 0;
}
void
ovly827e_ntfy_reset(struct nouveau_bo *bo, u32 offset)
{
NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_0, 0);
NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, TIME_STAMP_1, 0);
NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _2, 0);
NVBO_WR32(bo, offset, NV_DISP_NOTIFICATION_1, _3,
NVDEF(NV_DISP_NOTIFICATION_1, _3, STATUS, NOT_BEGUN));
}
static const struct nv50_wndw_func
ovly827e = {
.acquire = ovly507e_acquire,
.release = ovly507e_release,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_reset = ovly827e_ntfy_reset,
.ntfy_wait_begun = ovly827e_ntfy_wait_begun,
.image_set = ovly827e_image_set,
.image_clr = base507c_image_clr,
.scale_set = ovly507e_scale_set,
.update = base507c_update,
};
const u32
ovly827e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR2101010,
0
};
int
ovly827e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return ovly507e_new_(&ovly827e, ovly827e_format, drm, head, oclass,
0x00000004 << (head * 8), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/ovly827e.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "oimm.h"
#include <nvif/if0014.h>
static int
oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
s32 oclass, struct nv50_wndw *wndw)
{
struct nvif_disp_chan_v0 args = {
.id = wndw->id,
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
ret = nvif_object_ctor(&disp->disp->object, "kmsOvim", 0, oclass,
&args, sizeof(args), &wndw->wimm.base.user);
if (ret) {
NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
return ret;
}
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
wndw->immd = func;
return 0;
}
int
oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
{
return oimm507b_init_(&curs507a, drm, oclass, wndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/oimm507b.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "base.h"
#include <nvif/if0014.h>
#include <nvif/push507c.h>
#include <nvif/timer.h>
#include <nvhw/class/cl507c.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include "nouveau_bo.h"
int
base507c_update(struct nv50_wndw *wndw, u32 *interlock)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507C, UPDATE, interlock[NV50_DISP_INTERLOCK_CORE]);
return PUSH_KICK(push);
}
int
base507c_image_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
NVDEF(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING) |
NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0));
PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, 0x00000000);
return 0;
}
static int
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV507C, SET_PRESENT_CONTROL,
NVVAL(NV507C, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVVAL(NV507C, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval));
PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_ISO, asyw->image.handle[0]);
if (asyw->image.format == NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16) {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, ENABLE),
SET_CONVERSION,
NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
} else {
PUSH_MTHD(push, NV507C, SET_PROCESSING,
NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
SET_CONVERSION,
NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
NVVAL(NV507C, SET_CONVERSION, OFS, 0));
}
PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NV507C, SURFACE_SET_SIZE(0),
NVVAL(NV507C, SURFACE_SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NV507C, SURFACE_SET_SIZE, HEIGHT, asyw->image.h),
SURFACE_SET_STORAGE(0),
NVVAL(NV507C, SURFACE_SET_STORAGE, MEMORY_LAYOUT, asyw->image.layout) |
NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.pitch[0] >> 8) |
NVVAL(NV507C, SURFACE_SET_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NV507C, SURFACE_SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
SURFACE_SET_PARAMS(0),
NVVAL(NV507C, SURFACE_SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NV507C, SURFACE_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
NVDEF(NV507C, SURFACE_SET_PARAMS, GAMMA, LINEAR) |
NVDEF(NV507C, SURFACE_SET_PARAMS, LAYOUT, FRM) |
NVVAL(NV507C, SURFACE_SET_PARAMS, KIND, asyw->image.kind) |
NVDEF(NV507C, SURFACE_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256));
return 0;
}
int
base507c_xlut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, DISABLE));
return 0;
}
int
base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507C, SET_BASE_LUT_LO,
NVDEF(NV507C, SET_BASE_LUT_LO, ENABLE, USE_CORE_LUT));
return 0;
}
int
base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
struct nvif_device *device)
{
s64 time = nvif_msec(device, 2000ULL,
if (NVBO_TD32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0, STATUS, ==, BEGUN))
break;
usleep_range(1, 2);
);
return time < 0 ? time : 0;
}
int
base507c_ntfy_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_NOTIFIER, 0x00000000);
return 0;
}
int
base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NV507C, SET_NOTIFIER_CONTROL,
NVVAL(NV507C, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken) |
NVVAL(NV507C, SET_NOTIFIER_CONTROL, OFFSET, asyw->ntfy.offset >> 2),
SET_CONTEXT_DMA_NOTIFIER, asyw->ntfy.handle);
return 0;
}
void
base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
{
NVBO_WR32(bo, offset, NV_DISP_BASE_NOTIFIER_1, _0,
NVDEF(NV_DISP_BASE_NOTIFIER_1, _0, STATUS, NOT_BEGUN));
}
int
base507c_sema_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507C, SET_CONTEXT_DMA_SEMAPHORE, 0x00000000);
return 0;
}
int
base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NV507C, SET_SEMAPHORE_CONTROL, asyw->sema.offset,
SET_SEMAPHORE_ACQUIRE, asyw->sema.acquire,
SET_SEMAPHORE_RELEASE, asyw->sema.release,
SET_CONTEXT_DMA_SEMAPHORE, asyw->sema.handle);
return 0;
}
void
base507c_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
asyh->base.cpp = 0;
}
int
base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
const struct drm_framebuffer *fb = asyw->state.fb;
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
if (ret)
return ret;
if (!wndw->func->ilut) {
if ((asyh->base.cpp != 1) ^ (fb->format->cpp[0] != 1))
asyh->state.color_mgmt_changed = true;
}
asyh->base.depth = fb->format->depth;
asyh->base.cpp = fb->format->cpp[0];
asyh->base.x = asyw->state.src.x1 >> 16;
asyh->base.y = asyw->state.src.y1 >> 16;
asyh->base.w = asyw->state.fb->width;
asyh->base.h = asyw->state.fb->height;
/* Some newer formats, esp FP16 ones, don't have a
* "depth". There's nothing that really makes sense there
* either, so just set it to the implicit bit count.
*/
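/* e.g. DRM_FORMAT_XBGR16161616F carries depth 0 but cpp[0] == 8,
* so it leaves here with an implicit depth of 64.
*/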
if (!asyh->base.depth)
asyh->base.depth = asyh->base.cpp * 8;
return 0;
}
const u32
base507c_format[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ABGR16161616F,
0
};
static const struct nv50_wndw_func
base507c = {
.acquire = base507c_acquire,
.release = base507c_release,
.sema_set = base507c_sema_set,
.sema_clr = base507c_sema_clr,
.ntfy_reset = base507c_ntfy_reset,
.ntfy_set = base507c_ntfy_set,
.ntfy_clr = base507c_ntfy_clr,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.olut_core = 1,
.xlut_set = base507c_xlut_set,
.xlut_clr = base507c_xlut_clr,
.image_set = base507c_image_set,
.image_clr = base507c_image_clr,
.update = base507c_update,
};
int
base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
struct nv50_wndw **pwndw)
{
struct nvif_disp_chan_v0 args = {
.id = head,
};
struct nouveau_display *disp = nouveau_display(drm->dev);
struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
"base", head, format, BIT(head),
NV50_DISP_INTERLOCK_BASE, interlock_data, &wndw);
if (*pwndw = wndw, ret)
return ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
&oclass, head, &args, sizeof(args),
disp50->sync->offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
return ret;
}
wndw->ntfy = NV50_DISP_BASE_NTFY(wndw->id);
wndw->sema = NV50_DISP_BASE_SEM0(wndw->id);
wndw->data = 0x00000000;
return 0;
}
int
base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return base507c_new_(&base507c, base507c_format, drm, head, oclass,
0x00000002 << (head * 8), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/base507c.c |
/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <nvif/push507c.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/event.h>
#include <nvif/if0012.h>
#include <nvif/if0014.h>
#include <nvif/timer.h>
#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
#include <nvhw/class/cl887d.h>
#include <nvhw/class/cl907d.h>
#include <nvhw/class/cl917d.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nv50_display.h"
#include <subdev/bios/dp.h>
/******************************************************************************
* EVO channel
*****************************************************************************/
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
struct nv50_chan *chan)
{
struct nvif_sclass *sclass;
int ret, i, n;
chan->device = device;
ret = n = nvif_object_sclass_get(disp, &sclass);
if (ret < 0)
return ret;
while (oclass[0]) {
for (i = 0; i < n; i++) {
if (sclass[i].oclass == oclass[0]) {
ret = nvif_object_ctor(disp, "kmsChan", 0,
oclass[0], data, size,
&chan->user);
if (ret == 0)
nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
return ret;
}
}
oclass++;
}
nvif_object_sclass_put(&sclass);
return -ENOSYS;
}
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
nvif_object_dtor(&chan->user);
}
/******************************************************************************
* DMA EVO channel
*****************************************************************************/
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
nvif_object_dtor(&dmac->vram);
nvif_object_dtor(&dmac->sync);
nv50_chan_destroy(&dmac->base);
nvif_mem_dtor(&dmac->_push.mem);
}
static void
nv50_dmac_kick(struct nvif_push *push)
{
struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
/* Push buffer fetches are not coherent with BAR1, so we must ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
if (dmac->push->mem.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
break;
);
}
NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
dmac->put = dmac->cur;
}
push->bgn = push->cur;
}
static int
nv50_dmac_free(struct nv50_dmac *dmac)
{
u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
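/* A worked example: with get == 100 and cur == 50 there are
* 100 - 50 - 5 = 45 dwords left before PUT would catch up to GET;
* once cur is at or past get, the rest of the ring up to max is
* free instead.
*/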
if (get > dmac->cur) /* NVIDIA's driver stays 5 away from GET; do the same. */
return get - dmac->cur - 5;
return dmac->max - dmac->cur;
}
static int
nv50_dmac_wind(struct nv50_dmac *dmac)
{
/* Wait for GET to depart from the beginning of the push buffer to
* prevent writing PUT == GET, which would be ignored by HW.
*/
u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
if (get == 0) {
/* Corner-case, HW idle, but non-committed work pending. */
if (dmac->put == 0)
nv50_dmac_kick(dmac->push);
if (nvif_msec(dmac->base.device, 2000,
if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
break;
) < 0)
return -ETIMEDOUT;
}
PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
dmac->cur = 0;
return 0;
}
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
int free;
if (WARN_ON(size > dmac->max))
return -EINVAL;
dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)
return ret;
push->cur = dmac->_push.mem.object.map.ptr;
push->cur = push->cur + dmac->cur;
nv50_dmac_kick(push);
}
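/* e.g. on pre-GV100 max is 0x1000/4 - 1 - 12 = 1011; a request for
* 20 dwords with cur == 1000 cannot fit before the end of the ring,
* so we jump back to offset zero and continue waiting for space
* from there.
*/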
if (nvif_msec(dmac->base.device, 2000,
if ((free = nv50_dmac_free(dmac)) >= size)
break;
) < 0) {
WARN_ON(1);
return -ETIMEDOUT;
}
push->bgn = dmac->_push.mem.object.map.ptr;
push->bgn = push->bgn + dmac->cur;
push->cur = push->bgn;
push->end = push->cur + free;
return 0;
}
MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
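/* Usage sketch (assuming the standard module name): boot with
* nouveau.kms_vram_pushbuf=1 to force VRAM push buffers, 0 to force
* coherent system memory, or the default -1 to let the Pascal
* heuristic below decide.
*/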
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nvif_disp_chan_v0 *args = data;
u8 type = NVIF_MEM_COHERENT;
int ret;
mutex_init(&dmac->lock);
/* Pascal added support for 47-bit physical addresses, but some
* parts of EVO still only accept 40-bit PAs.
*
* To avoid issues on systems with large amounts of RAM, and on
* systems where an IOMMU maps pages at a high address, we need
* to allocate push buffers in VRAM instead.
*
* This appears to match NVIDIA's behaviour on Pascal.
*/
if ((nv50_dmac_vram_pushbuf > 0) ||
(nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
&dmac->_push.mem);
if (ret)
return ret;
dmac->ptr = dmac->_push.mem.object.map.ptr;
dmac->_push.wait = nv50_dmac_wait;
dmac->_push.kick = nv50_dmac_kick;
dmac->push = &dmac->_push;
dmac->push->bgn = dmac->_push.mem.object.map.ptr;
dmac->push->cur = dmac->push->bgn;
dmac->push->end = dmac->push->bgn;
dmac->max = 0x1000/4 - 1;
/* EVO channels are affected by a HW bug where the last 12 DWORDs
* of the push buffer cannot be used safely.
*/
if (disp->oclass < GV100_DISP)
dmac->max -= 12;
args->pushbuf = nvif_handle(&dmac->_push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
if (ret)
return ret;
if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
}, sizeof(struct nv_dma_v0),
&dmac->sync);
if (ret)
return ret;
ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
.limit = device->info.ram_user - 1,
}, sizeof(struct nv_dma_v0),
&dmac->vram);
return ret;
}
/******************************************************************************
* Output path helpers
*****************************************************************************/
static void
nv50_outp_dump_caps(struct nouveau_drm *drm,
struct nouveau_encoder *outp)
{
NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
outp->base.base.name, outp->caps.dp_interlace);
}
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct drm_display_mode *native_mode)
{
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_connector *connector = conn_state->connector;
struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
asyc->scaler.full = false;
if (!native_mode)
return 0;
if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
/* Don't force the scaler for EDID modes with
* the same size as the native one (e.g. a different
* refresh rate)
*/
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay &&
mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
break;
default:
break;
}
} else {
mode = native_mode;
}
if (!drm_mode_equal(adjusted_mode, mode)) {
drm_mode_copy(adjusted_mode, mode);
crtc_state->mode_changed = true;
}
return 0;
}
static void
nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
{
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
unsigned int max_rate, mode_rate;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
/* we don't support more than 10 anyway */
asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
/* reduce the bpc until it works out */
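/* e.g. if the mode oversubscribes the link at 10 bpc we retry at
* 8 bpc, and keep 6 bpc as the floor even if that still exceeds
* max_rate.
*/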
while (asyh->or.bpc > 6) {
mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
if (mode_rate <= max_rate)
break;
asyh->or.bpc -= 2;
}
break;
default:
break;
}
}
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_connector *connector = conn_state->connector;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int ret;
ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
nv_connector->native_mode);
if (ret)
return ret;
if (crtc_state->mode_changed || crtc_state->connectors_changed)
asyh->or.bpc = connector->display_info.bpc;
/* We might have to reduce the bpc */
nv50_outp_atomic_fix_depth(encoder, crtc_state);
return 0;
}
struct nouveau_connector *
nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_encoder *encoder = to_drm_encoder(outp);
int i;
for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->best_encoder == encoder)
return nouveau_connector(connector);
}
return NULL;
}
struct nouveau_connector *
nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_encoder *encoder = to_drm_encoder(outp);
int i;
for_each_old_connector_in_state(state, connector, connector_state, i) {
if (connector_state->best_encoder == encoder)
return nouveau_connector(connector);
}
return NULL;
}
static struct nouveau_crtc *
nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
const u32 mask = drm_encoder_mask(&outp->base.base);
int i;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->encoder_mask & mask)
return nouveau_crtc(crtc);
}
return NULL;
}
/******************************************************************************
* DAC
*****************************************************************************/
static void
nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
nv_encoder->crtc = NULL;
nvif_outp_release(&nv_encoder->outp);
}
static void
nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
struct nv50_head_atom *asyh =
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
switch (nv_crtc->index) {
case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
default:
WARN_ON(1);
break;
}
ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
nvif_outp_acquire_rgb_crt(&nv_encoder->outp);
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
asyh->or.depth = 0;
nv_encoder->crtc = &nv_crtc->base;
}
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
u32 loadval;
int ret;
loadval = nouveau_drm(encoder->dev)->vbios.dactestval;
if (loadval == 0)
loadval = 340;
ret = nvif_outp_load_detect(&nv_encoder->outp, loadval);
if (ret <= 0)
return connector_status_disconnected;
return connector_status_connected;
}
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
.atomic_enable = nv50_dac_atomic_enable,
.atomic_disable = nv50_dac_atomic_disable,
.detect = nv50_dac_detect
};
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nvif_outp_dtor(&nv_encoder->outp);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nv50_disp *disp = nv50_disp(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_connector_attach_encoder(connector, encoder);
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}
/*
* audio component binding for ELD notification
*/
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
int dev_id)
{
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
port, dev_id);
}
static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_crtc *nv_crtc;
int ret = 0;
*enabled = false;
mutex_lock(&drm->audio.lock);
drm_for_each_encoder(encoder, drm->dev) {
struct nouveau_connector *nv_connector = NULL;
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
continue; /* TODO */
nv_encoder = nouveau_encoder(encoder);
nv_connector = nouveau_connector(nv_encoder->audio.connector);
nv_crtc = nouveau_crtc(nv_encoder->crtc);
if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
continue;
*enabled = nv_encoder->audio.enabled;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
min(max_bytes, ret));
}
break;
}
mutex_unlock(&drm->audio.lock);
return ret;
}
static const struct drm_audio_component_ops nv50_audio_component_ops = {
.get_eld = nv50_audio_component_get_eld,
};
static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
void *data)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_audio_component *acomp = data;
if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(drm_dev);
acomp->ops = &nv50_audio_component_ops;
acomp->dev = kdev;
drm->audio.component = acomp;
drm_modeset_unlock_all(drm_dev);
return 0;
}
static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
void *data)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_audio_component *acomp = data;
drm_modeset_lock_all(drm_dev);
drm->audio.component = NULL;
acomp->ops = NULL;
acomp->dev = NULL;
drm_modeset_unlock_all(drm_dev);
}
static const struct component_ops nv50_audio_component_bind_ops = {
.bind = nv50_audio_component_bind,
.unbind = nv50_audio_component_unbind,
};
static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
return;
drm->audio.component_registered = true;
mutex_init(&drm->audio.lock);
}
static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
if (!drm->audio.component_registered)
return;
component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
drm->audio.component_registered = false;
mutex_destroy(&drm->audio.lock);
}
/******************************************************************************
* Audio
*****************************************************************************/
static bool
nv50_audio_supported(struct drm_encoder *encoder)
{
struct nv50_disp *disp = nv50_disp(encoder->dev);
if (disp->disp->object.oclass <= GT200_DISP ||
disp->disp->object.oclass == GT206_DISP)
return false;
return true;
}
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nvif_outp *outp = &nv_encoder->outp;
if (!nv50_audio_supported(encoder))
return;
mutex_lock(&drm->audio.lock);
if (nv_encoder->audio.enabled) {
nv_encoder->audio.enabled = false;
nv_encoder->audio.connector = NULL;
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
}
mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}
static void
nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nvif_outp *outp = &nv_encoder->outp;
if (!nv50_audio_supported(encoder) || !drm_detect_monitor_audio(nv_connector->edid))
return;
mutex_lock(&drm->audio.lock);
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
drm_eld_size(nv_connector->base.eld));
nv_encoder->audio.enabled = true;
nv_encoder->audio.connector = &nv_connector->base;
mutex_unlock(&drm->audio.lock);
nv50_audio_component_eld_notify(drm->audio.component, outp->or.id, nv_crtc->index);
}
/******************************************************************************
* HDMI
*****************************************************************************/
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
struct drm_display_mode *mode, bool hda)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* constant used by both the binary driver and tegra */
u8 scdc = 0;
u32 max_ac_packet;
struct {
struct nvif_outp_infoframe_v0 infoframe;
u8 data[17];
} args = { 0 };
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
max_ac_packet -= rekey;
max_ac_packet -= 18; /* constant from tegra */
max_ac_packet /= 32;
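/* Worked example: a 1920x1080 mode with htotal 2200 yields
* (2200 - 1920 - 56 - 18) / 32 = 6.
*/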
if (hdmi->scdc.scrambling.supported) {
const bool high_tmds_clock_ratio = mode->clock > 340000;
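/* e.g. CEA 4K@60 runs a 594 MHz pixel clock, beyond the 340 MHz
* TMDS character rate limit, so both scrambling and the 1/40 bit
* clock ratio are enabled below.
*/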
ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
if (ret < 0) {
NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
return;
}
scdc &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates)
scdc |= SCDC_SCRAMBLING_ENABLE;
if (high_tmds_clock_ratio)
scdc |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40;
ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, scdc);
if (ret < 0)
NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
scdc, ret);
}
ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true,
max_ac_packet, rekey, scdc, hda);
if (ret)
return;
/* AVI InfoFrame. */
args.infoframe.version = 0;
args.infoframe.head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
} else {
size = 0;
}
nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
/* Vendor InfoFrame. */
memset(&args.data, 0, sizeof(args.data));
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
else
size = 0;
nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
}
/******************************************************************************
* MST
*****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
struct nv50_mstc {
struct nv50_mstm *mstm;
struct drm_dp_mst_port *port;
struct drm_connector connector;
struct drm_display_mode *native;
struct edid *edid;
};
struct nv50_msto {
struct drm_encoder encoder;
/* head is statically assigned on msto creation */
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
bool enabled;
};
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
{
struct nv50_msto *msto;
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
return nouveau_encoder(encoder);
msto = nv50_msto(encoder);
if (!msto->mstc)
return NULL;
return msto->mstc->mstm->outp;
}
static void
nv50_msto_cleanup(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_topology_mgr *mgr,
struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct drm_dp_mst_atomic_payload *payload =
drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
if (msto->disabled) {
msto->mstc = NULL;
msto->disabled = false;
} else if (msto->enabled) {
drm_dp_add_payload_part2(mgr, state, payload);
msto->enabled = false;
}
}
static void
nv50_msto_prepare(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_topology_mgr *mgr,
struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
struct drm_dp_mst_topology_state *old_mst_state;
struct drm_dp_mst_atomic_payload *payload, *old_payload;
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here
if (msto->disabled) {
drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
} else {
if (msto->enabled)
drm_dp_add_payload_part1(mgr, mst_state, payload);
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
payload->vc_start_slot, payload->time_slots,
payload->pbn, payload->time_slots * mst_state->pbn_div);
}
}
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_connector *connector = conn_state->connector;
struct drm_dp_mst_topology_state *mst_state;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
int ret;
ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
mstc->native);
if (ret)
return ret;
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
/*
* When restoring duplicated states, we need to make sure that the bw
* remains the same and avoid recalculating it, as the connector's bpc
* may have changed after the state was duplicated
*/
if (!state->duplicated) {
const int clock = crtc_state->adjusted_mode.clock;
asyh->or.bpc = connector->display_info.bpc;
asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
false);
}
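	/*
	 * Rough illustration (numbers are approximate, not from the
	 * driver): a 1920x1080@60 mode (148500kHz) at 8bpc would give
	 * drm_dp_calc_pbn_mode(148500, 24, false) of roughly 532 PBN.
	 */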
mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
if (!mst_state->pbn_div) {
struct nouveau_encoder *outp = mstc->mstm->outp;
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
outp->dp.link_bw, outp->dp.link_nr);
}
slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
if (slots < 0)
return slots;
asyh->dp.tu = slots;
return 0;
}
static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
switch (bpc) {
case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
case 10:
default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
}
}
static void
nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_head *head = msto->head;
struct nv50_head_atom *asyh =
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->best_encoder == &msto->encoder) {
mstc = nv50_mstc(connector);
mstm = mstc->mstm;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
if (WARN_ON(!mstc))
return;
if (!mstm->links++) {
		/* XXX: MST audio. */
nvif_outp_acquire_dp(&mstm->outp->outp, mstm->outp->dp.dpcd, 0, 0, false, true);
}
if (mstm->outp->outp.or.link & 1)
proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
else
proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
msto->enabled = true;
mstm->modified = true;
}
static void
nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
mstm->disabled = true;
msto->disabled = true;
}
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
.atomic_disable = nv50_msto_atomic_disable,
.atomic_enable = nv50_msto_atomic_enable,
.atomic_check = nv50_msto_atomic_check,
};
static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
struct nv50_msto *msto = nv50_msto(encoder);
drm_encoder_cleanup(&msto->encoder);
kfree(msto);
}
static const struct drm_encoder_funcs
nv50_msto = {
.destroy = nv50_msto_destroy,
};
static struct nv50_msto *
nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
struct nv50_msto *msto;
int ret;
msto = kzalloc(sizeof(*msto), GFP_KERNEL);
if (!msto)
return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
DRM_MODE_ENCODER_DPMST, "mst-%d", id);
if (ret) {
kfree(msto);
return ERR_PTR(ret);
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
msto->head = head;
return msto;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_crtc *crtc = connector_state->crtc;
if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
return NULL;
return &nv50_head(crtc)->msto->encoder;
}
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nouveau_encoder *outp = mstc->mstm->outp;
/* TODO: calculate the PBN from the dotclock and validate against the
* MSTB's max possible PBN
*/
return nv50_dp_mode_valid(outp, mode, NULL);
}
static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
drm_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
	/*
	 * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
	 * to 8 to save bandwidth on the topology. In the future, we'll want
	 * to properly fix this by dynamically selecting the highest possible
	 * bpc that would fit in the topology.
	 */
if (connector->display_info.bpc)
connector->display_info.bpc =
clamp(connector->display_info.bpc, 6U, 8U);
else
connector->display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
mstc->native = nouveau_conn_native_mode(&mstc->connector);
return ret;
}
static int
nv50_mstc_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
}
static int
nv50_mstc_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
int ret;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
}
	ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
				     mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
.detect_ctx = nv50_mstc_detect,
};
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
drm_connector_cleanup(&mstc->connector);
drm_dp_mst_put_port_malloc(mstc->port);
kfree(mstc);
}
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
struct drm_crtc *crtc;
struct nv50_mstc *mstc;
int ret;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
mstc->mstm = mstm;
mstc->port = port;
ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
return ret;
}
drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
drm_for_each_crtc(crtc, dev) {
if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
continue;
drm_connector_attach_encoder(&mstc->connector,
&nv50_head(crtc)->msto->encoder);
}
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
drm_connector_set_path_property(&mstc->connector, path);
drm_dp_mst_get_port_malloc(port);
return 0;
}
static void
nv50_mstm_cleanup(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
drm_dp_check_act_status(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
}
}
mstm->modified = false;
}
static void
nv50_mstm_prepare(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
/* Disable payloads first */
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm && msto->disabled)
nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
}
}
/* Add payloads for new heads, while also updating the start slots of any unmodified (but
* active) heads that may have had their VC slots shifted left after the previous step
*/
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm && !msto->disabled)
nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
}
}
if (mstm->disabled) {
if (!mstm->links)
nvif_outp_release(&mstm->outp->outp);
mstm->disabled = false;
}
}
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
{
struct nv50_mstm *mstm = nv50_mstm(mgr);
struct nv50_mstc *mstc;
int ret;
ret = nv50_mstc_new(mstm, port, path, &mstc);
if (ret)
return NULL;
return &mstc->connector;
}
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
};
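/*
 * Service an MST interrupt: read the ESI (event status indicator) block
 * starting at DPCD 0x2002 (DP_SINK_COUNT_ESI), have the MST helper decode
 * and handle any events, ack them via DEVICE_SERVICE_IRQ_VECTOR_ESI0, and
 * loop until the sink reports nothing further.
 */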
bool
nv50_mstm_service(struct nouveau_drm *drm,
struct nouveau_connector *nv_connector,
struct nv50_mstm *mstm)
{
struct drm_dp_aux *aux = &nv_connector->aux;
bool handled = true, ret = true;
int rc;
u8 esi[8] = {};
while (handled) {
u8 ack[8] = {};
rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (rc != 8) {
ret = false;
break;
}
drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
if (!handled)
break;
rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
if (rc != 1) {
ret = false;
break;
}
drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
}
if (!ret)
NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
nv_connector->base.name, rc);
return ret;
}
void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
mstm->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}
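/*
 * Probe for MST support.  DP_MSTM_CTRL is cleared first so that any
 * topology state left over from a previous driver instance is discarded
 * before the topology manager re-enables MST from scratch.
 */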
int
nv50_mstm_detect(struct nouveau_encoder *outp)
{
struct nv50_mstm *mstm = outp->dp.mstm;
struct drm_dp_aux *aux;
int ret;
if (!mstm || !mstm->can_mst)
return 0;
aux = mstm->mgr.aux;
/* Clear any leftover MST state we didn't set ourselves by first
* disabling MST if it was already enabled
*/
ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
if (ret < 0)
return ret;
/* And start enabling */
ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
if (ret)
return ret;
mstm->is_mst = true;
return 1;
}
static void
nv50_mstm_fini(struct nouveau_encoder *outp)
{
struct nv50_mstm *mstm = outp->dp.mstm;
if (!mstm)
return;
/* Don't change the MST state of this connector until we've finished
* resuming, since we can't safely grab hpd_irq_lock in our resume
* path to protect mstm->is_mst without potentially deadlocking
*/
mutex_lock(&outp->dp.hpd_irq_lock);
mstm->suspended = true;
mutex_unlock(&outp->dp.hpd_irq_lock);
if (mstm->is_mst)
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}
static void
nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
{
struct nv50_mstm *mstm = outp->dp.mstm;
int ret = 0;
if (!mstm)
return;
if (mstm->is_mst) {
ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1)
nv50_mstm_remove(mstm);
}
mutex_lock(&outp->dp.hpd_irq_lock);
mstm->suspended = false;
mutex_unlock(&outp->dp.hpd_irq_lock);
if (ret == -1)
drm_kms_helper_hotplug_event(mstm->mgr.dev);
}
static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
}
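/*
 * Each hardware head can carry at most one payload from this encoder,
 * hence max_payloads below is the popcount of the DCB head mask.
 */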
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
int conn_base_id, struct nv50_mstm **pmstm)
{
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
int ret;
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
return -ENOMEM;
mstm->outp = outp;
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
max_payloads, conn_base_id);
if (ret)
return ret;
return 0;
}
/******************************************************************************
* SOR
*****************************************************************************/
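/*
 * nv_encoder->ctrl shadows the SOR_SET_CONTROL method: per-head owner bits
 * are ORed in on enable and cleared on disable, and once no head owns the
 * SOR the whole control word is zeroed.
 */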
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
struct nv50_core *core = disp->core;
if (!asyh) {
nv_encoder->ctrl &= ~BIT(head);
if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
nv_encoder->ctrl = 0;
} else {
nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
nv_encoder->ctrl |= BIT(head);
asyh->or.depth = depth;
}
core->func->sor->ctrl(core, nv_encoder->outp.or.id, nv_encoder->ctrl, asyh);
}
/* TODO: Should we extend this to PWM-only backlights?
 * Also, should we add a DRM helper for waiting for the panel to acknowledge
 * that its backlight has been shut off? Intel doesn't seem to do this, and
 * uses a fixed time delay from the VBIOS...
 */
static void
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nouveau_backlight *backlight = nv_connector->backlight;
#endif
struct drm_dp_aux *aux = &nv_connector->aux;
int ret;
u8 pwr;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
if (backlight && backlight->uses_dpcd) {
ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
if (ret < 0)
NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
nv_connector->base.base.id, nv_connector->base.name, ret);
}
#endif
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
		/* drm_dp_dpcd_readb() returns the number of bytes read
		 * (1) on success, or a negative error code.
		 */
		if (ret > 0) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D3;
drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
}
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
nv50_audio_disable(encoder, nv_crtc);
nvif_outp_release(&nv_encoder->outp);
nv_encoder->crtc = NULL;
}
static void
nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
struct nv50_head_atom *asyh =
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvif_outp *outp = &nv_encoder->outp;
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight *backlight;
#endif
struct nvbios *bios = &drm->vbios;
bool lvds_dual = false, lvds_8bpc = false, hda = false;
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
nv_encoder->crtc = &nv_crtc->base;
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
drm_detect_monitor_audio(nv_connector->edid))
hda = true;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (disp->disp->object.oclass == NV50_DISP ||
!drm_detect_hdmi_monitor(nv_connector->edid))
nvif_outp_acquire_tmds(outp, nv_crtc->index, false, 0, 0, 0, false);
else
nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
if (nv_encoder->outp.or.link & 1) {
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
			/* Only enable dual-link if:
			 * - the pixel rate requires it (>= 165MHz)
			 * - the DCB says this output supports it
			 * - the sink is not an HDMI monitor, since HDMI
			 *   has no dual-link mode
			 */
if (mode->clock >= 165000 &&
nv_encoder->dcb->duallink_possible &&
!drm_detect_hdmi_monitor(nv_connector->edid))
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
} else {
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
break;
case DCB_OUTPUT_LVDS:
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
if (bios->fp_no_ddc) {
lvds_dual = bios->fp.dual_link;
lvds_8bpc = bios->fp.if_is_24bit;
} else {
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
lvds_dual = true;
} else
if (mode->clock >= bios->fp.duallink_transition_clk) {
lvds_dual = true;
}
if (lvds_dual) {
if (bios->fp.strapless_is_24bit & 2)
lvds_8bpc = true;
} else {
if (bios->fp.strapless_is_24bit & 1)
lvds_8bpc = true;
}
if (asyh->or.bpc == 8)
lvds_8bpc = true;
}
nvif_outp_acquire_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
break;
case DCB_OUTPUT_DP:
nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, hda, false);
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->outp.or.link & 1)
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
backlight = nv_connector->backlight;
if (backlight && backlight->uses_dpcd)
drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
(u16)backlight->dev->props.brightness);
#endif
break;
default:
BUG();
break;
}
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
.atomic_enable = nv50_sor_atomic_enable,
.atomic_disable = nv50_sor_atomic_disable,
};
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nvif_outp_dtor(&nv_encoder->outp);
nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
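/*
 * MST capability is advertised in the VBIOS DP table: entries of version
 * 0x40 and up carry a flags byte at offset 0x08, in which (by nouveau's
 * reading of the table) bit 2 signals MST support.
 */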
bool nv50_has_mst(struct nouveau_drm *drm)
{
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
u32 data;
u8 ver, hdr, cnt, len;
data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
}
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct nv50_disp *disp = nv50_disp(connector->dev);
int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
default:
type = DRM_MODE_ENCODER_TMDS;
break;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->update = nv50_sor_update;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
drm_connector_attach_encoder(connector, encoder);
disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
nv50_outp_dump_caps(drm, nv_encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
mutex_init(&nv_encoder->dp.hpd_irq_lock);
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
* transactions, so we're required to
* use custom I2C-over-AUX code.
*/
nv_encoder->i2c = &aux->i2c;
} else {
nv_encoder->i2c = &nv_connector->aux.ddc;
}
nv_encoder->aux = aux;
}
if (nv_connector->type != DCB_CONNECTOR_eDP &&
nv50_has_mst(drm)) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
}
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
}
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}
/******************************************************************************
* PIOR
*****************************************************************************/
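/*
 * PIOR paths drive an external encoder; the adjusted clock is doubled in
 * atomic_check below, presumably reflecting the rate at which the external
 * device is fed relative to the pixel clock.
 */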
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
if (ret)
return ret;
crtc_state->adjusted_mode.clock *= 2;
return 0;
}
static void
nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
nv_encoder->crtc = NULL;
nvif_outp_release(&nv_encoder->outp);
}
static void
nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
struct nv50_head_atom *asyh =
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u32 ctrl = 0;
switch (nv_crtc->index) {
case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
default:
WARN_ON(1);
break;
}
switch (asyh->or.bpc) {
case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
case 8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
case 6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
}
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
nvif_outp_acquire_tmds(&nv_encoder->outp, false, false, 0, 0, 0, false);
break;
case DCB_OUTPUT_DP:
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, false, false);
break;
default:
BUG();
break;
}
core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
nv_encoder->crtc = &nv_crtc->base;
}
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
.atomic_enable = nv50_pior_atomic_enable,
.atomic_disable = nv50_pior_atomic_disable,
};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nvif_outp_dtor(&nv_encoder->outp);
drm_encoder_cleanup(encoder);
mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
ddc = bus ? &bus->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
case DCB_OUTPUT_DP:
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
ddc = aux ? &aux->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
default:
return -ENODEV;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->i2c = ddc;
nv_encoder->aux = aux;
mutex_init(&nv_encoder->dp.hpd_irq_lock);
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_connector_attach_encoder(connector, encoder);
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
nv50_outp_dump_caps(drm, nv_encoder);
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}
/******************************************************************************
* Atomic
*****************************************************************************/
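/*
 * Push a core channel update: prepare any modified MST payload tables,
 * kick the update with a completion notifier armed, wait for the notifier,
 * then run MST cleanup (ACT check plus part-2 payload writes) once the
 * update has landed.
 */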
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_topology_state *mst_state;
struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
int i;
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
mstm = nv50_mstm(mgr);
if (mstm->modified)
nv50_mstm_prepare(state, mst_state, mstm);
}
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
core->func->update(core, interlock, true);
if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
disp->core->chan.base.device))
NV_ERROR(drm, "core notifier timeout\n");
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
mstm = nv50_mstm(mgr);
if (mstm->modified)
nv50_mstm_cleanup(state, mst_state, mstm);
}
}
static void
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
{
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
int i;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
if (wndw->func->update)
wndw->func->update(wndw, interlock);
}
}
}
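/*
 * Commit tail, roughly in order: disable heads/planes/output paths that
 * are going away (flushing early where an output path demands it), enable
 * or update the new configuration, flush everything through the core and
 * window channels, then wait for completion and deliver vblank events.
 */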
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct drm_crtc *crtc;
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_atom *atom = nv50_atom(state);
struct nv50_core *core = disp->core;
struct nv50_outp_atom *outp, *outt;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
int i;
bool flushed = false;
NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
nv50_crc_atomic_stop_reporting(state);
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_dp_mst_atomic_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
if (atom->lock_core)
mutex_lock(&disp->mutex);
/* Disable head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
if (old_crtc_state->active && !new_crtc_state->active) {
pm_runtime_put_noidle(dev->dev);
drm_crtc_vblank_off(crtc);
}
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
}
}
/* Disable plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
asyw->clr.mask, asyw->set.mask);
if (!asyw->clr.mask)
continue;
nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
}
/* Disable output path(s). */
list_for_each_entry(outp, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
outp->clr.mask, outp->set.mask);
if (outp->clr.mask) {
help->atomic_disable(encoder, state);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
nv50_disp_atomic_commit_wndw(state, interlock);
nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
flushed = true;
}
}
}
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
nv50_disp_atomic_commit_wndw(state, interlock);
nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
flushed = true;
}
}
if (flushed)
nv50_crc_atomic_release_notifier_contexts(state);
nv50_crc_atomic_init_notifier_contexts(state);
/* Update output path(s). */
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
outp->set.mask, outp->clr.mask);
if (outp->set.mask) {
help->atomic_enable(encoder, state);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
list_del(&outp->head);
kfree(outp);
}
/* Update head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
asyh->set.mask, asyh->clr.mask);
if (asyh->set.mask) {
nv50_head_flush_set(head, asyh);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
if (new_crtc_state->active) {
if (!old_crtc_state->active) {
drm_crtc_vblank_on(crtc);
pm_runtime_get_noresume(dev->dev);
}
if (new_crtc_state->event)
drm_crtc_vblank_get(crtc);
}
}
/* Update window->head assignment.
*
* This has to happen in an update that's not interlocked with
* any window channels to avoid hitting HW error checks.
*
	 * TODO: Proper handling of window ownership (Turing apparently
* supports non-fixed mappings).
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
/* Finish updating head(s)...
*
* NVD is rather picky about both where window assignments can change,
* *and* about certain core and window channel states matching.
*
* The EFI GOP driver on newer GPUs configures window channels with a
* different output format to what we do, and the core channel update
* in the assign_windows case above would result in a state mismatch.
*
	 * Delay some of the head update until after that point to work
	 * around the issue. This only affects the initial modeset.
*
* TODO: handle this better when adding flexible window mapping
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
asyh->set.mask, asyh->clr.mask);
if (asyh->set.mask) {
nv50_head_flush_set_wndw(head, asyh);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
}
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
asyw->set.mask, asyw->clr.mask);
if ( !asyw->set.mask &&
(!asyw->clr.mask || atom->flush_disable))
continue;
nv50_wndw_flush_set(wndw, interlock, asyw);
}
/* Flush update. */
nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
interlock[NV50_DISP_INTERLOCK_OVLY] ||
interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
if (atom->lock_core)
mutex_unlock(&disp->mutex);
/* Wait for HW to signal completion. */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
int ret = nv50_wndw_wait_armed(wndw, asyw);
if (ret)
NV_ERROR(drm, "%s: timeout\n", plane->name);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
if (new_crtc_state->active)
drm_crtc_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
new_crtc_state->event = NULL;
if (new_crtc_state->active)
drm_crtc_vblank_put(crtc);
}
}
nv50_crc_atomic_start_reporting(state);
if (!flushed)
nv50_crc_atomic_release_notifier_contexts(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
/* Drop the RPM ref we got from nv50_disp_atomic_commit() */
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
struct drm_atomic_state *state =
container_of(work, typeof(*state), commit_work);
nv50_disp_atomic_commit_tail(state);
}
static int
nv50_disp_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
goto done;
INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
goto done;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
goto err_cleanup;
}
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err_cleanup;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
if (asyw->set.image)
nv50_wndw_ntfy_enable(wndw, asyw);
}
drm_atomic_state_get(state);
/*
* Grab another RPM ref for the commit tail, which will release the
* ref when it's finished
*/
pm_runtime_get_noresume(dev->dev);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
nv50_disp_atomic_commit_tail(state);
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
struct nv50_outp_atom *outp;
list_for_each_entry(outp, &atom->outp, head) {
if (outp->encoder == encoder)
return outp;
}
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
if (!outp)
return ERR_PTR(-ENOMEM);
list_add(&outp->head, &atom->outp);
outp->encoder = encoder;
return outp;
}
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
struct drm_connector_state *old_connector_state)
{
struct drm_encoder *encoder = old_connector_state->best_encoder;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = old_connector_state->crtc))
return 0;
old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
outp->flush_disable = true;
atom->flush_disable = true;
}
outp->clr.ctrl = true;
atom->lock_core = true;
}
return 0;
}
static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
struct drm_connector_state *connector_state)
{
struct drm_encoder *encoder = connector_state->best_encoder;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = connector_state->crtc))
return 0;
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
outp->set.ctrl = true;
atom->lock_core = true;
}
return 0;
}
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_connector_state *old_connector_state, *new_connector_state;
struct drm_connector *connector;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_head *head;
struct nv50_head_atom *asyh;
int ret, i;
if (core->assign_windows && core->func->head->static_wndw_map) {
drm_for_each_crtc(crtc, dev) {
new_crtc_state = drm_atomic_get_crtc_state(state,
crtc);
if (IS_ERR(new_crtc_state))
return PTR_ERR(new_crtc_state);
head = nv50_head(crtc);
asyh = nv50_head_atom(new_crtc_state);
core->func->head->static_wndw_map(head, asyh);
}
}
/* We need to handle colour management on a per-plane basis. */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->color_mgmt_changed) {
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
}
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
if (ret)
return ret;
ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
if (ret)
return ret;
}
ret = drm_dp_mst_atomic_check(state);
if (ret)
return ret;
nv50_crc_atomic_check_outp(atom);
return 0;
}
static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct nv50_outp_atom *outp, *outt;
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
list_del(&outp->head);
kfree(outp);
}
drm_atomic_state_default_clear(state);
}
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
drm_atomic_state_default_release(&atom->state);
kfree(atom);
}
static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
struct nv50_atom *atom;
if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
drm_atomic_state_init(dev, &atom->state) < 0) {
kfree(atom);
return NULL;
}
INIT_LIST_HEAD(&atom->outp);
return &atom->state;
}
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
.atomic_state_clear = nv50_disp_atomic_state_clear,
.atomic_state_free = nv50_disp_atomic_state_free,
};
static const struct drm_mode_config_helper_funcs
nv50_disp_helper_func = {
.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
/******************************************************************************
* Init
*****************************************************************************/
static void
nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
nv50_mstm_fini(nouveau_encoder(encoder));
}
if (!runtime)
cancel_work_sync(&drm->hpd_work);
}
static int
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
if (resume || runtime)
core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
nv50_mstm_init(nv_encoder, runtime);
}
}
return 0;
}
static void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
nv50_audio_component_fini(nouveau_drm(dev));
nvif_object_unmap(&disp->caps);
nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
nouveau_bo_ref(NULL, &disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
}
int
nv50_display_create(struct drm_device *dev)
{
struct nvif_device *device = &nouveau_drm(dev)->client.device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
bool has_mst = nv50_has_mst(drm);
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->mode_config.helper_private = &nv50_disp_helper_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
nouveau_bo_unpin(disp->sync);
}
if (ret)
nouveau_bo_ref(NULL, &disp->sync);
}
if (ret)
goto out;
/* allocate master evo channel */
ret = nv50_core_new(drm, &disp->core);
if (ret)
goto out;
disp->core->func->init(disp->core);
if (disp->core->func->caps_init) {
ret = disp->core->func->caps_init(drm, disp);
if (ret)
goto out;
}
/* Assign the correct format modifiers */
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
/* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
* generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
* proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
* small page allocations in prepare_fb(). When this is implemented, we should also force
* large pages (128K) for ovly fbs in order to fix Kepler ovlys.
* But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
* large pages.
*/
if (disp->disp->object.oclass >= GM107_DISP) {
dev->mode_config.cursor_width = 256;
dev->mode_config.cursor_height = 256;
} else if (disp->disp->object.oclass >= GK104_DISP) {
dev->mode_config.cursor_width = 128;
dev->mode_config.cursor_height = 128;
} else {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;
}
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
else
if (disp->disp->object.oclass >= GF110_DISP)
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
else
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
struct nv50_head *head;
if (!(crtcs & (1 << i)))
continue;
head = nv50_head_create(dev, i);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out;
}
if (has_mst) {
head->msto = nv50_msto_new(dev, head, i);
if (IS_ERR(head->msto)) {
ret = PTR_ERR(head->msto);
head->msto = NULL;
goto out;
}
/*
* FIXME: This is a hack to workaround the following
* issues:
*
* https://gitlab.gnome.org/GNOME/mutter/issues/759
* https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
*
* Once these issues are closed, this should be
* removed
*/
head->msto->encoder.possible_crtcs = crtcs;
}
}
/* create encoder/connector objects based on VBIOS DCB table */
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
connector = nouveau_connector_create(dev, dcbe);
if (IS_ERR(connector))
continue;
if (dcbe->location == DCB_LOC_ON_CHIP) {
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
case DCB_OUTPUT_DP:
ret = nv50_sor_create(connector, dcbe);
break;
case DCB_OUTPUT_ANALOG:
ret = nv50_dac_create(connector, dcbe);
break;
default:
ret = -ENODEV;
break;
}
} else {
ret = nv50_pior_create(connector, dcbe);
}
if (ret) {
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
dcbe->location, dcbe->type,
ffs(dcbe->or) - 1, ret);
ret = 0;
}
}
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
}
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
nv50_audio_component_init(drm);
out:
if (ret)
nv50_display_destroy(dev);
return ret;
}
/******************************************************************************
* Format modifiers
*****************************************************************************/
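/*
 * The column legend below maps the DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D()
 * arguments onto modifier fields.  Read against it, an entry such as
 * (0, 1, 1, 0x7a, 0) means: no compression, sector layout 1, gob-height/
 * page-kind generation 1, page kind 0x7a, and log2(block height) 0, i.e.
 * blocks one GOB high.
 */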
/****************************************************************
* Log2(block height) ----------------------------+ *
* Page Kind ----------------------------------+ | *
* Gob Height/Page Kind Generation ------+ | | *
* Sector layout -------+ | | | *
* Compression ------+ | | | | */
const u64 disp50xx_modifiers[] = { /* | | | | | */
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/****************************************************************
* Log2(block height) ----------------------------+ *
* Page Kind ----------------------------------+ | *
* Gob Height/Page Kind Generation ------+ | | *
* Sector layout -------+ | | | *
* Compression ------+ | | | | */
const u64 disp90xx_modifiers[] = { /* | | | | | */
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/disp.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/class.h>
void
nv50_core_del(struct nv50_core **pcore)
{
struct nv50_core *core = *pcore;
if (core) {
nv50_dmac_destroy(&core->chan);
kfree(*pcore);
*pcore = NULL;
}
}
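/*
 * Core channel classes, newest first: nvif_mclass() returns the index of
 * the first entry the device actually exposes, so each generation gets the
 * most capable implementation it supports.
 */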
int
nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
{
struct {
s32 oclass;
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GM200_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GM107_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GK110_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GK104_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GF110_DISP_CORE_CHANNEL_DMA, 0, core907d_new },
{ GT214_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
{ GT206_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
{ GT200_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
{ G82_DISP_CORE_CHANNEL_DMA, 0, core827d_new },
{ NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid;
cid = nvif_mclass(&disp->disp->object, cores);
if (cid < 0) {
NV_ERROR(drm, "No supported core channel class\n");
return cid;
}
return cores[cid].new(drm, cores[cid].oclass, pcore);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/core.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ovly.h"
#include "oimm.h"
#include <nvif/class.h>
int
nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
{
static const struct {
s32 oclass;
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} ovlys[] = {
{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly917e_new },
{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly907e_new },
{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
{ G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly827e_new },
{ NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid, ret;
cid = nvif_mclass(&disp->disp->object, ovlys);
if (cid < 0) {
NV_ERROR(drm, "No supported overlay class\n");
return cid;
}
ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
if (ret)
return ret;
return nv50_oimm_init(drm, *pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/ovly.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl907d.h>
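/*
 * Minimal push-buffer pattern used throughout dispnv50: reserve space with
 * PUSH_WAIT() (two dwords here, one method header plus one data word),
 * then emit the method with PUSH_MTHD().
 */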
static int
dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, DAC_SET_CONTROL(or), ctrl);
return 0;
}
const struct nv50_outp_func
dac907d = {
.ctrl = dac907d_ctrl,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/dac907d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "base.h"
#include "core.h"
#include "curs.h"
#include "ovly.h"
#include "crc.h"
#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/cl0046.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
void
nv50_head_flush_clr(struct nv50_head *head,
struct nv50_head_atom *asyh, bool flush)
{
union nv50_head_atom_mask clr = {
.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
};
if (clr.crc) nv50_crc_atomic_clr(head);
if (clr.olut) head->func->olut_clr(head);
if (clr.core) head->func->core_clr(head);
if (clr.curs) head->func->curs_clr(head);
}
void
nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.curs ) head->func->curs_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
asyh->olut.buffer,
asyh->state.gamma_lut,
asyh->olut.load);
head->func->olut_set(head, asyh);
}
}
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.base ) head->func->base (head, asyh);
if (asyh->set.ovly ) head->func->ovly (head, asyh);
if (asyh->set.dither ) head->func->dither (head, asyh);
if (asyh->set.procamp) head->func->procamp (head, asyh);
if (asyh->set.crc ) nv50_crc_atomic_set (head, asyh);
if (asyh->set.or ) head->func->or (head, asyh);
}
static void
nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
struct nv50_head_atom *asyh,
struct nouveau_conn_atom *asyc)
{
const int vib = asyc->procamp.color_vibrance - 100;
const int hue = asyc->procamp.vibrant_hue - 90;
const int adj = (vib > 0) ? 50 : 0;
asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
asyh->set.procamp = true;
}
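/* Editor's sketch (illustrative only, not driver code): the two assignments
 * above convert the user properties to signed 0.12 fixed point, where
 * +/-2047 spans roughly +/-1.0.  Assuming vibrance 0..200 (100 = unity) and
 * hue 0..180 (90 = neutral) -- ranges inferred from the offsets used above:
 */
static inline unsigned int procamp_s12_sketch(int user, int centre)
{
int v = user - centre; /* signed deviation from neutral */
int adj = (v > 0) ? 50 : 0; /* round-to-nearest for positive values */
return ((v * 2047 + adj) / 100) & 0xfff; /* pack into 12 bits */
}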
static void
nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
struct nv50_head_atom *asyh,
struct nouveau_conn_atom *asyc)
{
u32 mode = 0x00;
if (asyc->dither.mode) {
if (asyc->dither.mode == DITHERING_MODE_AUTO) {
if (asyh->base.depth > asyh->or.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = asyc->dither.mode;
}
if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
if (asyh->or.bpc >= 8)
mode |= DITHERING_DEPTH_8BPC;
} else {
mode |= asyc->dither.depth;
}
}
asyh->dither.enable = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, ENABLE);
asyh->dither.bits = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, BITS);
asyh->dither.mode = NVVAL_GET(mode, NV507D, HEAD_SET_DITHER_CONTROL, MODE);
asyh->set.dither = true;
}
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
struct nv50_head_atom *asyh,
struct nouveau_conn_atom *asyc)
{
struct drm_connector *connector = asyc->state.connector;
struct drm_display_mode *omode = &asyh->state.adjusted_mode;
struct drm_display_mode *umode = &asyh->state.mode;
int mode = asyc->scaler.mode;
struct edid *edid;
int umode_vdisplay, omode_hdisplay, omode_vdisplay;
if (connector->edid_blob_ptr)
edid = (struct edid *)connector->edid_blob_ptr->data;
else
edid = NULL;
if (!asyc->scaler.full) {
if (mode == DRM_MODE_SCALE_NONE)
omode = umode;
} else {
/* Non-EDID LVDS/eDP mode. */
mode = DRM_MODE_SCALE_FULLSCREEN;
}
/* For the user-specified mode, we must ignore doublescan and
* the like, but honor frame packing.
*/
umode_vdisplay = umode->vdisplay;
if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
umode_vdisplay += umode->vtotal;
asyh->view.iW = umode->hdisplay;
asyh->view.iH = umode_vdisplay;
/* For the output mode, we can just use the stock helper. */
drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
asyh->view.oW = omode_hdisplay;
asyh->view.oH = omode_vdisplay;
/* Add overscan compensation if necessary; this keeps the aspect
 * ratio the same as the backend mode unless overridden by the
 * user setting both hborder and vborder properties.
 */
if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
(asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
drm_detect_hdmi_monitor(edid)))) {
u32 bX = asyc->scaler.underscan.hborder;
u32 bY = asyc->scaler.underscan.vborder;
u32 r = (asyh->view.oH << 19) / asyh->view.oW;
if (bX) {
asyh->view.oW -= (bX * 2);
if (bY) asyh->view.oH -= (bY * 2);
else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
} else {
asyh->view.oW -= (asyh->view.oW >> 4) + 32;
if (bY) asyh->view.oH -= (bY * 2);
else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
}
/* Handle CENTER/ASPECT scaling, taking into account the areas
* removed already for overscan compensation.
*/
switch (mode) {
case DRM_MODE_SCALE_CENTER:
/* NOTE: This will cause scaling when the input is
* larger than the output.
*/
asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
break;
case DRM_MODE_SCALE_ASPECT:
/* Determine whether the scaling should be on width or on
* height. This is done by comparing the aspect ratios of the
* sizes. If the output AR is larger than input AR, that means
* we want to change the width (letterboxed on the
* left/right), otherwise on the height (letterboxed on the
* top/bottom).
*
* E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
* screen will have letterboxes on the left/right. However a
* 16:9 (1.777) AR image on that same screen will have
* letterboxes on the top/bottom.
*
* inputAR = iW / iH; outputAR = oW / oH
* outputAR > inputAR is equivalent to oW * iH > iW * oH
*/
if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
/* Recompute output width, i.e. left/right letterbox */
u32 r = (asyh->view.iW << 19) / asyh->view.iH;
asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
} else {
/* Recompute output height, i.e. top/bottom letterbox */
u32 r = (asyh->view.iH << 19) / asyh->view.iW;
asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
}
break;
default:
break;
}
asyh->set.view = true;
}
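/* Editor's sketch (not part of the driver): the CENTER/ASPECT maths above is
 * fixed point with 19 fractional bits -- r = (a << 19) / b encodes the ratio
 * a/b, and ((x * r) + (r / 2)) >> 19 multiplies by it with round-to-nearest.
 * A standalone version, with the same 32-bit range caveats as the original:
 */
static inline unsigned int scale_q19_sketch(unsigned int x, unsigned int num, unsigned int den)
{
unsigned int r = (num << 19) / den; /* num/den as a Q0.19 ratio */
return ((x * r) + (r / 2)) >> 19; /* x * num/den, rounded to nearest */
}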
static int
nv50_head_atomic_check_lut(struct nv50_head *head,
struct nv50_head_atom *asyh)
{
struct drm_device *dev = head->base.base.dev;
struct drm_crtc *crtc = &head->base.base;
struct nv50_disp *disp = nv50_disp(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_property_blob *olut = asyh->state.gamma_lut,
*ilut = asyh->state.degamma_lut;
int size;
/* Ensure that the ilut is valid */
if (ilut) {
size = drm_color_lut_size(ilut);
if (!head->func->ilut_check(size)) {
NV_ATOMIC(drm, "Invalid size %d for degamma on [CRTC:%d:%s]\n",
size, crtc->base.id, crtc->name);
return -EINVAL;
}
}
/* Determine whether core output LUT should be enabled. */
if (olut) {
/* Check if any window(s) have stolen the core output LUT
 * to use as an input LUT for legacy gamma + I8 colour format.
 */
if (asyh->wndw.olut) {
/* If any window has stolen the core output LUT,
* all of them must.
*/
if (asyh->wndw.olut != asyh->wndw.mask)
return -EINVAL;
olut = NULL;
}
}
if (!olut) {
if (!head->func->olut_identity) {
asyh->olut.handle = 0;
return 0;
}
size = 0;
} else {
size = drm_color_lut_size(olut);
}
if (!head->func->olut(head, asyh, size)) {
NV_ATOMIC(drm, "Invalid size %d for gamma on [CRTC:%d:%s]\n",
size, crtc->base.id, crtc->name);
return -EINVAL;
}
asyh->olut.handle = disp->core->chan.vram.handle;
asyh->olut.buffer = !asyh->olut.buffer; /* flip the double-buffered LUT */
return 0;
}
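/* Editor's sketch: the wndw.olut vs wndw.mask comparison above is an
 * all-or-nothing rule -- if any enabled window borrowed the core output LUT
 * as its input LUT (I8 + legacy gamma), every enabled window must have:
 */
static inline int olut_steal_valid_sketch(unsigned int olut_users, unsigned int enabled_wndws)
{
return olut_users == 0 || olut_users == enabled_wndws; /* else -EINVAL */
}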
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct nv50_head_mode *m = &asyh->mode;
u32 blankus;
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
/*
* DRM modes are defined in terms of a repeating interval
* starting with the active display area. The hardware modes
* are defined in terms of a repeating interval starting one
* unit (pixel or line) into the sync pulse. So, add bias.
*/
m->h.active = mode->crtc_htotal;
m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
m->v.active = mode->crtc_vtotal;
m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
/*XXX: Safe underestimate, even "0" works */
blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
blankus *= 1000;
blankus /= mode->crtc_clock;
m->v.blankus = blankus;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
m->v.blank2e = m->v.active + m->v.blanke;
m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay;
m->v.active = (m->v.active * 2) + 1;
m->interlace = true;
} else {
m->v.blank2e = 0;
m->v.blank2s = 1;
m->interlace = false;
}
m->clock = mode->crtc_clock;
asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
asyh->set.or = head->func->or != NULL;
asyh->set.mode = true;
}
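/* Editor's sketch: "blankus" above is the vertical blanking interval in
 * microseconds -- blank lines times the line length in pixel clocks, divided
 * by the pixel clock; crtc_clock is in kHz, hence the *1000.  The "- 2"
 * mirrors the driver's deliberate underestimate:
 */
static inline unsigned int vblank_usec_sketch(unsigned int vtotal, unsigned int vdisplay,
unsigned int htotal, unsigned int clock_khz)
{
return (vtotal - vdisplay - 2) * htotal * 1000 / clock_khz;
}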
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nouveau_conn_atom *asyc = NULL;
struct drm_connector_state *conns;
struct drm_connector *conn;
int i, ret;
bool check_lut = asyh->state.color_mgmt_changed ||
memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw));
NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
if (check_lut) {
ret = nv50_head_atomic_check_lut(head, asyh);
if (ret)
return ret;
}
if (asyh->state.active) {
for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
if (conns->crtc == crtc) {
asyc = nouveau_conn_atom(conns);
break;
}
}
if (armh->state.active) {
if (asyc) {
if (asyh->state.mode_changed)
asyc->set.scaler = true;
if (armh->base.depth != asyh->base.depth)
asyc->set.dither = true;
}
} else {
if (asyc)
asyc->set.mask = ~0;
asyh->set.mask = ~0;
asyh->set.or = head->func->or != NULL;
}
if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
if (check_lut)
asyh->olut.visible = asyh->olut.handle != 0;
if (asyc) {
if (asyc->set.scaler)
nv50_head_atomic_check_view(armh, asyh, asyc);
if (asyc->set.dither)
nv50_head_atomic_check_dither(armh, asyh, asyc);
if (asyc->set.procamp)
nv50_head_atomic_check_procamp(armh, asyh, asyc);
}
if (head->func->core_calc) {
head->func->core_calc(head, asyh);
if (!asyh->core.visible)
asyh->olut.visible = false;
}
asyh->set.base = armh->base.cpp != asyh->base.cpp;
asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
} else {
asyh->olut.visible = false;
asyh->core.visible = false;
asyh->curs.visible = false;
asyh->base.cpp = 0;
asyh->ovly.cpp = 0;
}
if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
if (asyh->core.visible) {
if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
asyh->set.core = true;
} else
if (armh->core.visible) {
asyh->clr.core = true;
}
if (asyh->curs.visible) {
if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
asyh->set.curs = true;
} else
if (armh->curs.visible) {
asyh->clr.curs = true;
}
if (asyh->olut.visible) {
if (memcmp(&armh->olut, &asyh->olut, sizeof(asyh->olut)))
asyh->set.olut = true;
} else
if (armh->olut.visible) {
asyh->clr.olut = true;
}
} else {
asyh->clr.olut = armh->olut.visible;
asyh->clr.core = armh->core.visible;
asyh->clr.curs = armh->curs.visible;
asyh->set.olut = asyh->olut.visible;
asyh->set.core = asyh->core.visible;
asyh->set.curs = asyh->curs.visible;
}
ret = nv50_crc_atomic_check_head(head, asyh, armh);
if (ret)
return ret;
if (asyh->clr.mask || asyh->set.mask)
nv50_atom(asyh->state.state)->lock_core = true;
return 0;
}
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
.get_scanout_position = nouveau_display_scanoutpos,
};
static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct nv50_head_atom *asyh = nv50_head_atom(state);
__drm_atomic_helper_crtc_destroy_state(&asyh->state);
kfree(asyh);
}
static struct drm_crtc_state *
nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
struct nv50_head_atom *asyh;
if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
asyh->wndw = armh->wndw;
asyh->view = armh->view;
asyh->mode = armh->mode;
asyh->olut = armh->olut;
asyh->core = armh->core;
asyh->curs = armh->curs;
asyh->base = armh->base;
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
asyh->crc = armh->crc;
asyh->or = armh->or;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
return &asyh->state;
}
static void
nv50_head_reset(struct drm_crtc *crtc)
{
struct nv50_head_atom *asyh;
if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
return;
if (crtc->state)
nv50_head_atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &asyh->state);
}
static int
nv50_head_late_register(struct drm_crtc *crtc)
{
return nv50_head_crc_late_register(nv50_head(crtc));
}
static void
nv50_head_destroy(struct drm_crtc *crtc)
{
struct nv50_head *head = nv50_head(crtc);
nvif_event_dtor(&head->base.vblank);
nvif_head_dtor(&head->base.head);
nv50_lut_fini(&head->olut);
drm_crtc_cleanup(crtc);
kfree(head);
}
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
.enable_vblank = nouveau_display_vblank_enable,
.disable_vblank = nouveau_display_vblank_disable,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.late_register = nv50_head_late_register,
};
static const struct drm_crtc_funcs
nvd9_head_func = {
.reset = nv50_head_reset,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
.enable_vblank = nouveau_display_vblank_enable,
.disable_vblank = nouveau_display_vblank_disable,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.verify_crc_source = nv50_crc_verify_source,
.get_crc_sources = nv50_crc_get_sources,
.set_crc_source = nv50_crc_set_source,
.late_register = nv50_head_late_register,
};
static int
nv50_head_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_crtc *nv_crtc = container_of(event, struct nouveau_crtc, vblank);
if (drm_crtc_handle_vblank(&nv_crtc->base))
nv50_crc_handle_vblank(nv50_head(&nv_crtc->base));
return NVIF_EVENT_KEEP;
}
struct nv50_head *
nv50_head_create(struct drm_device *dev, int index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
struct nv50_wndw *base, *ovly, *curs;
struct nouveau_crtc *nv_crtc;
struct drm_crtc *crtc;
const struct drm_crtc_funcs *funcs;
int ret;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
head->base.index = index;
if (disp->disp->object.oclass < GF110_DISP)
funcs = &nv50_head_func;
else
funcs = &nvd9_head_func;
if (disp->disp->object.oclass < GV100_DISP) {
ret = nv50_base_new(drm, head->base.index, &base);
ret = nv50_ovly_new(drm, head->base.index, &ovly);
} else {
ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
head->base.index * 2 + 0, &base);
ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
head->base.index * 2 + 1, &ovly);
}
if (ret == 0)
ret = nv50_curs_new(drm, head->base.index, &curs);
if (ret) {
kfree(head);
return ERR_PTR(ret);
}
nv_crtc = &head->base;
crtc = &nv_crtc->base;
drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
funcs, "head-%d", head->base.index);
drm_crtc_helper_add(crtc, &nv50_head_help);
/* Keep the legacy gamma size at 256 to avoid compatibility issues */
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_crtc_enable_color_mgmt(crtc, base->func->ilut_size,
disp->disp->object.oclass >= GF110_DISP,
head->func->olut_size);
if (head->func->olut_set) {
ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
if (ret) {
nv50_head_destroy(crtc);
return ERR_PTR(ret);
}
}
ret = nvif_head_ctor(disp->disp, head->base.base.name, head->base.index, &head->base.head);
if (ret)
return ERR_PTR(ret);
ret = nvif_head_vblank_event_ctor(&head->base.head, "kmsVbl", nv50_head_vblank_handler,
false, &nv_crtc->vblank);
if (ret)
return ERR_PTR(ret);
return head;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/head.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl507d.h>
static int
dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
u32 sync = 0;
int ret;
if (asyh) {
sync |= NVVAL(NV507D, DAC_SET_POLARITY, HSYNC, asyh->or.nhsync);
sync |= NVVAL(NV507D, DAC_SET_POLARITY, VSYNC, asyh->or.nvsync);
}
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NV507D, DAC_SET_CONTROL(or), ctrl,
DAC_SET_POLARITY(or), sync);
return 0;
}
const struct nv50_outp_func
dac507d = {
.ctrl = dac507d_ctrl,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/dac507d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl827d.h>
static int
head827d_curs_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
return 0;
}
static int
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NV827D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NV827D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
NVDEF(NV827D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
return 0;
}
static int
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 9)))
return ret;
PUSH_MTHD(push, NV827D, HEAD_SET_OFFSET(i, 0),
NVVAL(NV827D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
PUSH_MTHD(push, NV827D, HEAD_SET_SIZE(i),
NVVAL(NV827D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
NVVAL(NV827D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
HEAD_SET_STORAGE(i),
NVVAL(NV827D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
NVVAL(NV827D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
NVVAL(NV827D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
HEAD_SET_PARAMS(i),
NVVAL(NV827D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
NVDEF(NV827D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
NVDEF(NV827D, HEAD_SET_PARAMS, GAMMA, LINEAR),
HEAD_SET_CONTEXT_DMAS_ISO(i, 0),
NVVAL(NV827D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
PUSH_MTHD(push, NV827D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
NVVAL(NV827D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
return 0;
}
static int
head827d_olut_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
return 0;
}
static int
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NV827D, HEAD_SET_BASE_LUT_LO(i),
NVDEF(NV827D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
NVVAL(NV827D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
HEAD_SET_BASE_LUT_HI(i),
NVVAL(NV827D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
PUSH_MTHD(push, NV827D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
return 0;
}
const struct nv50_head_func
head827d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
.olut_size = 256,
.olut_set = head827d_olut_set,
.olut_clr = head827d_olut_clr,
.core_calc = head507d_core_calc,
.core_set = head827d_core_set,
.core_clr = head507d_core_clr,
.curs_layout = head507d_curs_layout,
.curs_format = head507d_curs_format,
.curs_set = head827d_curs_set,
.curs_clr = head827d_curs_clr,
.base = head507d_base,
.ovly = head507d_ovly,
.dither = head507d_dither,
.procamp = head507d_procamp,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/head827d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "oimm.h"
#include <nvif/class.h>
int
nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
{
static const struct {
s32 oclass;
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} oimms[] = {
{ GK104_DISP_OVERLAY, 0, oimm507b_init },
{ GF110_DISP_OVERLAY, 0, oimm507b_init },
{ GT214_DISP_OVERLAY, 0, oimm507b_init },
{ G82_DISP_OVERLAY, 0, oimm507b_init },
{ NV50_DISP_OVERLAY, 0, oimm507b_init },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid;
cid = nvif_mclass(&disp->disp->object, oimms);
if (cid < 0) {
NV_ERROR(drm, "No supported overlay immediate class\n");
return cid;
}
return oimms[cid].init(drm, oimms[cid].oclass, wndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/oimm.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wndw.h"
#include "wimm.h"
#include "handles.h"
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507e.h>
#include <nvhw/class/clc37e.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include "nouveau_bo.h"
#include "nouveau_gem.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
nvif_object_dtor(&ctxdma->object);
list_del(&ctxdma->head);
kfree(ctxdma);
}
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
{
struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
u32 handle;
u32 unused;
u8 kind;
struct {
struct nv_dma_v0 base;
union {
struct nv50_dma_v0 nv50;
struct gf100_dma_v0 gf100;
struct gf119_dma_v0 gf119;
};
} args = {};
u32 argc = sizeof(args.base);
int ret;
nouveau_framebuffer_get_layout(fb, &unused, &kind);
handle = NV50_DISP_HANDLE_WNDW_CTX(kind);
list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
if (ctxdma->object.handle == handle)
return ctxdma;
}
if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
list_add(&ctxdma->head, &wndw->ctxdma.list);
args.base.target = NV_DMA_V0_TARGET_VRAM;
args.base.access = NV_DMA_V0_ACCESS_RDWR;
args.base.start = 0;
args.base.limit = drm->client.device.info.ram_user - 1;
if (drm->client.device.info.chipset < 0x80) {
args.nv50.part = NV50_DMA_V0_PART_256;
argc += sizeof(args.nv50);
} else
if (drm->client.device.info.chipset < 0xc0) {
args.nv50.part = NV50_DMA_V0_PART_256;
args.nv50.kind = kind;
argc += sizeof(args.nv50);
} else
if (drm->client.device.info.chipset < 0xd0) {
args.gf100.kind = kind;
argc += sizeof(args.gf100);
} else {
args.gf119.page = GF119_DMA_V0_PAGE_LP;
args.gf119.kind = kind;
argc += sizeof(args.gf119);
}
ret = nvif_object_ctor(wndw->ctxdma.parent, "kmsFbCtxDma", handle,
NV_DMA_IN_MEMORY, &args, argc, &ctxdma->object);
if (ret) {
nv50_wndw_ctxdma_del(ctxdma);
return ERR_PTR(ret);
}
return ctxdma;
}
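/* Editor's sketch (illustrative, not driver code): the function above is a
 * "find in cache, else create" pattern keyed on the ctxdma handle, which in
 * turn encodes the surface kind, so one DMA object is shared per kind.  A
 * minimal standalone version of the lookup over a singly linked list:
 */
struct ctxdma_sketch { unsigned int handle; struct ctxdma_sketch *next; };
static inline struct ctxdma_sketch *
ctxdma_lookup_sketch(struct ctxdma_sketch *head, unsigned int handle)
{
struct ctxdma_sketch *c;
for (c = head; c; c = c->next)
if (c->handle == handle)
return c; /* cache hit: reuse the existing object */
return 0; /* miss: caller allocates, links, and constructs */
}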
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
if (asyw->set.ntfy) {
return wndw->func->ntfy_wait_begun(disp->sync,
asyw->ntfy.offset,
wndw->wndw.base.device);
}
return 0;
}
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
struct nv50_wndw_atom *asyw)
{
union nv50_wndw_atom_mask clr = {
.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
};
if (clr.sema ) wndw->func-> sema_clr(wndw);
if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
if (clr.xlut ) wndw->func-> xlut_clr(wndw);
if (clr.csc ) wndw->func-> csc_clr(wndw);
if (clr.image) wndw->func->image_clr(wndw);
interlock[wndw->interlock.type] |= wndw->interlock.data;
}
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
asyw->image.interval = 1;
}
if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
if (asyw->set.image) wndw->func->image_set(wndw, asyw);
if (asyw->set.xlut ) {
if (asyw->ilut) {
asyw->xlut.i.offset =
nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
asyw->ilut, asyw->xlut.i.load);
}
wndw->func->xlut_set(wndw, asyw);
}
if (asyw->set.csc ) wndw->func->csc_set (wndw, asyw);
if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
if (asyw->set.point) {
/* clear the point flag first; the comma expression then checks
 * whether any other window state still requires the channel
 * interlock before the immediate cursor-position update
 */
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
wndw->immd->point(wndw, asyw);
wndw->immd->update(wndw, interlock);
} else {
interlock[wndw->interlock.type] |= wndw->interlock.data;
}
}
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
asyw->ntfy.handle = wndw->wndw.sync.handle;
asyw->ntfy.offset = wndw->ntfy;
asyw->ntfy.awaken = false;
asyw->set.ntfy = true;
wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
wndw->ntfy ^= 0x10; /* flip to the other notifier slot for next time */
}
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
wndw->func->release(wndw, asyw, asyh);
asyw->ntfy.handle = 0;
asyw->sema.handle = 0;
asyw->xlut.handle = 0;
memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
}
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
case DRM_FORMAT_YUYV:
asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8;
break;
case DRM_FORMAT_UYVY:
asyw->image.format = NV507E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8;
break;
default:
WARN_ON(1);
return -EINVAL;
}
asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601;
return 0;
}
static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
case DRM_FORMAT_C8:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_I8;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8;
break;
case DRM_FORMAT_RGB565:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_R5G6B5;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
asyw->image.format = NVC37E_SET_PARAMS_FORMAT_A2R10G10B10;
break;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
asyw->image.format = NV507C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16;
break;
default:
return -EINVAL;
}
asyw->image.colorspace = NV507E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB;
return 0;
}
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
struct nv50_wndw_atom *armw,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
uint8_t kind;
uint32_t tile_mode;
int ret;
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
if (fb != armw->state.fb || !armw->visible || modeset) {
nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
asyw->image.w = fb->width;
asyw->image.h = fb->height;
asyw->image.kind = kind;
ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
if (ret) {
ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
if (ret)
return ret;
}
if (asyw->image.kind) {
asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR;
if (drm->client.device.info.chipset >= 0xc0)
asyw->image.blockh = tile_mode >> 4;
else
asyw->image.blockh = tile_mode;
asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = NV507C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH;
asyw->image.blockh = NV507C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
asyw->image.blocks[0] = 0;
asyw->image.pitch[0] = fb->pitches[0];
}
if (!asyh->state.async_flip)
asyw->image.interval = 1;
else
asyw->image.interval = 0;
if (asyw->image.interval)
asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING;
else
asyw->image.mode = NV507C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE;
asyw->set.image = wndw->func->image_set != NULL;
}
if (wndw->func->scale_set) {
asyw->scale.sx = asyw->state.src_x >> 16;
asyw->scale.sy = asyw->state.src_y >> 16;
asyw->scale.sw = asyw->state.src_w >> 16;
asyw->scale.sh = asyw->state.src_h >> 16;
asyw->scale.dw = asyw->state.crtc_w;
asyw->scale.dh = asyw->state.crtc_h;
if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
asyw->set.scale = true;
}
if (wndw->func->blend_set) {
asyw->blend.depth = 255 - asyw->state.normalized_zpos;
asyw->blend.k1 = asyw->state.alpha >> 8;
switch (asyw->state.pixel_blend_mode) {
case DRM_MODE_BLEND_PREMULTI:
asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
break;
case DRM_MODE_BLEND_COVERAGE:
asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC;
asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC;
break;
case DRM_MODE_BLEND_PIXEL_NONE:
default:
asyw->blend.src_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1;
asyw->blend.dst_color = NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1;
break;
}
if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
asyw->set.blend = true;
}
if (wndw->immd) {
asyw->point.x = asyw->state.crtc_x;
asyw->point.y = asyw->state.crtc_y;
if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
asyw->set.point = true;
}
return wndw->func->acquire(wndw, asyw, asyh);
}
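/* Editor's sketch (my reading of the factor-select names above, not an
 * authoritative statement of the hardware): with K1 = plane alpha and
 * fa = per-pixel alpha, both 0..255, the three cases select src/dst
 * factors (Fs, Fd) for out = src*Fs + dst*Fd:
 *   PREMULTI:   Fs = K1,    Fd = 1 - K1*fa
 *   COVERAGE:   Fs = K1*fa, Fd = 1 - K1*fa
 *   PIXEL_NONE: Fs = K1,    Fd = 1 - K1
 * A plain 8-bit integer version of the premultiplied case:
 */
static inline unsigned int blend_premulti_sketch(unsigned int src, unsigned int dst,
unsigned int k1, unsigned int fa)
{
unsigned int fd = 255 - (k1 * fa + 127) / 255; /* 1 - K1*fa, rounded */
return (src * k1 + dst * fd + 127) / 255; /* src*Fs + dst*Fd */
}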
static int
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
struct nv50_wndw_atom *armw,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
struct drm_property_blob *ilut = asyh->state.degamma_lut;
/* I8 format without an input LUT makes no sense, and the
* HW error-checks for this.
*
* In order to handle legacy gamma, when there's no input
* LUT we need to steal the output LUT and use it instead.
*/
if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
/* This should be an error, but there are legacy clients
 * that do a modeset before providing a gamma table.
*
* We keep the window disabled to avoid angering HW.
*/
if (!(ilut = asyh->state.gamma_lut)) {
asyw->visible = false;
return 0;
}
if (wndw->func->ilut)
asyh->wndw.olut |= BIT(wndw->id);
} else {
asyh->wndw.olut &= ~BIT(wndw->id);
}
if (!ilut && wndw->func->ilut_identity &&
asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
static struct drm_property_blob dummy = {};
ilut = &dummy;
}
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut));
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
} else {
asyw->clr.xlut = armw->xlut.handle != 0;
}
/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
if (wndw->func->olut_core &&
(!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
asyw->set.xlut = true;
if (wndw->func->csc && asyh->state.ctm) {
const struct drm_color_ctm *ctm = asyh->state.ctm->data;
wndw->func->csc(wndw, asyw, ctm);
asyw->csc.valid = true;
asyw->set.csc = true;
} else {
asyw->csc.valid = false;
asyw->clr.csc = armw->csc.valid;
}
/* Can't do an immediate flip while changing the LUT. */
asyh->state.async_flip = false;
return 0;
}
static int
nv50_wndw_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_head_atom *harm = NULL, *asyh = NULL;
bool modeset = false;
int ret;
NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
/* Fetch the assembly state for the head the window will belong to,
* and determine whether the window will be visible.
*/
if (asyw->state.crtc) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
asyw->visible = asyh->state.active;
} else {
asyw->visible = false;
}
/* Fetch assembly state for the head the window used to belong to. */
if (armw->state.crtc) {
harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
if (IS_ERR(harm))
return PTR_ERR(harm);
}
/* LUT configuration can potentially cause the window to be disabled. */
if (asyw->visible && wndw->func->xlut_set &&
(!armw->visible ||
asyh->state.color_mgmt_changed ||
asyw->state.fb->format->format !=
armw->state.fb->format->format)) {
ret = nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);
if (ret)
return ret;
}
/* Calculate new window state. */
if (asyw->visible) {
ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
armw, asyw, asyh);
if (ret)
return ret;
asyh->wndw.mask |= BIT(wndw->id);
} else
if (armw->visible) {
nv50_wndw_atomic_check_release(wndw, asyw, harm);
harm->wndw.mask &= ~BIT(wndw->id);
} else {
return 0;
}
/* Aside from the obvious case where the window is actively being
* disabled, we might also need to temporarily disable the window
* when performing certain modeset operations.
*/
if (!asyw->visible || modeset) {
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
if (asyw->clr.xlut && asyw->visible)
asyw->set.xlut = asyw->xlut.handle != 0;
asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
}
return 0;
}
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nouveau_bo *nvbo;
NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
if (!old_state->fb)
return;
nvbo = nouveau_gem_object(old_state->fb->obj[0]);
nouveau_bo_unpin(nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
struct nouveau_bo *nvbo;
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
return ret;
if (wndw->ctxdma.parent) {
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
nouveau_bo_unpin(nvbo);
return PTR_ERR(ctxdma);
}
if (asyw->visible)
asyw->image.handle[0] = ctxdma->object.handle;
}
ret = drm_gem_plane_helper_prepare_fb(plane, state);
if (ret)
return ret;
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
wndw->func->prepare(wndw, asyh, asyw);
}
return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
.prepare_fb = nv50_wndw_prepare_fb,
.cleanup_fb = nv50_wndw_cleanup_fb,
.atomic_check = nv50_wndw_atomic_check,
};
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
__drm_atomic_helper_plane_destroy_state(&asyw->state);
kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
struct nv50_wndw_atom *asyw;
if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
asyw->sema = armw->sema;
asyw->ntfy = armw->ntfy;
asyw->ilut = NULL;
asyw->xlut = armw->xlut;
asyw->csc = armw->csc;
asyw->image = armw->image;
asyw->point = armw->point;
asyw->clr.mask = 0;
asyw->set.mask = 0;
return &asyw->state;
}
static int
nv50_wndw_zpos_default(struct drm_plane *plane)
{
return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
(plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
struct nv50_wndw_atom *asyw;
if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
return;
if (plane->state)
plane->funcs->atomic_destroy_state(plane, plane->state);
__drm_atomic_helper_plane_reset(plane, &asyw->state);
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
nv50_wndw_ctxdma_del(ctxdma);
}
nv50_dmac_destroy(&wndw->wimm);
nv50_dmac_destroy(&wndw->wndw);
nv50_lut_fini(&wndw->ilut);
drm_plane_cleanup(&wndw->plane);
kfree(wndw);
}
/* This function assumes the format has already been validated against the plane
* and the modifier was validated against the device-wide modifier list at FB
* creation time.
*/
static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
uint8_t i;
if (drm->client.device.info.chipset < 0xc0) {
const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
if (!format) return false;
for (i = 0; i < info->num_planes; i++)
if ((info->cpp[i] != 4) && kind != 0x70) return false;
}
return true;
}
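/* Editor's sketch: the pre-GF100 path above recovers the surface "kind" from
 * bits 12..19 of the NVIDIA format modifier; the same shift/mask as a
 * standalone decode (bit layout assumed from the code above):
 */
static inline unsigned char mod_kind_sketch(unsigned long long modifier)
{
return (modifier >> 12) & 0xff;
}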
const struct drm_plane_funcs
nv50_wndw = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = nv50_wndw_destroy,
.reset = nv50_wndw_reset,
.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
.format_mod_supported = nv50_plane_format_mod_supported,
};
static const u64 nv50_cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
enum drm_plane_type type, const char *name, int index,
const u32 *format, u32 heads,
enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
struct nv50_wndw **pwndw)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_mmu *mmu = &drm->client.mmu;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_wndw *wndw;
const u64 *format_modifiers;
int nformat;
int ret;
if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
return -ENOMEM;
wndw->func = func;
wndw->id = index;
wndw->interlock.type = interlock_type;
wndw->interlock.data = interlock_data;
wndw->ctxdma.parent = &wndw->wndw.base.user;
INIT_LIST_HEAD(&wndw->ctxdma.list);
for (nformat = 0; format[nformat]; nformat++); /* count zero-terminated format list */
if (type == DRM_PLANE_TYPE_CURSOR)
format_modifiers = nv50_cursor_format_modifiers;
else
format_modifiers = nouveau_display(dev)->format_modifiers;
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
format_modifiers, type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
*pwndw = NULL;
return ret;
}
drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
if (wndw->func->ilut) {
ret = nv50_lut_init(disp, mmu, &wndw->ilut);
if (ret)
return ret;
}
if (wndw->func->blend_set) {
ret = drm_plane_create_zpos_property(&wndw->plane,
nv50_wndw_zpos_default(&wndw->plane), 0, 254);
if (ret)
return ret;
ret = drm_plane_create_alpha_property(&wndw->plane);
if (ret)
return ret;
ret = drm_plane_create_blend_mode_property(&wndw->plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
if (ret)
return ret;
} else {
ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
nv50_wndw_zpos_default(&wndw->plane));
if (ret)
return ret;
}
return 0;
}
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
struct nv50_wndw **pwndw)
{
struct {
s32 oclass;
int version;
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid, ret;
cid = nvif_mclass(&disp->disp->object, wndws);
if (cid < 0) {
NV_ERROR(drm, "No supported window class\n");
return cid;
}
ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
if (ret)
return ret;
return nv50_wimm_init(drm, *pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wndw.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "curs.h"
#include "atom.h"
#include <nvhw/class/clc37a.h>
static int
cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
{
struct nvif_object *user = &wndw->wimm.base.user;
int ret = nvif_chan_wait(&wndw->wimm, 1);
if (ret == 0)
NVIF_WR32(user, NVC37A, UPDATE, 0x00000001);
return ret;
}
static int
cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_object *user = &wndw->wimm.base.user;
int ret = nvif_chan_wait(&wndw->wimm, 1);
if (ret == 0) {
NVIF_WR32(user, NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT(0),
NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
NVVAL(NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
}
return ret;
}
static const struct nv50_wimm_func
cursc37a = {
.point = cursc37a_point,
.update = cursc37a_update,
};
int
cursc37a_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return curs507a_new_(&cursc37a, drm, head, oclass,
0x00000001 << head, pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/cursc37a.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "atom.h"
#include "core.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h>
static int
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
/*XXX: This is a dirty hack until OR depth handling is
* improved later for deep colour etc.
*/
switch (asyh->or.depth) {
case 6: depth = 5; break;
case 5: depth = 4; break;
case 2: depth = 1; break;
case 0: depth = 4; break;
default:
depth = asyh->or.depth;
WARN_ON(1);
break;
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
NVDEF(NVC37D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE));
return 0;
}
static int
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_PROCAMP(i),
NVDEF(NVC37D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
NVDEF(NVC37D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
NVVAL(NVC37D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
NVDEF(NVC37D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
NVDEF(NVC37D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE) |
NVDEF(NVC37D, HEAD_SET_PROCAMP, BLACK_LEVEL, GRAPHICS));
return 0;
}
int
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_DITHER_CONTROL(i),
NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
NVDEF(NVC37D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
NVVAL(NVC37D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
return 0;
}
int
headc37d_curs_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), 0x00000000);
return 0;
}
int
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 7)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR, DE_GAMMA, NONE),
HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
NVVAL(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
K1) |
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
NEG_K1_TIMES_SRC) |
NVDEF(NVC37D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CURSOR(i, 0), asyh->curs.handle);
PUSH_MTHD(push, NVC37D, HEAD_SET_OFFSET_CURSOR(i, 0), asyh->curs.offset >> 8);
return 0;
}
int
headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
asyh->curs.format = asyw->image.format;
return 0;
}
static int
headc37d_olut_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), 0x00000000);
return 0;
}
static int
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT(i),
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, SIZE, asyh->olut.size) |
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, RANGE, asyh->olut.range) |
NVVAL(NVC37D, HEAD_SET_CONTROL_OUTPUT_LUT, OUTPUT_MODE, asyh->olut.output_mode),
HEAD_SET_OFFSET_OUTPUT_LUT(i), asyh->olut.offset >> 8,
HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(i), asyh->olut.handle);
return 0;
}
static bool
headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 256 && size != 1024)
return false;
asyh->olut.size = size == 1024 ? NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 :
NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257;
asyh->olut.range = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY;
asyh->olut.output_mode = NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE;
asyh->olut.load = head907d_olut_load;
return true;
}
static int
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 15)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_RASTER_SIZE(i),
NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
NVVAL(NVC37D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
HEAD_SET_RASTER_SYNC_END(i),
NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
NVVAL(NVC37D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
HEAD_SET_RASTER_BLANK_END(i),
NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
HEAD_SET_RASTER_BLANK_START(i),
NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
NVVAL(NVC37D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
//XXX:
PUSH_NVSQ(push, NVC37D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
PUSH_NVSQ(push, NVC37D, 0x2008 + (i * 0x400), m->interlace);
PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
PUSH_MTHD(push, NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
NVVAL(NVC37D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
PUSH_MTHD(push, NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_LUT, USAGE_1025) |
NVDEF(NVC37D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
return 0;
}
int
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_IN(i),
NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
PUSH_MTHD(push, NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
NVVAL(NVC37D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
return 0;
}
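/* Volta-class display statically maps two windows to each head (windows
* 2n and 2n+1 for head n); the helper below marks both as owned. As a
* worked example, head 1 claims windows 2 and 3, i.e. owned |= 0x0c.
*/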
void
headc37d_static_wndw_map(struct nv50_head *head, struct nv50_head_atom *asyh)
{
int i, end;
for (i = head->base.index * 2, end = i + 2; i < end; i++)
asyh->wndw.owned |= BIT(i);
}
const struct nv50_head_func
headc37d = {
.view = headc37d_view,
.mode = headc37d_mode,
.olut = headc37d_olut,
.ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = headc37d_olut_set,
.olut_clr = headc37d_olut_clr,
.curs_layout = head917d_curs_layout,
.curs_format = headc37d_curs_format,
.curs_set = headc37d_curs_set,
.curs_clr = headc37d_curs_clr,
.dither = headc37d_dither,
.procamp = headc37d_procamp,
.or = headc37d_or,
.static_wndw_map = headc37d_static_wndw_map,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/headc37d.c |
// SPDX-License-Identifier: MIT
#include <linux/string.h>
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/timer.h>
#include <nvhw/class/cl907d.h>
#include "nouveau_drv.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"
#include "crc.h"
static const char * const nv50_crc_sources[] = {
[NV50_CRC_SOURCE_NONE] = "none",
[NV50_CRC_SOURCE_AUTO] = "auto",
[NV50_CRC_SOURCE_RG] = "rg",
[NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
[NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
[NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};
static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
{
int i;
if (!buf) {
*s = NV50_CRC_SOURCE_NONE;
return 0;
}
i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
if (i < 0)
return i;
*s = i;
return 0;
}
int
nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
enum nv50_crc_source source;
if (nv50_crc_parse_source(source_name, &source) < 0) {
NV_DEBUG(drm, "unknown source %s\n", source_name);
return -EINVAL;
}
*values_cnt = 1;
return 0;
}
const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
*count = ARRAY_SIZE(nv50_crc_sources);
return nv50_crc_sources;
}
static void
nv50_crc_program_ctx(struct nv50_head *head,
struct nv50_crc_notifier_ctx *ctx)
{
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
struct nv50_core *core = disp->core;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };
core->func->crc->set_ctx(head, ctx);
core->func->update(core, interlock, false);
}
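/* Vblank worker that flips between the two CRC notifier contexts. The
* hardware is first pointed away from the old context (NULL) and then at
* the new one via two back-to-back core updates; if the vblank counter
* advanced in the meantime, the flip missed its window and we log an
* error.
*/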
static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
struct drm_vblank_work *work = to_drm_vblank_work(base);
struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
struct nv50_head *head = container_of(crc, struct nv50_head, crc);
struct drm_crtc *crtc = &head->base.base;
struct drm_device *dev = crtc->dev;
struct nv50_disp *disp = nv50_disp(dev);
const uint64_t start_vbl = drm_crtc_vblank_count(crtc);
uint64_t end_vbl;
u8 new_idx = crc->ctx_idx ^ 1;
/*
* We don't want to accidentally wait for longer than the vblank, so
* try again on the next vblank if we can't grab the lock.
*/
if (!mutex_trylock(&disp->mutex)) {
drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name);
drm_vblank_work_schedule(work, start_vbl + 1, true);
return;
}
drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n",
crtc->name, crc->ctx_idx, new_idx);
nv50_crc_program_ctx(head, NULL);
nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
mutex_unlock(&disp->mutex);
end_vbl = drm_crtc_vblank_count(crtc);
if (unlikely(end_vbl != start_vbl))
NV_ERROR(nouveau_drm(dev),
"Failed to flip CRC context on %s on time (%llu > %llu)\n",
crtc->name, end_vbl, start_vbl);
spin_lock_irq(&crc->lock);
crc->ctx_changed = true;
spin_unlock_irq(&crc->lock);
}
static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}
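/* Drain completed CRC entries from the current notifier context into the
* DRM CRC queue. A zero entry is treated as "not written yet", so draining
* stops at the first empty slot rather than reporting garbage.
*/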
static void
nv50_crc_get_entries(struct nv50_head *head,
const struct nv50_crc_func *func,
enum nv50_crc_source source)
{
struct drm_crtc *crtc = &head->base.base;
struct nv50_crc *crc = &head->crc;
u32 output_crc;
while (crc->entry_idx < func->num_entries) {
/*
* While Nvidia's documentation says CRCs are written on each
* subsequent vblank after being enabled, in practice they
* aren't written immediately.
*/
output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
source, crc->entry_idx);
if (!output_crc)
return;
drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
crc->frame++;
crc->entry_idx++;
}
}
void nv50_crc_handle_vblank(struct nv50_head *head)
{
struct drm_crtc *crtc = &head->base.base;
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func =
nv50_disp(head->base.base.dev)->core->func->crc;
struct nv50_crc_notifier_ctx *ctx;
bool need_reschedule = false;
if (!func)
return;
/*
* We don't lose events if we can't report CRCs until the next
* vblank, so only report CRCs when the lock is uncontended; this
* avoids stalling here and missing an actual vblank event.
*/
if (!spin_trylock(&crc->lock))
return;
if (!crc->src)
goto out;
ctx = &crc->ctx[crc->ctx_idx];
if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
nv50_crc_get_entries(head, func, crc->src);
crc->ctx_idx ^= 1;
crc->entry_idx = 0;
crc->ctx_changed = false;
/*
* Unfortunately when notifier contexts are changed during CRC
* capture, we will inevitably lose the CRC entry for the
* frame where the hardware actually latched onto the first
* UPDATE. According to Nvidia's hardware engineers, there's
* no workaround for this.
*
* Now, we could try to be smart here and calculate the number
* of missed CRCs based on audit timestamps, but those were
* removed starting with volta. Since we always flush our
* updates back-to-back without waiting, we'll just be
* optimistic and assume we always miss exactly one frame.
*/
drm_dbg_kms(head->base.base.dev,
"Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
head->base.index, crc->frame);
crc->frame++;
nv50_crc_reset_ctx(ctx);
need_reschedule = true;
}
nv50_crc_get_entries(head, func, crc->src);
if (need_reschedule)
drm_vblank_work_schedule(&crc->flip_work,
drm_crtc_vblank_count(crtc)
+ crc->flip_threshold
- crc->entry_idx,
true);
out:
spin_unlock(&crc->lock);
}
static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
const struct nv50_crc_func *func,
struct nv50_crc_notifier_ctx *ctx)
{
struct drm_device *dev = head->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
s64 ret;
ret = nvif_msec(&drm->client.device, 50,
if (func->ctx_finished(head, ctx)) break;);
if (ret == -ETIMEDOUT)
NV_ERROR(drm,
"CRC notifier ctx for head %d not finished after 50ms\n",
head->base.index);
else if (ret)
NV_ATOMIC(drm,
"CRC notifier ctx for head-%d finished after %lldns\n",
head->base.index, ret);
}
void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct nv50_head *head = nv50_head(crtc);
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nv50_crc *crc = &head->crc;
if (!asyh->clr.crc)
continue;
spin_lock_irq(&crc->lock);
crc->src = NV50_CRC_SOURCE_NONE;
spin_unlock_irq(&crc->lock);
drm_crtc_vblank_put(crtc);
drm_vblank_work_cancel_sync(&crc->flip_work);
NV_ATOMIC(nouveau_drm(crtc->dev),
"CRC reporting on vblank for head-%d disabled\n",
head->base.index);
/* CRC generation is still enabled in hw; we'll just report
* any remaining CRC entries ourselves after it gets disabled
* in hardware.
*/
}
}
void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct nv50_head *head = nv50_head(crtc);
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_crc *crc = &head->crc;
int i;
if (!asyh->set.crc)
continue;
crc->entry_idx = 0;
crc->ctx_changed = false;
for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
nv50_crc_reset_ctx(&crc->ctx[i]);
}
}
void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
const struct nv50_crc_func *func =
nv50_disp(state->dev)->core->func->crc;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct nv50_head *head = nv50_head(crtc);
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_crc *crc = &head->crc;
struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];
if (!asyh->clr.crc)
continue;
if (crc->ctx_changed) {
nv50_crc_wait_ctx_finished(head, func, ctx);
ctx = &crc->ctx[crc->ctx_idx ^ 1];
}
nv50_crc_wait_ctx_finished(head, func, ctx);
}
}
void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct nv50_head *head = nv50_head(crtc);
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
struct nv50_crc *crc = &head->crc;
u64 vbl_count;
if (!asyh->set.crc)
continue;
drm_crtc_vblank_get(crtc);
spin_lock_irq(&crc->lock);
vbl_count = drm_crtc_vblank_count(crtc);
crc->frame = vbl_count;
crc->src = asyh->crc.src;
drm_vblank_work_schedule(&crc->flip_work,
vbl_count + crc->flip_threshold,
true);
spin_unlock_irq(&crc->lock);
NV_ATOMIC(nouveau_drm(crtc->dev),
"CRC reporting on vblank for head-%d enabled\n",
head->base.index);
}
}
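/* Work out asyh->set.crc/asyh->clr.crc for a head. Note that disabling
* and re-enabling CRCs in the same commit forces a flush-disable, as the
* CRC source can't be switched within a single flush.
*/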
int nv50_crc_atomic_check_head(struct nv50_head *head,
struct nv50_head_atom *asyh,
struct nv50_head_atom *armh)
{
struct nv50_atom *atom = nv50_atom(asyh->state.state);
bool changed = armh->crc.src != asyh->crc.src;
if (!armh->crc.src && !asyh->crc.src) {
asyh->set.crc = false;
asyh->clr.crc = false;
return 0;
}
if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) {
asyh->clr.crc = armh->crc.src && armh->state.active;
asyh->set.crc = asyh->crc.src && asyh->state.active;
if (changed)
asyh->set.or |= armh->or.crc_raster !=
asyh->or.crc_raster;
if (asyh->clr.crc && asyh->set.crc)
atom->flush_disable = true;
} else {
asyh->set.crc = false;
asyh->clr.crc = false;
}
return 0;
}
void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
if (atom->flush_disable)
return;
for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
new_crtc_state, i) {
struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_outp_atom *outp_atom;
struct nouveau_encoder *outp;
struct drm_encoder *encoder, *enc;
enc = nv50_head_atom_get_encoder(armh);
if (!enc)
continue;
outp = nv50_real_outp(enc);
if (!outp)
continue;
encoder = &outp->base.base;
if (!asyh->clr.crc)
continue;
/*
* Re-programming ORs can't be done in the same flush as
* disabling CRCs
*/
list_for_each_entry(outp_atom, &atom->outp, head) {
if (outp_atom->encoder == encoder) {
if (outp_atom->set.mask) {
atom->flush_disable = true;
return;
} else {
break;
}
}
}
}
}
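/* Translate the user-visible CRC source and the encoder's DCB entry into
* the hardware tap point: off-chip encoders tap at the PIOR, DP taps at
* the SF, analog outputs at the DAC, and everything else at the SOR.
*/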
static enum nv50_crc_source_type
nv50_crc_source_type(struct nouveau_encoder *outp,
enum nv50_crc_source source)
{
struct dcb_output *dcbe = outp->dcb;
switch (source) {
case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
case NV50_CRC_SOURCE_RG: return NV50_CRC_SOURCE_TYPE_RG;
default: break;
}
if (dcbe->location != DCB_LOC_ON_CHIP)
return NV50_CRC_SOURCE_TYPE_PIOR;
switch (dcbe->type) {
case DCB_OUTPUT_DP: return NV50_CRC_SOURCE_TYPE_SF;
case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
default: return NV50_CRC_SOURCE_TYPE_SOR;
}
}
void nv50_crc_atomic_set(struct nv50_head *head,
struct nv50_head_atom *asyh)
{
struct drm_crtc *crtc = &head->base.base;
struct drm_device *dev = crtc->dev;
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
struct nouveau_encoder *outp;
struct drm_encoder *encoder;
encoder = nv50_head_atom_get_encoder(asyh);
if (!encoder)
return;
outp = nv50_real_outp(encoder);
if (!outp)
return;
func->set_src(head, outp->outp.or.id, nv50_crc_source_type(outp, asyh->crc.src),
&crc->ctx[crc->ctx_idx]);
}
void nv50_crc_atomic_clr(struct nv50_head *head)
{
const struct nv50_crc_func *func =
nv50_disp(head->base.base.dev)->core->func->crc;
func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL);
}
static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
switch (source) {
case NV50_CRC_SOURCE_NONE:
case NV50_CRC_SOURCE_AUTO:
case NV50_CRC_SOURCE_RG:
case NV50_CRC_SOURCE_OUTP_ACTIVE:
return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
case NV50_CRC_SOURCE_OUTP_COMPLETE:
return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
case NV50_CRC_SOURCE_OUTP_INACTIVE:
return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
}
return 0;
}
/* We handle mapping the memory for CRC notifiers ourselves, since each
* notifier needs its own handle
*/
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
int ret;
ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
if (ret)
return ret;
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = ctx->mem.addr,
.limit = ctx->mem.addr
+ ctx->mem.size - 1,
}, sizeof(struct nv_dma_v0),
&ctx->ntfy);
if (ret)
goto fail_fini;
return 0;
fail_fini:
nvif_mem_dtor(&ctx->mem);
return ret;
}
static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
nvif_object_dtor(&ctx->ntfy);
nvif_mem_dtor(&ctx->mem);
}
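/* Entry point for debugfs CRC source selection. Both notifier contexts
* are allocated up front when enabling, the new source is applied through
* a full atomic commit (retrying on -EDEADLK in the usual drm_modeset
* backoff pattern), and the contexts are torn down again on disable or on
* error.
*/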
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct nv50_head *head = nv50_head(crtc);
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
struct nv50_head_atom *asyh;
struct drm_crtc_state *crtc_state;
enum nv50_crc_source source;
int ret = 0, ctx_flags = 0, i;
ret = nv50_crc_parse_source(source_str, &source);
if (ret)
return ret;
/*
* Only allow the modeset lock acquisition to be interrupted when
* enabling CRCs; we don't want the user to accidentally interrupt us
* while we're disabling them.
*/
if (source)
ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
drm_modeset_acquire_init(&ctx, ctx_flags);
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto out_acquire_fini;
}
state->acquire_ctx = &ctx;
if (source) {
for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
func->notifier_len, i);
if (ret)
goto out_ctx_fini;
}
}
retry:
crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
if (ret == -EDEADLK)
goto deadlock;
else if (ret)
goto out_drop_locks;
}
asyh = nv50_head_atom(crtc_state);
asyh->crc.src = source;
asyh->or.crc_raster = nv50_crc_raster_type(source);
ret = drm_atomic_commit(state);
if (ret == -EDEADLK)
goto deadlock;
else if (ret)
goto out_drop_locks;
if (!source) {
/*
* If the user specified a custom flip threshold through
* debugfs, reset it
*/
crc->flip_threshold = func->flip_threshold;
}
out_drop_locks:
drm_modeset_drop_locks(&ctx);
out_ctx_fini:
if (!source || ret) {
for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
nv50_crc_ctx_fini(&crc->ctx[i]);
}
drm_atomic_state_put(state);
out_acquire_fini:
drm_modeset_acquire_fini(&ctx);
return ret;
deadlock:
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
static int
nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
{
struct nv50_head *head = m->private;
struct drm_crtc *crtc = &head->base.base;
struct nv50_crc *crc = &head->crc;
int ret;
ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
if (ret)
return ret;
seq_printf(m, "%d\n", crc->flip_threshold);
drm_modeset_unlock(&crtc->mutex);
return ret;
}
static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
return single_open(file, nv50_crc_debugfs_flip_threshold_get,
inode->i_private);
}
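/* debugfs write handler for the flip threshold: values above the hardware
* default are rejected, -1 restores the default, and changes are refused
* with -EBUSY while a CRC capture is active on the head.
*/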
static ssize_t
nv50_crc_debugfs_flip_threshold_set(struct file *file,
const char __user *ubuf, size_t len,
loff_t *offp)
{
struct seq_file *m = file->private_data;
struct nv50_head *head = m->private;
struct nv50_head_atom *armh;
struct drm_crtc *crtc = &head->base.base;
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct nv50_crc *crc = &head->crc;
const struct nv50_crc_func *func =
nv50_disp(crtc->dev)->core->func->crc;
int value, ret;
ret = kstrtoint_from_user(ubuf, len, 10, &value);
if (ret)
return ret;
if (value > func->flip_threshold)
return -EINVAL;
else if (value == -1)
value = func->flip_threshold;
else if (value < -1)
return -EINVAL;
ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
if (ret)
return ret;
armh = nv50_head_atom(crtc->state);
if (armh->crc.src) {
ret = -EBUSY;
goto out;
}
NV_DEBUG(drm,
"Changing CRC flip threshold for next capture on head-%d to %d\n",
head->base.index, value);
crc->flip_threshold = value;
ret = len;
out:
drm_modeset_unlock(&crtc->mutex);
return ret;
}
static const struct file_operations nv50_crc_flip_threshold_fops = {
.owner = THIS_MODULE,
.open = nv50_crc_debugfs_flip_threshold_open,
.read = seq_read,
.write = nv50_crc_debugfs_flip_threshold_set,
.release = single_release,
};
int nv50_head_crc_late_register(struct nv50_head *head)
{
struct drm_crtc *crtc = &head->base.base;
const struct nv50_crc_func *func =
nv50_disp(crtc->dev)->core->func->crc;
struct dentry *root;
if (!func || !crtc->debugfs_entry)
return 0;
root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
debugfs_create_file("flip_threshold", 0644, root, head,
&nv50_crc_flip_threshold_fops);
return 0;
}
static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
struct nv50_head *head)
{
struct nv50_crc *crc = &head->crc;
crc->flip_threshold = func->flip_threshold;
spin_lock_init(&crc->lock);
drm_vblank_work_init(&crc->flip_work, &head->base.base,
nv50_crc_ctx_flip_work);
}
void nv50_crc_init(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
struct drm_crtc *crtc;
const struct nv50_crc_func *func = disp->core->func->crc;
if (!func)
return;
drm_for_each_crtc(crtc, dev)
nv50_crc_init_head(disp, func, nv50_head(crtc));
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/crc.c |
// SPDX-License-Identifier: MIT
#include "crc.h"
#include "crcc37d.h"
#include "core.h"
#include "disp.h"
#include "head.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
int ret;
switch (source) {
case NV50_CRC_SOURCE_TYPE_SOR:
crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
break;
case NV50_CRC_SOURCE_TYPE_SF:
crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
break;
default:
break;
}
ret = PUSH_WAIT(push, 4);
if (ret)
return ret;
if (source) {
PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), crc_args);
} else {
PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), 0);
PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
}
return 0;
}
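/* Note the ordering in crcc57d_set_src() above: when enabling, the
* notifier context DMA is bound before CRC control is programmed, and the
* reverse when disabling, presumably so that a valid notifier is bound
* whenever CRC generation is active.
*/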
const struct nv50_crc_func crcc57d = {
.set_src = crcc57d_set_src,
.set_ctx = crcc37d_set_ctx,
.get_entry = crcc37d_get_entry,
.ctx_finished = crcc37d_ctx_finished,
.flip_threshold = CRCC37D_FLIP_THRESHOLD,
.num_entries = CRCC37D_MAX_ENTRIES,
.notifier_len = sizeof(struct crcc37d_notifier),
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/crcc57d.c |
// SPDX-License-Identifier: MIT
#include <drm/drm_crtc.h>
#include "crc.h"
#include "core.h"
#include "disp.h"
#include "head.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl907d.h>
#define CRC907D_MAX_ENTRIES 255
struct crc907d_notifier {
u32 status;
u32 :32; /* reserved */
struct crc907d_entry {
u32 status;
u32 compositor_crc;
u32 output_crc[2];
} entries[CRC907D_MAX_ENTRIES];
} __packed;
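/* The notifier's status word carries a "done" flag in bit 0 and per-engine
* overflow flags in bits 1..5; crc907d_ctx_finished() below decodes the
* overflow bits into the engine names used in its error messages.
*/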
static int
crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, WIDE_PIPE_CRC, ENABLE);
int ret;
switch (source) {
case NV50_CRC_SOURCE_TYPE_SOR:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SOR(or));
break;
case NV50_CRC_SOURCE_TYPE_PIOR:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, PIOR(or));
break;
case NV50_CRC_SOURCE_TYPE_DAC:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, DAC(or));
break;
case NV50_CRC_SOURCE_TYPE_RG:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, RG(i));
break;
case NV50_CRC_SOURCE_TYPE_SF:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, SF(i));
break;
case NV50_CRC_SOURCE_NONE:
crc_args |= NVDEF(NV907D, HEAD_SET_CRC_CONTROL, PRIMARY_OUTPUT, NONE);
break;
}
if ((ret = PUSH_WAIT(push, 4)))
return ret;
if (source) {
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
} else {
PUSH_MTHD(push, NV907D, HEAD_SET_CRC_CONTROL(i), crc_args);
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
}
return 0;
}
static int
crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
return 0;
}
static u32 crc907d_get_entry(struct nv50_head *head,
struct nv50_crc_notifier_ctx *ctx,
enum nv50_crc_source source, int idx)
{
struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
return ioread32_native(¬ifier->entries[idx].output_crc[0]);
}
static bool crc907d_ctx_finished(struct nv50_head *head,
struct nv50_crc_notifier_ctx *ctx)
{
struct nouveau_drm *drm = nouveau_drm(head->base.base.dev);
struct crc907d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
const u32 status = ioread32_native(¬ifier->status);
const u32 overflow = status & 0x0000003e;
if (!(status & 0x00000001))
return false;
if (overflow) {
const char *engine = NULL;
switch (overflow) {
case 0x00000004: engine = "DSI"; break;
case 0x00000008: engine = "Compositor"; break;
case 0x00000010: engine = "CRC output 1"; break;
case 0x00000020: engine = "CRC output 2"; break;
}
if (engine)
NV_ERROR(drm,
"CRC notifier context for head %d overflowed on %s: %x\n",
head->base.index, engine, status);
else
NV_ERROR(drm,
"CRC notifier context for head %d overflowed: %x\n",
head->base.index, status);
}
NV_DEBUG(drm, "Head %d CRC context status: %x\n",
head->base.index, status);
return true;
}
const struct nv50_crc_func crc907d = {
.set_src = crc907d_set_src,
.set_ctx = crc907d_set_ctx,
.get_entry = crc907d_get_entry,
.ctx_finished = crc907d_ctx_finished,
.flip_threshold = CRC907D_MAX_ENTRIES - 10,
.num_entries = CRC907D_MAX_ENTRIES,
.notifier_len = sizeof(struct crc907d_notifier),
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/crc907d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
static int
pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
struct nvif_push *push = core->chan.push;
int ret;
if (asyh) {
ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, HSYNC_POLARITY, asyh->or.nhsync);
ctrl |= NVVAL(NV507D, PIOR_SET_CONTROL, VSYNC_POLARITY, asyh->or.nvsync);
ctrl |= NVVAL(NV837D, PIOR_SET_CONTROL, PIXEL_DEPTH, asyh->or.depth);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, PIOR_SET_CONTROL(or), ctrl);
return 0;
}
static void
pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
int or)
{
outp->caps.dp_interlace = true;
}
const struct nv50_outp_func
pior507d = {
.ctrl = pior507d_ctrl,
.get_caps = pior507d_get_caps,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/pior507d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_connector.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_connector.h"
#include "head.h"
#include "core.h"
#include "crc.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl907d.h>
int
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, asyh->or.depth),
HEAD_SET_CONTROL(i), 0x31ec6000 | head->base.index << 25 |
NVVAL(NV907D, HEAD_SET_CONTROL, STRUCTURE, asyh->mode.interlace));
return 0;
}
int
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_PROCAMP(i),
NVDEF(NV907D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
NVDEF(NV907D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
NVVAL(NV907D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
NVDEF(NV907D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA) |
NVDEF(NV907D, HEAD_SET_PROCAMP, RANGE_COMPRESSION, DISABLE));
return 0;
}
static int
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_DITHER_CONTROL(i),
NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
NVVAL(NV907D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
return 0;
}
int
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
if (asyh->ovly.cpp) {
switch (asyh->ovly.cpp) {
case 8: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
case 4: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
case 2: bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
default:
WARN_ON(1);
break;
}
bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, USABLE, TRUE);
} else {
bounds |= NVDEF(NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OVERLAY_USAGE_BOUNDS(i), bounds);
return 0;
}
static int
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
case 8: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
case 4: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
case 2: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
case 1: bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
bounds |= NVDEF(NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
return 0;
}
int
head907d_curs_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), 0x00000000);
return 0;
}
int
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NV907D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NV907D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
return 0;
}
int
head907d_core_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMAS_ISO(i), 0x00000000);
return 0;
}
int
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 9)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OFFSET(i),
NVVAL(NV907D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
PUSH_MTHD(push, NV907D, HEAD_SET_SIZE(i),
NVVAL(NV907D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
NVVAL(NV907D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
HEAD_SET_STORAGE(i),
NVVAL(NV907D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
NVVAL(NV907D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
NVVAL(NV907D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
HEAD_SET_PARAMS(i),
NVVAL(NV907D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
NVDEF(NV907D, HEAD_SET_PARAMS, SUPER_SAMPLE, X1_AA) |
NVDEF(NV907D, HEAD_SET_PARAMS, GAMMA, LINEAR),
HEAD_SET_CONTEXT_DMAS_ISO(i),
NVVAL(NV907D, HEAD_SET_CONTEXT_DMAS_ISO, HANDLE, asyh->core.handle));
PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_POINT_IN(i),
NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
NVVAL(NV907D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
return 0;
}
int
head907d_olut_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, DISABLE));
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), 0x00000000);
return 0;
}
int
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OUTPUT_LUT_LO(i),
NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, ENABLE, ENABLE) |
NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_LO, MODE, asyh->olut.mode) |
NVDEF(NV907D, HEAD_SET_OUTPUT_LUT_LO, NEVER_YIELD_TO_BASE, DISABLE),
HEAD_SET_OUTPUT_LUT_HI(i),
NVVAL(NV907D, HEAD_SET_OUTPUT_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
PUSH_MTHD(push, NV907D, HEAD_SET_CONTEXT_DMA_LUT(i), asyh->olut.handle);
return 0;
}
void
head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
{
for (; size--; in++, mem += 8) {
writew(drm_color_lut_extract(in-> red, 14) + 0x6000, mem + 0);
writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
}
/* INTERPOLATE modes require a "next" entry to interpolate with,
* so we replicate the last entry to deal with this for now.
*/
writew(readw(mem - 8), mem + 0);
writew(readw(mem - 6), mem + 2);
writew(readw(mem - 4), mem + 4);
}
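/* A worked example of the encoding above (a sketch, assuming the 14-bit
* biased LUT RAM layout implied by the code): a full-scale component of
* 0xffff is reduced to 14 bits (0x3fff) and biased by 0x6000, so the
* value written to LUT RAM is 0x9fff.
*/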
bool
head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 256 && size != 1024)
return false;
if (size == 1024)
asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE;
else
asyh->olut.mode = NV907D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE;
asyh->olut.load = head907d_olut_load;
return true;
}
bool head907d_ilut_check(int size)
{
return size == 256 || size == 1024;
}
int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_OVERSCAN_COLOR(i),
NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
NVVAL(NV907D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
HEAD_SET_RASTER_SIZE(i),
NVVAL(NV907D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
NVVAL(NV907D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
HEAD_SET_RASTER_SYNC_END(i),
NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
NVVAL(NV907D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
HEAD_SET_RASTER_BLANK_END(i),
NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
NVVAL(NV907D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
HEAD_SET_RASTER_BLANK_START(i),
NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
NVVAL(NV907D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
HEAD_SET_RASTER_VERT_BLANK2(i),
NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
NVVAL(NV907D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e));
PUSH_MTHD(push, NV907D, HEAD_SET_DEFAULT_BASE_COLOR(i),
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
NVVAL(NV907D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
PUSH_MTHD(push, NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000) |
NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, ADJ1000DIV1001, FALSE),
HEAD_SET_PIXEL_CLOCK_CONFIGURATION(i),
NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, MODE, CLK_CUSTOM) |
NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, NOT_DRIVER, FALSE) |
NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_CONFIGURATION, ENABLE_HOPPING, FALSE),
HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
NVVAL(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000) |
NVDEF(NV907D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, ADJ1000DIV1001, FALSE));
return 0;
}
int
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 8)))
return ret;
PUSH_MTHD(push, NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
NVDEF(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
NVVAL(NV907D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_IN(i),
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
PUSH_MTHD(push, NV907D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH),
HEAD_SET_VIEWPORT_SIZE_OUT_MAX(i),
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, WIDTH, asyh->view.oW) |
NVVAL(NV907D, HEAD_SET_VIEWPORT_SIZE_OUT_MAX, HEIGHT, asyh->view.oH));
return 0;
}
const struct nv50_head_func
head907d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
.ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
.core_set = head907d_core_set,
.core_clr = head907d_core_clr,
.curs_layout = head507d_curs_layout,
.curs_format = head507d_curs_format,
.curs_set = head907d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head907d_base,
.ovly = head907d_ovly,
.dither = head907d_dither,
.procamp = head907d_procamp,
.or = head907d_or,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/head907d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wimm.h"
#include <nvif/class.h>
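/* All currently supported window-immediate classes share the c37b
* implementation; the table below is ordered newest first, and
* nvif_mclass() selects the first class the display object supports.
*/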
int
nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
{
struct {
s32 oclass;
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
};
struct nv50_disp *disp = nv50_disp(drm->dev);
int cid;
cid = nvif_mclass(&disp->disp->object, wimms);
if (cid < 0) {
NV_ERROR(drm, "No supported window immediate class\n");
return cid;
}
return wimms[cid].init(drm, wimms[cid].oclass, wndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/wimm.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "curs.h"
int
curs907a_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return curs507a_new_(&curs507a, drm, head, oclass,
0x00000001 << (head * 4), pwndw);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv50/curs907a.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "atom.h"
#include "core.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
/*XXX: This is a dirty hack until OR depth handling is
* improved to properly handle deep colour etc.
*/
switch (asyh->or.depth) {
case 6: depth = 5; break;
case 5: depth = 4; break;
case 2: depth = 1; break;
case 0: depth = 4; break;
default:
depth = asyh->or.depth;
WARN_ON(1);
break;
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
NVVAL(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
NVDEF(NVC57D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
return 0;
}
static int
headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
//TODO:
PUSH_MTHD(push, NVC57D, HEAD_SET_PROCAMP(i),
NVDEF(NVC57D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
NVDEF(NVC57D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
NVDEF(NVC57D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
return 0;
}
static int
headc57d_olut_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_OLUT(i), 0x00000000);
return 0;
}
static int
headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
return ret;
PUSH_MTHD(push, NVC57D, HEAD_SET_OLUT_CONTROL(i),
NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
NVDEF(NVC57D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
NVVAL(NVC57D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff,
HEAD_SET_CONTEXT_DMA_OLUT(i), asyh->olut.handle,
HEAD_SET_OFFSET_OLUT(i), asyh->olut.offset >> 8);
return 0;
}
static void
headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
{
memset_io(mem, 0x00, 0x20); /* VSS header. */
mem += 0x20;
while (size--) {
u16 r = drm_color_lut_extract(in-> red + 0, 16);
u16 g = drm_color_lut_extract(in->green + 0, 16);
u16 b = drm_color_lut_extract(in-> blue + 0, 16);
u16 ri = 0, gi = 0, bi = 0, i;
if (in++, size) {
ri = (drm_color_lut_extract(in-> red, 16) - r) / 4;
gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
}
for (i = 0; i < 4; i++, mem += 8) {
writew(r + ri * i, mem + 0);
writew(g + gi * i, mem + 2);
writew(b + bi * i, mem + 4);
}
}
/* INTERPOLATE modes require a "next" entry to interpolate with,
* so we replicate the last entry to deal with this for now.
*/
writew(readw(mem - 8), mem + 0);
writew(readw(mem - 6), mem + 2);
writew(readw(mem - 4), mem + 4);
}
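/* The 8-bit (256-entry) path above expands each input entry into four
* linearly interpolated hardware entries, stepping by (next - cur) / 4 per
* component, to fill the 1024-entry table. E.g. consecutive red values
* 0x1000 and 0x2000 expand to 0x1000, 0x1400, 0x1800 and 0x1c00.
*/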
static void
headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
{
memset_io(mem, 0x00, 0x20); /* VSS header. */
mem += 0x20;
for (; size--; in++, mem += 0x08) {
writew(drm_color_lut_extract(in-> red, 16), mem + 0);
writew(drm_color_lut_extract(in->green, 16), mem + 2);
writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
}
/* INTERPOLATE modes require a "next" entry to interpolate with,
* so we replicate the last entry to deal with this for now.
*/
writew(readw(mem - 8), mem + 0);
writew(readw(mem - 6), mem + 2);
writew(readw(mem - 4), mem + 4);
}
static bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
return false;
asyh->olut.mode = NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10;
asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
asyh->olut.output_mode = NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE;
if (size == 256)
asyh->olut.load = headc57d_olut_load_8;
else
asyh->olut.load = headc57d_olut_load;
return true;
}
static int
headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 15)))
return ret;
PUSH_MTHD(push, NVC57D, HEAD_SET_RASTER_SIZE(i),
NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
NVVAL(NVC57D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
HEAD_SET_RASTER_SYNC_END(i),
NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
NVVAL(NVC57D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
HEAD_SET_RASTER_BLANK_END(i),
NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
HEAD_SET_RASTER_BLANK_START(i),
NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
NVVAL(NVC57D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
//XXX:
PUSH_NVSQ(push, NVC57D, 0x2074 + (i * 0x400), m->v.blank2e << 16 | m->v.blank2s);
PUSH_NVSQ(push, NVC57D, 0x2008 + (i * 0x400), m->interlace);
PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
PUSH_MTHD(push, NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
NVVAL(NVC57D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
/*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
PUSH_MTHD(push, NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
NVDEF(NVC57D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
return 0;
}
const struct nv50_head_func
headc57d = {
.view = headc37d_view,
.mode = headc57d_mode,
.olut = headc57d_olut,
.ilut_check = head907d_ilut_check,
.olut_identity = true,
.olut_size = 1024,
.olut_set = headc57d_olut_set,
.olut_clr = headc57d_olut_clr,
.curs_layout = head917d_curs_layout,
.curs_format = headc37d_curs_format,
.curs_set = headc37d_curs_set,
.curs_clr = headc37d_curs_clr,
.dither = headc37d_dither,
.procamp = headc57d_procamp,
.or = headc57d_or,
/* TODO: flexible window mappings */
.static_wndw_map = headc37d_static_wndw_map,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/headc57d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "core.h"
#include "nvif/push.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl917d.h>
static int
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV917D, HEAD_SET_DITHER_CONTROL(i),
NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
NVVAL(NV917D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
return 0;
}
static int
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
case 8: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
case 4: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
case 2: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
case 1: bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
bounds |= NVDEF(NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, BASE_LUT, USAGE_1025);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV917D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
return 0;
}
static int
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
ret = PUSH_WAIT(push, 5);
if (ret)
return ret;
PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
return 0;
}
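/* 917d extends the supported cursor sizes to 32x32 through 256x256; the
* cursor framebuffer width selects the layout directly, and anything else
* is rejected with -EINVAL.
*/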
int
head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->state.fb->width) {
case 32: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
case 64: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
case 128: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128; break;
case 256: asyh->curs.layout = NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256; break;
default:
return -EINVAL;
}
return 0;
}
const struct nv50_head_func
head917d = {
.view = head907d_view,
.mode = head907d_mode,
.olut = head907d_olut,
.ilut_check = head907d_ilut_check,
.olut_size = 1024,
.olut_set = head907d_olut_set,
.olut_clr = head907d_olut_clr,
.core_calc = head507d_core_calc,
.core_set = head907d_core_set,
.core_clr = head907d_core_clr,
.curs_layout = head917d_curs_layout,
.curs_format = head507d_curs_format,
.curs_set = head917d_curs_set,
.curs_clr = head907d_curs_clr,
.base = head917d_base,
.ovly = head907d_ovly,
.dither = head917d_dither,
.procamp = head907d_procamp,
.or = head907d_or,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/head917d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "head.h"
#include "core.h"
#include <nvif/push507c.h>
#include <nvhw/class/cl507d.h>
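/* A note on the PUSH_WAIT() word budgets used throughout this file:
 * each PUSH_MTHD() costs one header word plus one word per value, and
 * the extra "METHOD(i), value" pairs appended to a PUSH_MTHD() ride
 * under the same header since the methods are consecutive in the
 * class. E.g. head507d_core_set() below needs 2 + 5 + 2 = 9 words.
 */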
int
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_PROCAMP(i),
NVDEF(NV507D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
NVDEF(NV507D, HEAD_SET_PROCAMP, CHROMA_LPF, AUTO) |
NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_COS, asyh->procamp.sat.cos) |
NVVAL(NV507D, HEAD_SET_PROCAMP, SAT_SINE, asyh->procamp.sat.sin) |
NVDEF(NV507D, HEAD_SET_PROCAMP, TRANSITION, HARD));
return 0;
}
int
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_DITHER_CONTROL(i),
NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
NVVAL(NV507D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
return 0;
}
int
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
if (asyh->ovly.cpp) {
switch (asyh->ovly.cpp) {
case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
default:
WARN_ON(1);
break;
}
bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
} else {
bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
return 0;
}
int
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
if (asyh->base.cpp) {
switch (asyh->base.cpp) {
case 8: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_64); break;
case 4: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_32); break;
case 2: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_16); break;
case 1: bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, PIXEL_DEPTH, BPP_8); break;
default:
WARN_ON(1);
break;
}
bounds |= NVDEF(NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, USABLE, TRUE);
}
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(i), bounds);
return 0;
}
static int
head507d_curs_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8) |
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, W64_H64));
return 0;
}
static int
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_CURSOR(i),
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
NVVAL(NV507D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND) |
NVDEF(NV507D, HEAD_SET_CONTROL_CURSOR, SUB_OWNER, NONE),
HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
return 0;
}
int
head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->image.format) {
case 0xcf: asyh->curs.format = NV507D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8; break;
default:
WARN_ON(1);
return -EINVAL;
}
return 0;
}
int
head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
switch (asyw->image.w) {
case 32: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32; break;
case 64: asyh->curs.layout = NV507D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64; break;
default:
return -EINVAL;
}
return 0;
}
int
head507d_core_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_CONTEXT_DMA_ISO(i), 0x00000000);
return 0;
}
static int
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 9)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_OFFSET(i, 0),
NVVAL(NV507D, HEAD_SET_OFFSET, ORIGIN, asyh->core.offset >> 8));
PUSH_MTHD(push, NV507D, HEAD_SET_SIZE(i),
NVVAL(NV507D, HEAD_SET_SIZE, WIDTH, asyh->core.w) |
NVVAL(NV507D, HEAD_SET_SIZE, HEIGHT, asyh->core.h),
HEAD_SET_STORAGE(i),
NVVAL(NV507D, HEAD_SET_STORAGE, BLOCK_HEIGHT, asyh->core.blockh) |
NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.pitch >> 8) |
NVVAL(NV507D, HEAD_SET_STORAGE, PITCH, asyh->core.blocks) |
NVVAL(NV507D, HEAD_SET_STORAGE, MEMORY_LAYOUT, asyh->core.layout),
HEAD_SET_PARAMS(i),
NVVAL(NV507D, HEAD_SET_PARAMS, FORMAT, asyh->core.format) |
NVVAL(NV507D, HEAD_SET_PARAMS, KIND, asyh->core.kind) |
NVDEF(NV507D, HEAD_SET_PARAMS, PART_STRIDE, PARTSTRIDE_256),
HEAD_SET_CONTEXT_DMA_ISO(i),
NVVAL(NV507D, HEAD_SET_CONTEXT_DMA_ISO, HANDLE, asyh->core.handle));
PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_POINT_IN(i, 0),
NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, X, asyh->core.x) |
NVVAL(NV507D, HEAD_SET_VIEWPORT_POINT_IN, Y, asyh->core.y));
/* EVO will complain with INVALID_STATE if we have an
* active cursor and (re)specify HeadSetContextDmaIso
* without also updating HeadSetOffsetCursor.
*/
asyh->set.curs = asyh->curs.visible;
asyh->set.olut = asyh->olut.handle != 0;
return 0;
}
void
head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_disp *disp = nv50_disp(head->base.base.dev);
if ((asyh->core.visible = (asyh->base.cpp != 0))) {
asyh->core.x = asyh->base.x;
asyh->core.y = asyh->base.y;
asyh->core.w = asyh->base.w;
asyh->core.h = asyh->base.h;
} else
if ((asyh->core.visible = (asyh->ovly.cpp != 0)) ||
(asyh->core.visible = asyh->curs.visible)) {
/*XXX: We need to either find some way of having the
* primary base layer appear black, while still
* being able to display the other layers, or we
* need to allocate a dummy black surface here.
*/
asyh->core.x = 0;
asyh->core.y = 0;
asyh->core.w = asyh->state.mode.hdisplay;
asyh->core.h = asyh->state.mode.vdisplay;
}
asyh->core.handle = disp->core->chan.vram.handle;
asyh->core.offset = 0;
asyh->core.format = NV507D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8;
asyh->core.kind = NV507D_HEAD_SET_PARAMS_KIND_KIND_PITCH;
asyh->core.layout = NV507D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH;
asyh->core.blockh = NV507D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB;
asyh->core.blocks = 0;
asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
}
static int
head507d_olut_clr(struct nv50_head *head)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, DISABLE));
return 0;
}
static int
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_BASE_LUT_LO(i),
NVDEF(NV507D, HEAD_SET_BASE_LUT_LO, ENABLE, ENABLE) |
NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, MODE, asyh->olut.mode) |
NVVAL(NV507D, HEAD_SET_BASE_LUT_LO, ORIGIN, 0),
HEAD_SET_BASE_LUT_HI(i),
NVVAL(NV507D, HEAD_SET_BASE_LUT_HI, ORIGIN, asyh->olut.offset >> 8));
return 0;
}
static void
head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
{
for (; size--; in++, mem += 8) {
writew(drm_color_lut_extract(in-> red, 11) << 3, mem + 0);
writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
}
/* INTERPOLATE modes require a "next" entry to interpolate with,
* so we replicate the last entry to deal with this for now.
*/
writew(readw(mem - 8), mem + 0);
writew(readw(mem - 6), mem + 2);
writew(readw(mem - 4), mem + 4);
}
bool
head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 256)
return false;
if (asyh->base.cpp == 1)
asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_LORES;
else
asyh->olut.mode = NV507D_HEAD_SET_BASE_LUT_LO_MODE_HIRES;
asyh->olut.load = head507d_olut_load;
return true;
}
int
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_PIXEL_CLOCK(i),
NVVAL(NV507D, HEAD_SET_PIXEL_CLOCK, FREQUENCY, m->clock) |
NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, MODE, CLK_CUSTOM) |
NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, ADJ1000DIV1001, FALSE) |
NVDEF(NV507D, HEAD_SET_PIXEL_CLOCK, NOT_DRIVER, FALSE),
HEAD_SET_CONTROL(i),
NVVAL(NV507D, HEAD_SET_CONTROL, STRUCTURE, m->interlace));
PUSH_MTHD(push, NV507D, HEAD_SET_OVERSCAN_COLOR(i),
NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, RED, 0) |
NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, GRN, 0) |
NVVAL(NV507D, HEAD_SET_OVERSCAN_COLOR, BLU, 0),
HEAD_SET_RASTER_SIZE(i),
NVVAL(NV507D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
NVVAL(NV507D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
HEAD_SET_RASTER_SYNC_END(i),
NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
NVVAL(NV507D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
HEAD_SET_RASTER_BLANK_END(i),
NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
NVVAL(NV507D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
HEAD_SET_RASTER_BLANK_START(i),
NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
NVVAL(NV507D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks),
HEAD_SET_RASTER_VERT_BLANK2(i),
NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YSTART, m->v.blank2s) |
NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK2, YEND, m->v.blank2e),
HEAD_SET_RASTER_VERT_BLANK_DMI(i),
NVVAL(NV507D, HEAD_SET_RASTER_VERT_BLANK_DMI, DURATION, m->v.blankus));
PUSH_MTHD(push, NV507D, HEAD_SET_DEFAULT_BASE_COLOR(i),
NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, RED, 0) |
NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, GREEN, 0) |
NVVAL(NV507D, HEAD_SET_DEFAULT_BASE_COLOR, BLUE, 0));
return 0;
}
int
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 7)))
return ret;
PUSH_MTHD(push, NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER(i),
NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VERTICAL_TAPS, TAPS_1) |
NVDEF(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HORIZONTAL_TAPS, TAPS_1) |
NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, HRESPONSE_BIAS, 0) |
NVVAL(NV507D, HEAD_SET_CONTROL_OUTPUT_SCALER, VRESPONSE_BIAS, 0));
PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_IN(i),
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
PUSH_MTHD(push, NV507D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH),
HEAD_SET_VIEWPORT_SIZE_OUT_MIN(i),
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, WIDTH, asyh->view.oW) |
NVVAL(NV507D, HEAD_SET_VIEWPORT_SIZE_OUT_MIN, HEIGHT, asyh->view.oH));
return 0;
}
const struct nv50_head_func
head507d = {
.view = head507d_view,
.mode = head507d_mode,
.olut = head507d_olut,
.olut_size = 256,
.olut_set = head507d_olut_set,
.olut_clr = head507d_olut_clr,
.core_calc = head507d_core_calc,
.core_set = head507d_core_set,
.core_clr = head507d_core_clr,
.curs_layout = head507d_curs_layout,
.curs_format = head507d_curs_format,
.curs_set = head507d_curs_set,
.curs_clr = head507d_curs_clr,
.base = head507d_base,
.ovly = head507d_ovly,
.dither = head507d_dither,
.procamp = head507d_procamp,
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/head507d.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "curs.h"
#include "core.h"
#include "head.h"
#include <nvif/if0014.h>
#include <nvif/timer.h>
#include <nvhw/class/cl507a.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
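/* Poll for up to 100ms until the cursor PIO channel reports at least
 * four free slots; WARN and report failure if it never does.
 */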
bool
curs507a_space(struct nv50_wndw *wndw)
{
nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
if (NVIF_TV32(&wndw->wimm.base.user, NV507A, FREE, COUNT, >=, 4))
return true;
);
WARN_ON(1);
return false;
}
static int
curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
{
struct nvif_object *user = &wndw->wimm.base.user;
int ret = nvif_chan_wait(&wndw->wimm, 1);
if (ret == 0) {
NVIF_WR32(user, NV507A, UPDATE,
NVDEF(NV507A, UPDATE, INTERLOCK_WITH_CORE, DISABLE));
}
return ret;
}
static int
curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_object *user = &wndw->wimm.base.user;
int ret = nvif_chan_wait(&wndw->wimm, 1);
if (ret == 0) {
NVIF_WR32(user, NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT,
NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, X, asyw->point.x) |
NVVAL(NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT, Y, asyw->point.y));
}
return ret;
}
const struct nv50_wimm_func
curs507a = {
.point = curs507a_point,
.update = curs507a_update,
};
static void
curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
struct nv50_wndw_atom *asyw)
{
u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
u32 offset = asyw->image.offset[0];
if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
}
}
static void
curs507a_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
asyh->curs.visible = false;
}
static int
curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
struct nv50_head *head = nv50_head(asyw->state.crtc);
struct drm_framebuffer *fb = asyw->state.fb;
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
asyh->curs.visible = asyw->state.visible;
if (ret || !asyh->curs.visible)
return ret;
if (asyw->state.crtc_w != asyw->state.crtc_h) {
NV_ATOMIC(drm, "Plane width/height must be equal for cursors\n");
return -EINVAL;
}
if (asyw->image.w != asyw->state.crtc_w) {
NV_ATOMIC(drm, "Plane width must be equal to fb width for cursors (height can be larger though)\n");
return -EINVAL;
}
if (asyw->state.src_x || asyw->state.src_y) {
NV_ATOMIC(drm, "Cursor planes do not support framebuffer offsets\n");
return -EINVAL;
}
if (asyw->image.pitch[0] != asyw->image.w * fb->format->cpp[0]) {
NV_ATOMIC(drm,
"%s: invalid cursor image pitch: image must be packed (pitch = %d, width = %d)\n",
wndw->plane.name, asyw->image.pitch[0], asyw->image.w);
return -EINVAL;
}
ret = head->func->curs_layout(head, asyw, asyh);
if (ret) {
NV_ATOMIC(drm,
"%s: invalid cursor image size: unsupported size %dx%d\n",
wndw->plane.name, asyw->image.w, asyw->image.h);
return ret;
}
ret = head->func->curs_format(head, asyw, asyh);
if (ret) {
NV_ATOMIC(drm,
"%s: invalid cursor image format 0x%X\n",
wndw->plane.name, fb->format->format);
return ret;
}
return 0;
}
static const u32
curs507a_format[] = {
DRM_FORMAT_ARGB8888,
0
};
static const struct nv50_wndw_func
curs507a_wndw = {
.acquire = curs507a_acquire,
.release = curs507a_release,
.prepare = curs507a_prepare,
};
int
curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int head, s32 oclass, u32 interlock_data,
struct nv50_wndw **pwndw)
{
struct nvif_disp_chan_v0 args = {
.id = head,
};
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
ret = nv50_wndw_new_(&curs507a_wndw, drm->dev, DRM_PLANE_TYPE_CURSOR,
"curs", head, curs507a_format, BIT(head),
NV50_DISP_INTERLOCK_CURS, interlock_data, &wndw);
if (*pwndw = wndw, ret)
return ret;
ret = nvif_object_ctor(&disp->disp->object, "kmsCurs", 0, oclass,
&args, sizeof(args), &wndw->wimm.base.user);
if (ret) {
NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
return ret;
}
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
wndw->immd = func;
wndw->ctxdma.parent = NULL;
return 0;
}
int
curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return curs507a_new_(&curs507a, drm, head, oclass,
0x00000001 << (head * 8), pwndw);
}
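#if 0
/* Illustrative only, not built: later cursor classes reuse
 * curs507a_new_() and change only the per-head interlock data
 * (curs907a does something similar). The name and the "head * 4"
 * spacing below are assumptions for illustration, not code from
 * this tree.
 */
static int
cursXXXX_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
return curs507a_new_(&curs507a, drm, head, oclass,
0x00000001 << (head * 4), pwndw);
}
#endif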
| linux-master | drivers/gpu/drm/nouveau/dispnv50/curs507a.c |
// SPDX-License-Identifier: MIT
#include <drm/drm_crtc.h>
#include "crc.h"
#include "crcc37d.h"
#include "core.h"
#include "disp.h"
#include "head.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h>
static int
crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE);
int ret;
switch (source) {
case NV50_CRC_SOURCE_TYPE_SOR:
crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or));
break;
case NV50_CRC_SOURCE_TYPE_PIOR:
crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, PIOR(or));
break;
case NV50_CRC_SOURCE_TYPE_SF:
crc_args |= NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF);
break;
default:
break;
}
if ((ret = PUSH_WAIT(push, 4)))
return ret;
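/* When enabling, point the notifier ctxdma at the context before
 * turning CRC generation on; tear down in the reverse order.
 */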
if (source) {
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle);
PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), crc_args);
} else {
PUSH_MTHD(push, NVC37D, HEAD_SET_CRC_CONTROL(i), 0);
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), 0);
}
return 0;
}
int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
return ret;
PUSH_MTHD(push, NVC37D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx ? ctx->ntfy.handle : 0);
return 0;
}
u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx,
enum nv50_crc_source source, int idx)
{
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
struct crcc37d_entry __iomem *entry = ¬ifier->entries[idx];
u32 __iomem *crc_addr;
if (source == NV50_CRC_SOURCE_RG)
crc_addr = &entry->rg_crc;
else
crc_addr = &entry->output_crc[0];
return ioread32_native(crc_addr);
}
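/* Notifier status word: bit 0 signals completion; bits 1:6 (0x7e) are
 * per-engine overflow flags, decoded in the switch below.
 */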
bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
struct nouveau_drm *drm = nouveau_drm(head->base.base.dev);
struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr;
const u32 status = ioread32_native(¬ifier->status);
const u32 overflow = status & 0x0000007e;
if (!(status & 0x00000001))
return false;
if (overflow) {
const char *engine = NULL;
switch (overflow) {
case 0x00000004: engine = "Front End"; break;
case 0x00000008: engine = "Compositor"; break;
case 0x00000010: engine = "RG"; break;
case 0x00000020: engine = "CRC output 1"; break;
case 0x00000040: engine = "CRC output 2"; break;
}
if (engine)
NV_ERROR(drm,
"CRC notifier context for head %d overflowed on %s: %x\n",
head->base.index, engine, status);
else
NV_ERROR(drm,
"CRC notifier context for head %d overflowed: %x\n",
head->base.index, status);
}
NV_DEBUG(drm, "Head %d CRC context status: %x\n",
head->base.index, status);
return true;
}
const struct nv50_crc_func crcc37d = {
.set_src = crcc37d_set_src,
.set_ctx = crcc37d_set_ctx,
.get_entry = crcc37d_get_entry,
.ctx_finished = crcc37d_ctx_finished,
.flip_threshold = CRCC37D_FLIP_THRESHOLD,
.num_entries = CRCC37D_MAX_ENTRIES,
.notifier_len = sizeof(struct crcc37d_notifier),
};
| linux-master | drivers/gpu/drm/nouveau/dispnv50/crcc37d.c |
/*
* Copyright 1993-2003 NVIDIA, Corporation
* Copyright 2006 Dave Airlie
* Copyright 2007 Maarten Maathuis
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ttm.h"
#include "nouveau_bo.h"
#include "nouveau_gem.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include "nvreg.h"
#include "disp.h"
#include "nouveau_dma.h"
#include <subdev/bios/pll.h>
#include <subdev/clk.h>
#include <nvif/push006c.h>
#include <nvif/event.h>
#include <nvif/cl0046.h>
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
static void
crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
{
NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
crtcstate->CRTC[index]);
}
static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
}
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
}
static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
nv_crtc->sharpness = level;
if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
level += 0x40;
regp->ramdac_634 = level;
NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
}
#define PLLSEL_VPLL1_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
| NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
#define PLLSEL_VPLL2_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
| NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
#define PLLSEL_TV_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
/* NV4x 0x40.. pll notes:
* gpu pll: 0x4000 + 0x4004
* ?gpu? pll: 0x4008 + 0x400c
* vpll1: 0x4010 + 0x4014
* vpll2: 0x4018 + 0x401c
* mpll: 0x4020 + 0x4024
* mpll: 0x4038 + 0x403c
*
* the first register of each pair has some unknown details:
* bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
* bits 20-23: (mpll) something to do with post divider?
* bits 28-31: related to single stage mode? (bit 8/12)
*/
static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
struct nvkm_pll_vals *pv = ®p->pllvals;
struct nvbios_pll pll_lim;
if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
&pll_lim))
return;
/* NM2 == 0 is used to determine single stage mode on two stage plls */
pv->NM2 = 0;
/* for newer nv4x the blob uses only the first stage of the vpll below a
* certain clock. for a certain nv4b this is 150MHz. since the max
* output frequency of the first stage for this card is 300MHz, it is
* assumed the threshold is given by vco1 maxfreq/2
*/
/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
* not 8, others unknown), the blob always uses both plls. no problem
* has yet been observed in allowing the use of a single stage pll on all
* nv43 however. the behaviour of single stage use is untested on nv40
*/
if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv))
return;
state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
/* The blob uses this always, so let's do the same */
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
/* again nv40 and some nv43 act more like nv3x as described above */
if (drm->client.device.info.chipset < 0x41)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
if (pv->NM2)
NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
else
NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n",
pv->N1, pv->M1, pv->log2P);
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
}
static void
nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode,
nv_crtc->index);
if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
return;
nv_crtc->last_dpms = mode;
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
switch (mode) {
case DRM_MODE_DPMS_STANDBY:
/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
seq1 = 0x20;
crtc17 = 0x80;
crtc1A |= 0x80;
break;
case DRM_MODE_DPMS_SUSPEND:
/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
seq1 = 0x20;
crtc17 = 0x80;
crtc1A |= 0x40;
break;
case DRM_MODE_DPMS_OFF:
/* Screen: Off; HSync: Off, VSync: Off */
seq1 = 0x20;
crtc17 = 0x00;
crtc1A |= 0xC0;
break;
case DRM_MODE_DPMS_ON:
default:
/* Screen: On; HSync: On, VSync: On */
seq1 = 0x00;
crtc17 = 0x80;
break;
}
NVVgaSeqReset(dev, nv_crtc->index, true);
/* Each head has its own sequencer, so we can turn it off when we want */
seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
mdelay(10);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
NVVgaSeqReset(dev, nv_crtc->index, false);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
}
static void
nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct drm_framebuffer *fb = crtc->primary->fb;
/* Calculate our timings */
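/* Horizontal values are in units of 8-pixel character clocks (hence
 * the >> 3); the +1/-1/-2/-5 adjustments match the off-by-N encodings
 * the VGA CRTC registers expect.
 */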
int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
int horizStart = (mode->crtc_hsync_start >> 3) + 1;
int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
int horizTotal = (mode->crtc_htotal >> 3) - 5;
int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
int vertDisplay = mode->crtc_vdisplay - 1;
int vertStart = mode->crtc_vsync_start - 1;
int vertEnd = mode->crtc_vsync_end - 1;
int vertTotal = mode->crtc_vtotal - 2;
int vertBlankStart = mode->crtc_vdisplay - 1;
int vertBlankEnd = mode->crtc_vtotal - 1;
struct drm_encoder *encoder;
bool fp_output = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (encoder->crtc == crtc &&
(nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
nv_encoder->dcb->type == DCB_OUTPUT_TMDS))
fp_output = true;
}
if (fp_output) {
vertStart = vertTotal - 3;
vertEnd = vertTotal - 2;
vertBlankStart = vertStart;
horizStart = horizTotal - 5;
horizEnd = horizTotal - 2;
horizBlankEnd = horizTotal + 4;
#if 0
if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* This reportedly works around some video overlay bandwidth problems */
horizTotal += 2;
#endif
}
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vertTotal |= 1;
#if 0
ErrorF("horizDisplay: 0x%X \n", horizDisplay);
ErrorF("horizStart: 0x%X \n", horizStart);
ErrorF("horizEnd: 0x%X \n", horizEnd);
ErrorF("horizTotal: 0x%X \n", horizTotal);
ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
ErrorF("vertDisplay: 0x%X \n", vertDisplay);
ErrorF("vertStart: 0x%X \n", vertStart);
ErrorF("vertEnd: 0x%X \n", vertEnd);
ErrorF("vertTotal: 0x%X \n", vertTotal);
ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
#endif
/*
* compute correct Hsync & Vsync polarity
*/
if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
&& (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
regp->MiscOutReg = 0x23;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
regp->MiscOutReg |= 0x40;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
regp->MiscOutReg |= 0x80;
} else {
int vdisplay = mode->vdisplay;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
vdisplay *= 2;
if (mode->vscan > 1)
vdisplay *= mode->vscan;
if (vdisplay < 400)
regp->MiscOutReg = 0xA3; /* +hsync -vsync */
else if (vdisplay < 480)
regp->MiscOutReg = 0x63; /* -hsync +vsync */
else if (vdisplay < 768)
regp->MiscOutReg = 0xE3; /* -hsync -vsync */
else
regp->MiscOutReg = 0x23; /* +hsync +vsync */
}
/*
* Time Sequencer
*/
regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
/* 0x20 disables the sequencer */
if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
else
regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
/*
* CRTC
*/
regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
(1 << 4) |
XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
1 << 6 |
XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
/*
* Some extended CRTC registers (they are not saved with the rest of the vga regs).
*/
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
horizTotal = (horizTotal >> 1) & ~1;
regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
} else
regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */
/*
* Graphics Display Controller
*/
regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
regp->Attribute[0] = 0x00; /* standard colormap translation */
regp->Attribute[1] = 0x01;
regp->Attribute[2] = 0x02;
regp->Attribute[3] = 0x03;
regp->Attribute[4] = 0x04;
regp->Attribute[5] = 0x05;
regp->Attribute[6] = 0x06;
regp->Attribute[7] = 0x07;
regp->Attribute[8] = 0x08;
regp->Attribute[9] = 0x09;
regp->Attribute[10] = 0x0A;
regp->Attribute[11] = 0x0B;
regp->Attribute[12] = 0x0C;
regp->Attribute[13] = 0x0D;
regp->Attribute[14] = 0x0E;
regp->Attribute[15] = 0x0F;
regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
/* Non-vga */
regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
}
/**
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
*
* This shouldn't enable any clocks, CRTCs, or outputs, but they should
* be easily turned on/off after this.
*/
static void
nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
const struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_encoder *encoder;
bool lvds_output = false, tmds_output = false, tv_output = false,
off_chip_digital = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
bool digital = false;
if (encoder->crtc != crtc)
continue;
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
digital = lvds_output = true;
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
tv_output = true;
if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
digital = tmds_output = true;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
off_chip_digital = true;
}
/* Registers not directly related to the (s)vga mode */
/* What is the meaning of this register? */
/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
regp->crtc_eng_ctrl = 0;
/* Except for rare conditions I2C is enabled on the primary crtc */
if (nv_crtc->index == 0)
regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
#if 0
/* Set overlay to desired crtc. */
if (dev->overlayAdaptor) {
NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
if (pPriv->overlayCRTC == nv_crtc->index)
regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
}
#endif
/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
if (drm->client.device.info.chipset >= 0x11)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
/* Unblock some timings */
regp->CRTC[NV_CIO_CRE_53] = 0;
regp->CRTC[NV_CIO_CRE_54] = 0;
/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
if (lvds_output)
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
else if (tmds_output)
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
else
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
/* These values seem to vary */
/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
/* probably a scratch reg, but kept for cargo-cult purposes:
* bit0: crtc0?, head A
* bit6: lvds, head A
* bit7: (only in X), head A
*/
if (nv_crtc->index == 0)
regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
/* The blob seems to take the current value from crtc 0, add 4 to that
* and reuse the old value for crtc 1 */
regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
if (!nv_crtc->index)
regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
/* the blob sometimes sets |= 0x10 (which is the same as setting |=
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
/* This is what the blob does */
regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
else
regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->CRTC[NV_CIO_CRE_85] = 0xFF;
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (fb->format->depth + 1) / 8;
/* Enable slaved mode (called MODE_TV in nv4ref.h) */
if (lvds_output || tmds_output || tv_output)
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
/* Generic PRAMDAC regs */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* Only bit that bios and blob set. */
regp->nv10_cursync = (1 << 25);
regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
if (fb->format->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
if (drm->client.device.info.chipset >= 0x11)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
regp->tv_setup = 0;
nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
/* Some values the blob sets */
regp->ramdac_8c0 = 0x100;
regp->ramdac_a20 = 0x0;
regp->ramdac_a24 = 0xfffff;
regp->ramdac_a34 = 0x1;
}
static int
nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct drm_framebuffer *fb = crtc->primary->fb;
struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
}
return ret;
}
/**
* Sets up registers for the given mode/adjusted_mode pair.
*
* The clocks, CRTCs and outputs attached to this CRTC must be off.
*
* This shouldn't enable any clocks, CRTCs, or outputs, but they should
* be easily turned on/off after this.
*/
static int
nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_drm *drm = nouveau_drm(dev);
int ret;
NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
ret = nv_crtc_swap_fbs(crtc, old_fb);
if (ret)
return ret;
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
nv_crtc_mode_set_vga(crtc, adjusted_mode);
/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
nv_crtc_mode_set_regs(crtc, adjusted_mode);
nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
return 0;
}
static void nv_crtc_save(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg;
struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
if (nv_two_heads(crtc->dev))
NVSetOwner(crtc->dev, nv_crtc->index);
nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
/* init some state to saved value */
state->sel_clk = saved->sel_clk & ~(0x5 << 16);
crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
crtc_state->gpio_ext = crtc_saved->gpio_ext;
}
static void nv_crtc_restore(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
int head = nv_crtc->index;
uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
if (nv_two_heads(crtc->dev))
NVSetOwner(crtc->dev, head);
nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
nv_crtc->last_dpms = NV_DPMS_CLEARED;
}
static void nv_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
drm_crtc_vblank_off(crtc);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
/* Some more preparation. */
NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
}
}
static void nv_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
#ifdef __BIG_ENDIAN
/* turn on LFB swapping */
{
uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
}
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
drm_crtc_vblank_on(crtc);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc)
return;
drm_crtc_cleanup(crtc);
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
}
static void
nv_crtc_gamma_load(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
u16 *r, *g, *b;
int i;
rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
rgbs[i].r = *r++ >> 8;
rgbs[i].g = *g++ >> 8;
rgbs[i].b = *b++ >> 8;
}
nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
}
static void
nv_crtc_disable(struct drm_crtc *crtc)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
}
static int
nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
/* We need to know the depth before we upload, but it's possible to
* get called before a framebuffer is bound. If this is the case,
* mark the lut values as dirty by setting depth==0, and it'll be
* uploaded on the first mode_set_base()
*/
if (!nv_crtc->base.primary->fb) {
nv_crtc->lut.depth = 0;
return 0;
}
nv_crtc_gamma_load(crtc);
return 0;
}
static int
nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct nouveau_bo *nvbo;
struct drm_framebuffer *drm_fb;
int arb_burst, arb_lwm;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
NV_DEBUG(drm, "No FB bound\n");
return 0;
}
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that.
*/
if (atomic) {
drm_fb = passed_fb;
} else {
drm_fb = crtc->primary->fb;
}
nvbo = nouveau_gem_object(drm_fb->obj[0]);
nv_crtc->fb.offset = nvbo->offset;
if (nv_crtc->lut.depth != drm_fb->format->depth) {
nv_crtc->lut.depth = drm_fb->format->depth;
nv_crtc_gamma_load(crtc);
}
/* Update the framebuffer format. */
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (drm_fb->format->depth + 1) / 8;
regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
if (drm_fb->format->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
regp->ramdac_gen_ctrl);
regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->format->cpp[0]);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->format->cpp[0] * 8,
&arb_burst, &arb_lwm);
regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
return 0;
}
static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
int ret = nv_crtc_swap_fbs(crtc, old_fb);
if (ret)
return ret;
return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
}
static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
struct nouveau_bo *dst)
{
int width = nv_cursor_width(dev);
uint32_t pixel;
int i, j;
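/* Convert each A8R8G8B8 texel to the A1R5G5B5 layout pre-NV11 cursor
 * hardware expects: bit 31 -> 15 (A), bits 23:19 -> 14:10 (R),
 * bits 15:11 -> 9:5 (G), bits 7:3 -> 4:0 (B).
 */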
for (i = 0; i < width; i++) {
for (j = 0; j < width; j++) {
pixel = nouveau_bo_rd32(src, i*64 + j);
nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
| (pixel & 0xf80000) >> 9
| (pixel & 0xf800) >> 6
| (pixel & 0xf8) >> 3);
}
}
}
static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
struct nouveau_bo *dst)
{
uint32_t pixel;
int alpha, i;
/* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha
* cursors (though NPM in combination with fp dithering may not work on
* nv11, from "nv" driver history)
* NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
* blob uses, however we get given PM cursors so we use PM mode
*/
for (i = 0; i < 64 * 64; i++) {
pixel = nouveau_bo_rd32(src, i);
/* hw gets unhappy if alpha <= rgb values. for a PM image "less
* than" shouldn't happen; fix "equal to" case by adding one to
* alpha channel (slightly inaccurate, but so is attempting to
* get back to NPM images, due to limits of integer precision)
*/
alpha = pixel >> 24;
if (alpha > 0 && alpha < 255)
pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
#ifdef __BIG_ENDIAN
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->client.device.info.chipset == 0x11) {
pixel = ((pixel & 0x000000ff) << 24) |
((pixel & 0x0000ff00) << 8) |
((pixel & 0x00ff0000) >> 8) |
((pixel & 0xff000000) >> 24);
}
}
#endif
nouveau_bo_wr32(dst, i, pixel);
}
}
static int
nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width, uint32_t height)
{
struct nouveau_drm *drm = nouveau_drm(crtc->dev);
struct drm_device *dev = drm->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cursor = NULL;
struct drm_gem_object *gem;
int ret = 0;
if (!buffer_handle) {
nv_crtc->cursor.hide(nv_crtc, true);
return 0;
}
if (width != 64 || height != 64)
return -EINVAL;
gem = drm_gem_object_lookup(file_priv, buffer_handle);
if (!gem)
return -ENOENT;
cursor = nouveau_gem_object(gem);
ret = nouveau_bo_map(cursor);
if (ret)
goto out;
if (drm->client.device.info.chipset >= 0x11)
nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
else
nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
nouveau_bo_unmap(cursor);
nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->offset;
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
drm_gem_object_put(gem);
return ret;
}
static int
nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nv_crtc->cursor.set_pos(nv_crtc, x, y);
return 0;
}
struct nv04_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
struct drm_crtc *crtc;
int bpp, pitch;
u64 offset;
};
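/* Flip protocol: nv04_crtc_page_flip() queues one of these on the
 * channel's fctx->flip list and emits an NV_SW_PAGE_FLIP software
 * method. When the channel reaches that method, nv04_flip_complete()
 * pops the state, reprograms the scanout base, and arms any pending
 * vblank event.
 */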
static int
nv04_finish_page_flip(struct nouveau_channel *chan,
struct nv04_page_flip_state *ps)
{
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_drm *drm = chan->drm;
struct drm_device *dev = drm->dev;
struct nv04_page_flip_state *s;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (list_empty(&fctx->flip)) {
NV_ERROR(drm, "unexpected pageflip\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
s = list_first_entry(&fctx->flip, struct nv04_page_flip_state, head);
if (s->event) {
drm_crtc_arm_vblank_event(s->crtc, s->event);
} else {
/* Give up ownership of vblank for page-flipped crtc */
drm_crtc_vblank_put(s->crtc);
}
list_del(&s->head);
if (ps)
*ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
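/* Event handler run when the NV_SW_PAGE_FLIP method completes: retire
* the oldest pending flip and point the CRTC at the new framebuffer,
* offset by the current (x, y) scanout position.
*/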
int
nv04_flip_complete(struct nvif_event *event, void *argv, u32 argc)
{
struct nv04_display *disp = container_of(event, typeof(*disp), flip);
struct nouveau_drm *drm = disp->drm;
struct nouveau_channel *chan = drm->channel;
struct nv04_page_flip_state state;
if (!nv04_finish_page_flip(chan, &state)) {
nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
state.offset + state.crtc->y *
state.pitch + state.crtc->x *
state.bpp / 8);
}
return NVIF_EVENT_KEEP;
}
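/* Queue the flip state and emit the flip method. Note the ordering: the
* state is added to the pending list before the method is emitted, so
* the completion handler can always find it; any failure unlinks it
* again under event_lock.
*/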
static int
nv04_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_bo *old_bo,
struct nouveau_bo *new_bo,
struct nv04_page_flip_state *s,
struct nouveau_fence **pfence)
{
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_drm *drm = chan->drm;
struct drm_device *dev = drm->dev;
struct nvif_push *push = chan->chan.push;
unsigned long flags;
int ret;
/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
list_add_tail(&s->head, &fctx->flip);
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
ret = nouveau_fence_sync(old_bo, chan, false, false);
if (ret)
goto fail;
/* Emit the pageflip */
ret = PUSH_WAIT(push, 2);
if (ret)
goto fail;
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
ret = nouveau_fence_new(pfence, chan);
if (ret)
goto fail;
return 0;
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
spin_unlock_irqrestore(&dev->event_lock, flags);
return ret;
}
static int
nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, u32 flags,
struct drm_modeset_acquire_ctx *ctx)
{
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
struct nv04_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_cli *cli;
struct nouveau_fence *fence;
struct nv04_display *dispnv04 = nv04_display(dev);
struct nvif_push *push;
int head = nouveau_crtc(crtc)->index;
int ret;
chan = drm->channel;
if (!chan)
return -ENODEV;
cli = (void *)chan->user.client;
push = chan->chan.push;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
goto fail_free;
}
mutex_lock(&cli->mutex);
ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
if (ret)
goto fail_unpin;
/* synchronise rendering channel with the kernel's channel */
ret = nouveau_fence_sync(new_bo, chan, false, true);
if (ret) {
ttm_bo_unreserve(&new_bo->bo);
goto fail_unpin;
}
if (new_bo != old_bo) {
ttm_bo_unreserve(&new_bo->bo);
ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
if (ret)
goto fail_unpin;
}
/* Initialize a page flip struct */
*s = (struct nv04_page_flip_state)
{ { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0],
new_bo->offset };
/* Keep vblanks on during flip, for the target crtc of this flip */
drm_crtc_vblank_get(crtc);
/* Emit a page flip */
if (swap_interval) {
ret = PUSH_WAIT(push, 8);
if (ret)
goto fail_unreserve;
PUSH_NVSQ(push, NV05F, 0x012c, 0);
PUSH_NVSQ(push, NV05F, 0x0134, head);
PUSH_NVSQ(push, NV05F, 0x0100, 0);
PUSH_NVSQ(push, NV05F, 0x0130, 0);
}
nouveau_bo_ref(new_bo, &dispnv04->image[head]);
ret = nv04_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
mutex_unlock(&cli->mutex);
/* Update the crtc struct and cleanup */
crtc->primary->fb = fb;
nouveau_bo_fence(old_bo, fence, false);
ttm_bo_unreserve(&old_bo->bo);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
drm_crtc_vblank_put(crtc);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&cli->mutex);
if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
fail_free:
kfree(s);
return ret;
}
static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
.enable_vblank = nouveau_display_vblank_enable,
.disable_vblank = nouveau_display_vblank_disable,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.dpms = nv_crtc_dpms,
.prepare = nv_crtc_prepare,
.commit = nv_crtc_commit,
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.disable = nv_crtc_disable,
.get_scanout_position = nouveau_display_scanoutpos,
};
static const uint32_t modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
};
static const struct drm_plane_funcs nv04_primary_plane_funcs = {
DRM_PLANE_NON_ATOMIC_FUNCS,
};
static int
nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_crtc *nv_crtc = container_of(event, struct nouveau_crtc, vblank);
drm_crtc_handle_vblank(&nv_crtc->base);
return NVIF_EVENT_KEEP;
}
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
struct drm_plane *primary;
int ret;
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
if (!nv_crtc)
return -ENOMEM;
nv_crtc->lut.depth = 0;
nv_crtc->index = crtc_num;
nv_crtc->last_dpms = NV_DPMS_CLEARED;
nv_crtc->save = nv_crtc_save;
nv_crtc->restore = nv_crtc_restore;
primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
&nv04_primary_plane_funcs,
modeset_formats,
ARRAY_SIZE(modeset_formats), NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
kfree(nv_crtc);
return ret;
}
drm_crtc_init_with_planes(dev, &nv_crtc->base, primary, NULL,
&nv04_crtc_funcs, NULL);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
&nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
if (ret)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
nv04_cursor_init(nv_crtc);
ret = nvif_head_ctor(&disp->disp, nv_crtc->base.name, nv_crtc->index, &nv_crtc->head);
if (ret)
return ret;
return nvif_head_vblank_event_ctor(&nv_crtc->head, "kmsVbl", nv04_crtc_vblank_handler,
false, &nv_crtc->vblank);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/crtc.c |
/*
* Copyright 1993-2003 NVIDIA, Corporation
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
/****************************************************************************\
* *
* The video arbitration routines calculate some "magic" numbers, which *
* fix the snow seen when accessing the framebuffer without them. *
* It just works (I hope). *
* *
\****************************************************************************/
struct nv_fifo_info {
int lwm;
int burst;
};
struct nv_sim_state {
int pclk_khz;
int mclk_khz;
int nvclk_khz;
int bpp;
int mem_page_miss;
int mem_latency;
int memory_type;
int memory_width;
int two_heads;
};
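/* Derive the CRTC FIFO low watermark for NV04-class chips. The us_*
* terms are worst-case refill latencies in ns (cycles * 1e6 / kHz) and
* crtc_drain_rate is in kB/s, so clwm = latency * rate / 1e6 is the
* number of bytes the CRTC drains while a refill is outstanding; the
* loop then backs off mclk_extra until watermark and burst fit.
*/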
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
int pagemiss, cas, bpp;
int nvclks, mclks, crtpagemiss;
int found, mclk_extra, mclk_loop, cbs, m1, p1;
int mclk_freq, pclk_freq, nvclk_freq;
int us_m, us_n, us_p, crtc_drain_rate;
int cpm_us, us_crt, clwm;
pclk_freq = arb->pclk_khz;
mclk_freq = arb->mclk_khz;
nvclk_freq = arb->nvclk_khz;
pagemiss = arb->mem_page_miss;
cas = arb->mem_latency;
bpp = arb->bpp;
cbs = 128;
nvclks = 10;
mclks = 13 + cas;
mclk_extra = 3;
found = 0;
while (!found) {
found = 1;
mclk_loop = mclks + mclk_extra;
us_m = mclk_loop * 1000 * 1000 / mclk_freq;
us_n = nvclks * 1000 * 1000 / nvclk_freq;
us_p = nvclks * 1000 * 1000 / pclk_freq;
crtc_drain_rate = pclk_freq * bpp / 8;
crtpagemiss = 2;
crtpagemiss += 1;
cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
us_crt = cpm_us + us_m + us_n + us_p;
clwm = us_crt * crtc_drain_rate / (1000 * 1000);
clwm++;
m1 = clwm + cbs - 512;
p1 = m1 * pclk_freq / mclk_freq;
p1 = p1 * bpp / 8;
if ((p1 < m1 && m1 > 0) || clwm > 519) {
found = !mclk_extra;
mclk_extra--;
}
if (clwm < 384)
clwm = 384;
fifo->lwm = clwm;
fifo->burst = cbs;
}
}
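/* NV10+ arbitration model: fill_rate is what memory can deliver and
* drain_rate what the CRTC consumes (both kB/s). The burst is the
* largest power of two that neither overflows the FIFO while a refill
* is in flight nor exceeds the burst_lat latency budget; the low
* watermark is then placed 10% of the way between the minimum safe
* value and the maximum the FIFO allows.
*/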
static void
nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
int fill_rate, drain_rate;
int pclks, nvclks, mclks, xclks;
int pclk_freq, nvclk_freq, mclk_freq;
int fill_lat, extra_lat;
int max_burst_o, max_burst_l;
int fifo_len, min_lwm, max_lwm;
const int burst_lat = 80; /* Maximum allowable latency due
* to the CRTC FIFO burst. (ns) */
pclk_freq = arb->pclk_khz;
nvclk_freq = arb->nvclk_khz;
mclk_freq = arb->mclk_khz;
fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
fifo_len = arb->two_heads ? 1536 : 1024; /* B */
/* Fixed FIFO refill latency. */
pclks = 4; /* lwm detect. */
nvclks = 3 /* lwm -> sync. */
+ 2 /* fbi bus cycles (1 req + 1 busy) */
+ 1 /* 2 edge sync. may be very close to edge so
* just put one. */
+ 1 /* fbi_d_rdv_n */
+ 1 /* Fbi_d_rdata */
+ 1; /* crtfifo load */
mclks = 1 /* 2 edge sync. may be very close to edge so
* just put one. */
+ 1 /* arb_hp_req */
+ 5 /* tiling pipeline */
+ 2 /* latency fifo */
+ 2 /* memory request to fbio block */
+ 7; /* data returned from fbio block */
/* Need to accumulate 256 bits for read */
mclks += (arb->memory_type == 0 ? 2 : 1)
* arb->memory_width / 32;
fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */
+ nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */
+ pclks * 1000 * 1000 / pclk_freq; /* pclk latency */
/* Conditional FIFO refill latency. */
xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
* the overlay. */
+ 2 * arb->mem_page_miss /* Extra pagemiss latency. */
+ (arb->bpp == 32 ? 8 : 4); /* Margin of error. */
extra_lat = xclks * 1000 * 1000 / mclk_freq;
if (arb->two_heads)
/* Account for another CRTC. */
extra_lat += fill_lat + extra_lat + burst_lat;
/* FIFO burst */
/* Max burst not leading to overflows. */
max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
fifo->burst = min(max_burst_o, 1024);
/* Max burst value with an acceptable latency. */
max_burst_l = burst_lat * fill_rate / (1000 * 1000);
fifo->burst = min(max_burst_l, fifo->burst);
fifo->burst = rounddown_pow_of_two(fifo->burst);
/* FIFO low watermark */
min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
max_lwm = fifo_len - fifo->burst
+ fill_lat * drain_rate / (1000 * 1000)
+ fifo->burst * drain_rate / fill_rate;
fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
}
static void
nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
struct pci_dev *pdev = to_pci_dev(dev->dev);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
sim_data.nvclk_khz = NVClk;
sim_data.bpp = bpp;
sim_data.two_heads = nv_two_heads(dev);
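/* nForce/nForce2 IGPs carve the framebuffer out of system memory, so
* the memory characteristics are read from the host bridge (device 0,
* function 1) rather than from PFB.
*/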
if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
(pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
int domain = pci_domain_nr(pdev->bus);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
0x7c, &type);
sim_data.memory_type = (type >> 12) & 1;
sim_data.memory_width = 64;
sim_data.mem_latency = 3;
sim_data.mem_page_miss = 10;
} else {
sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
sim_data.mem_latency = cfg1 & 0xf;
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
}
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
nv04_calc_arb(&fifo_data, &sim_data);
else
nv10_calc_arb(&fifo_data, &sim_data);
*burst = ilog2(fifo_data.burst >> 4);
*lwm = fifo_data.lwm >> 3;
}
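/* NV20 and later have a FIFO large enough that a fixed 2 KiB size with
* a 512-byte burst works; no clock-based simulation is needed.
*/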
static void
nv20_update_arb(int *burst, int *lwm)
{
unsigned int fifo_size, burst_size, graphics_lwm;
fifo_size = 2048;
burst_size = 512;
graphics_lwm = fifo_size - burst_size;
*burst = ilog2(burst_size >> 5);
*lwm = graphics_lwm >> 3;
}
void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
*burst = 128;
*lwm = 0x0480;
} else
nv20_update_arb(burst, lwm);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/arb.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/i2c/ch7006.h>
static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
{
{
I2C_BOARD_INFO("ch7006", 0x75),
.platform_data = &(struct ch7006_encoder_params) {
CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
0, 0, 0,
CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
}
},
0
},
{ }
};
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
if (bus) {
return nvkm_i2c_bus_probe(bus, "TV encoder",
nv04_tv_encoder_info,
NULL, NULL);
}
return -ENODEV;
}
#define PLLSEL_TV_CRTC1_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
#define PLLSEL_TV_CRTC2_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
uint8_t crtc1A;
NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
mode, nv_encoder->dcb->index);
state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
if (mode == DRM_MODE_DPMS_ON) {
int head = nouveau_crtc(encoder->crtc)->index;
crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
PLLSEL_TV_CRTC1_MASK;
/* Inhibit hsync */
crtc1A |= 0x80;
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
}
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
get_slave_funcs(encoder)->dpms(encoder, mode);
}
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
{
struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head];
state->tv_setup = 0;
if (bind)
state->CRTC[NV_CIO_CRE_49] |= 0x10;
else
state->CRTC[NV_CIO_CRE_49] &= ~0x10;
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
state->CRTC[NV_CIO_CRE_LCD__INDEX]);
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
state->CRTC[NV_CIO_CRE_49]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
state->tv_setup);
}
static void nv04_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_disable(dev, head);
if (nv_two_heads(dev))
nv04_tv_bind(dev, head ^ 1, false);
nv04_tv_bind(dev, head, true);
}
static void nv04_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
regp->tv_htotal = adjusted_mode->htotal;
regp->tv_vtotal = adjusted_mode->vtotal;
/* These delay the TV signals with respect to the VGA port;
* they might be useful if we ever allow a CRTC to drive
* multiple outputs.
*/
regp->tv_hskew = 1;
regp->tv_hsync_delay = 1;
regp->tv_hsync_delay2 = 64;
regp->tv_vskew = 1;
regp->tv_vsync_delay = 1;
get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
static void nv04_tv_commit(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
get_slave_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(encoder->helper_private);
kfree(nouveau_encoder(encoder));
}
static const struct drm_encoder_funcs nv04_tv_funcs = {
.destroy = nv04_tv_destroy,
};
static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
.dpms = nv04_tv_dpms,
.mode_fixup = drm_i2c_encoder_mode_fixup,
.prepare = nv04_tv_prepare,
.commit = nv04_tv_commit,
.mode_set = nv04_tv_mode_set,
.detect = drm_i2c_encoder_detect,
};
int
nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
/* Ensure that we can talk to this encoder */
type = nv04_tv_identify(dev, entry->i2c_index);
if (type < 0)
return type;
/* Allocate the necessary memory */
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC,
NULL);
drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
nv_encoder->enc_save = drm_i2c_encoder_save;
nv_encoder->enc_restore = drm_i2c_encoder_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
&bus->i2c,
&nv04_tv_encoder_info[type].dev);
if (ret < 0)
goto fail_cleanup;
/* Attach it to the specified connector. */
get_slave_funcs(encoder)->create_resources(encoder, connector);
drm_connector_attach_encoder(connector, encoder);
return 0;
fail_cleanup:
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/tvnv04.c |
/*
* Copyright 2006 Dave Airlie
* Copyright 2007 Maarten Maathuis
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "nouveau_drv.h"
#include "hw.h"
#include <subdev/bios/pll.h>
#include <nvif/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
/*
* misc hw access wrappers/control functions
*/
void
NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
}
uint8_t
NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
{
NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
}
void
NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
}
uint8_t
NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
{
NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
}
/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
* it affects only the 8 bit vga io regs, which we access using mmio at
* 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
* in general, the set value of cr44 does not matter: reg access works as
* expected and values can be set for the appropriate head by using a 0x2000
* offset as required
* however:
* a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
* cr44 must be set to 0 or 3 for accessing values on the correct head
* through the common 0xc03c* addresses
* b) in tied mode (4) head B is programmed to the values set on head A, and
* access using the head B addresses can have strange results, ergo we leave
* tied mode in init once we know to what cr44 should be restored on exit
*
* the owner parameter is slightly abused:
* 0 and 1 are treated as head values and so the set value is (owner * 3)
* other values are treated as literal values to set
*/
void
NVSetOwner(struct drm_device *dev, int owner)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (owner == 1)
owner *= 3;
if (drm->client.device.info.chipset == 0x11) {
/* This might seem stupid, but the blob does it and
* omitting it often locks the system up.
*/
NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
}
/* CR44 is always changed on CRTC0 */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
if (drm->client.device.info.chipset == 0x11) { /* set me harder */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
}
}
void
NVBlankScreen(struct drm_device *dev, int head, bool blank)
{
unsigned char seq1;
if (nv_two_heads(dev))
NVSetOwner(dev, head);
seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
NVVgaSeqReset(dev, head, true);
if (blank)
NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
else
NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
NVVgaSeqReset(dev, head, false);
}
/*
* PLL getting
*/
static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
uint32_t pll2, struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
/* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
pllvals->log2P = (pll1 >> 16) & 0x7;
pllvals->N2 = pllvals->M2 = 1;
if (reg1 <= 0x405c) {
pllvals->NM1 = pll2 & 0xffff;
/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
if (!(pll1 & 0x1100))
pllvals->NM2 = pll2 >> 16;
} else {
pllvals->NM1 = pll1 & 0xffff;
if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
pllvals->NM2 = pll2 & 0xffff;
else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) {
pllvals->M1 &= 0xf; /* only 4 bits */
if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
pllvals->M2 = (pll1 >> 4) & 0x7;
pllvals->N2 = ((pll1 >> 21) & 0x18) |
((pll1 >> 19) & 0x7);
}
}
}
}
int
nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
struct nvkm_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
ret = nvbios_pll_parse(bios, plltype, &pll_lim);
if (ret || !(reg1 = pll_lim.reg))
return -ENOENT;
pll1 = nvif_rd32(device, reg1);
if (reg1 <= 0x405c)
pll2 = nvif_rd32(device, reg1 + 4);
else if (nv_two_reg_pll(dev)) {
uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
pll2 = nvif_rd32(device, reg2);
}
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
/* check whether vpll has been forced into single stage mode */
if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
pll2 = 0;
} else
if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
pll2 = 0;
}
nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
pllvals->refclk = pll_lim.refclk;
return 0;
}
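/* Output frequency of a (possibly two-stage) PLL, in the same units as
* refclk: clk = refclk * (N1 * N2) / (M1 * M2) / 2^log2P. Single-stage
* PLLs have N2 = M2 = 1 (see nouveau_hw_decode_pll()).
*/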
int
nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv)
{
/* Avoid divide by zero if called at an inappropriate time */
if (!pv->M1 || !pv->M2)
return 0;
return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
}
int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nvkm_pll_vals pllvals;
int ret;
int domain;
domain = pci_domain_nr(pdev->bus);
if (plltype == PLL_MEMORY &&
(pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
0x6c, &mpllP);
mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
return 400000 / mpllP;
} else
if (plltype == PLL_MEMORY &&
(pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
0x4c, &clock);
return clock / 1000;
}
ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
if (ret)
return ret;
return nouveau_hw_pllvals_to_clk(&pllvals);
}
static void
nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
{
/* the vpll on an unused head can come up with a random value, way
* beyond the pll limits. for some reason this causes the chip to
* lock up when reading the dac palette regs, so set a valid pll here
* when such a condition is detected. only seen on nv11 to date
*/
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct nvkm_clk *clk = nvxx_clk(device);
struct nvkm_bios *bios = nvxx_bios(device);
struct nvbios_pll pll_lim;
struct nvkm_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
if (nvbios_pll_parse(bios, pll, &pll_lim))
return;
nouveau_hw_get_pllvals(dev, pll, &pv);
if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
pv.log2P <= pll_lim.max_p)
return;
NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
/* set lowest clock within static limits */
pv.M1 = pll_lim.vco1.max_m;
pv.N1 = pll_lim.vco1.min_n;
pv.log2P = pll_lim.max_p_usable;
clk->pll_prog(clk, pll_lim.reg, &pv);
}
/*
* vga font save/restore
*/
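/* Copy one plane of VGA memory through the 64 KiB planar window. The
* sequencer plane-mask register selects the plane for writes and the
* graphics controller read-map register selects it for reads.
*/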
static void nouveau_vga_font_io(struct drm_device *dev,
void __iomem *iovram,
bool save, unsigned plane)
{
unsigned i;
NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
for (i = 0; i < 16384; i++) {
if (save) {
nv04_display(dev)->saved_vga_font[plane][i] =
ioread32_native(iovram + i * 4);
} else {
iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
iovram + i * 4);
}
}
}
void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint8_t misc, gr4, gr5, gr6, seq2, seq4;
bool graphicsmode;
unsigned plane;
void __iomem *iovram;
if (nv_two_heads(dev))
NVSetOwner(dev, 0);
NVSetEnablePalette(dev, 0, true);
graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
NVSetEnablePalette(dev, 0, false);
if (graphicsmode) /* graphics mode => framebuffer => no need to save */
return;
NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
/* map first 64KiB of VRAM, holds VGA fonts etc */
iovram = ioremap(pci_resource_start(pdev, 1), 65536);
if (!iovram) {
NV_ERROR(drm, "Failed to map VRAM, "
"cannot save/restore VGA fonts.\n");
return;
}
if (nv_two_heads(dev))
NVBlankScreen(dev, 1, true);
NVBlankScreen(dev, 0, true);
/* save control regs */
misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
/* store font in planes 0..3 */
for (plane = 0; plane < 4; plane++)
nouveau_vga_font_io(dev, iovram, save, plane);
/* restore control regs */
NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
if (nv_two_heads(dev))
NVBlankScreen(dev, 1, false);
NVBlankScreen(dev, 0, false);
iounmap(iovram);
}
/*
* mode state save/load
*/
static void
rd_cio_state(struct drm_device *dev, int head,
struct nv04_crtc_reg *crtcstate, int index)
{
crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
}
static void
wr_cio_state(struct drm_device *dev, int head,
struct nv04_crtc_reg *crtcstate, int index)
{
NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
}
static void
nv_save_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, ®p->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
if (drm->client.device.info.chipset == 0x11)
regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
if (nv_gf4_disp_arch(dev))
regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
if (drm->client.device.info.chipset >= 0x30)
regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
for (i = 0; i < 7; i++) {
uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
}
if (nv_gf4_disp_arch(dev)) {
regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
for (i = 0; i < 3; i++) {
regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
}
}
regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
if (!nv_gf4_disp_arch(dev) && head == 0) {
/* early chips don't allow access to PRAMDAC_TMDS_* without
* the head A FPCLK on (nv11 even locks up) */
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
}
regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
if (nv_gf4_disp_arch(dev))
regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
for (i = 0; i < 38; i++)
regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
NV_PRAMDAC_CTV + 4*i);
}
}
static void
nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
clk->pll_prog(clk, pllreg, ®p->pllvals);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
if (nv_two_heads(dev))
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
if (drm->client.device.info.chipset == 0x11)
NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
if (drm->client.device.info.chipset >= 0x30)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
for (i = 0; i < 7; i++) {
uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
}
if (nv_gf4_disp_arch(dev)) {
NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
for (i = 0; i < 3; i++) {
NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
}
}
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
for (i = 0; i < 38; i++)
NVWriteRAMDAC(dev, head,
NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
}
}
static void
nv_save_state_vga(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
for (i = 0; i < 25; i++)
rd_cio_state(dev, head, regp, i);
NVSetEnablePalette(dev, head, true);
for (i = 0; i < 21; i++)
regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
NVSetEnablePalette(dev, head, false);
for (i = 0; i < 9; i++)
regp->Graphics[i] = NVReadVgaGr(dev, head, i);
for (i = 0; i < 5; i++)
regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}
static void
nv_load_state_vga(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
for (i = 0; i < 5; i++)
NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
nv_lock_vga_crtc_base(dev, head, false);
for (i = 0; i < 25; i++)
wr_cio_state(dev, head, regp, i);
nv_lock_vga_crtc_base(dev, head, true);
for (i = 0; i < 9; i++)
NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
NVSetEnablePalette(dev, head, true);
for (i = 0; i < 21; i++)
NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
NVSetEnablePalette(dev, head, false);
}
static void
nv_save_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
rd_cio_state(dev, head, regp, 0x9f);
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
if (nv_two_heads(dev))
regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
}
regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
}
/* NV11 and NV20 don't have this; they stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
for (i = 0; i < 0x10; i++)
regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
}
regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}
static void
nv_load_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
if (nv_two_heads(dev))
/* setting ENGINE_CTRL (EC) *must* come before
* CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
* EC that should not be overwritten by writing stale EC
*/
NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
nvif_wr32(device, NV_PVIDEO_STOP, 1);
nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1);
nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1);
nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1);
nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1);
nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
else
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
}
}
NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
wr_cio_state(dev, head, regp, 0x9f);
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
}
/* NV11 and NV20 stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
* CRE_53/CRE_54 causes lockups. */
nvif_msec(&drm->client.device, 650,
if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
break;
);
nvif_msec(&drm->client.device, 650,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
break;
);
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
for (i = 0; i < 0x10; i++)
NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
}
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
}
static void
nv_save_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
state->crtc_reg[head].DAC[i] = nvif_rd08(device,
NV_PRMDIO_PALETTE_DATA + head_offset);
}
NVSetEnablePalette(dev, head, false);
}
void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
int head_offset = head * NV_PRMDIO_SIZE, i;
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
state->crtc_reg[head].DAC[i]);
}
NVSetEnablePalette(dev, head, false);
}
void nouveau_hw_save_state(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->client.device.info.chipset == 0x11)
/* NB: no attempt is made to restore the bad pll later on */
nouveau_hw_fix_bad_vpll(dev, head);
nv_save_state_ramdac(dev, head, state);
nv_save_state_vga(dev, head, state);
nv_save_state_palette(dev, head, state);
nv_save_state_ext(dev, head, state);
}
void nouveau_hw_load_state(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
NVVgaProtect(dev, head, true);
nv_load_state_ramdac(dev, head, state);
nv_load_state_ext(dev, head, state);
nouveau_hw_load_state_palette(dev, head, state);
nv_load_state_vga(dev, head, state);
NVVgaProtect(dev, head, false);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/hw.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include "tvnv17.h"
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
"\t\tDefault: PAL\n"
"\t\t*NOTE* Ignored for cards with external TV encoders.");
static char *nouveau_tv_norm;
module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
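/* TV DAC load detection for NV42/NV43: route a known test level to the
* TV DAC with the flat panel timing generator running a safe raster,
* then read back the sense bits in PRAMDAC_TEST_CONTROL to discover
* which output pins have something attached. All clobbered state is
* saved up front and restored afterwards.
*/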
static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
uint32_t sample = 0;
int head;
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
if (drm->vbios.tvdactestval)
testval = drm->vbios.tvdactestval;
dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
(dacclk & ~0xff) | 0x22);
msleep(1);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
(dacclk & ~0xff) | 0x21);
NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
/* Sample pin 0x4 (usually S-video luma). */
NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
msleep(20);
sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
& 0x4 << 28;
/* Sample the remaining pins. */
NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
msleep(20);
sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
& 0xa << 28;
/* Restore the previous state. */
NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
return sample;
}
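/* Returns true when sampling-based load detection is expected to be
* reliable on this board; when a per-device quirk supplies a fixed TV
* pin mask instead, use it and report detection as unreliable.
*/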
static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
if (device->quirk && device->quirk->tv_pin_mask) {
*pin_mask = device->quirk->tv_pin_mask;
return false;
}
return true;
}
static enum drm_connector_status
nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct dcb_output *dcb = tv_enc->base.dcb;
bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
if (nv04_dac_in_use(encoder))
return connector_status_disconnected;
if (reliable) {
if (drm->client.device.info.chipset == 0x42 ||
drm->client.device.info.chipset == 0x43)
tv_enc->pin_mask =
nv42_tv_sample_load(encoder) >> 28 & 0xe;
else
tv_enc->pin_mask =
nv17_dac_sample_load(encoder) >> 28 & 0xe;
}
switch (tv_enc->pin_mask) {
case 0x2:
case 0x4:
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
break;
case 0xc:
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
break;
case 0xe:
if (dcb->tvconf.has_component_output)
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
else
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
break;
default:
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
break;
}
drm_object_property_set_value(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
if (!reliable) {
return connector_status_unknown;
} else if (tv_enc->subconnector) {
NV_INFO(drm, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
} else {
return connector_status_disconnected;
}
}
static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
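/* tv_enc_mode.vrefresh appears to be stored in mHz (cf. the vsync
* tolerance check in nv17_tv_mode_valid()), so refresh * htotal/1000 *
* vtotal/1000 yields kHz, e.g. 59940 * 858/1000 * 525/1000 ~= 27000
* kHz for NTSC-M.
*/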
mode->clock = tv_norm->tv_enc_mode.vrefresh *
mode->htotal / 1000 *
mode->vtotal / 1000;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
mode->clock *= 2;
if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
n++;
}
return n;
}
static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
struct drm_display_mode *mode;
const struct {
int hdisplay;
int vdisplay;
} modes[] = {
{ 640, 400 },
{ 640, 480 },
{ 720, 480 },
{ 720, 576 },
{ 800, 600 },
{ 1024, 768 },
{ 1280, 720 },
{ 1280, 1024 },
{ 1920, 1080 }
};
int i, n = 0;
for (i = 0; i < ARRAY_SIZE(modes); i++) {
if (modes[i].hdisplay > output_mode->hdisplay ||
modes[i].vdisplay > output_mode->vdisplay)
continue;
if (modes[i].hdisplay == output_mode->hdisplay &&
modes[i].vdisplay == output_mode->vdisplay) {
mode = drm_mode_duplicate(encoder->dev, output_mode);
mode->type |= DRM_MODE_TYPE_PREFERRED;
} else {
mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
modes[i].vdisplay, 60, false,
(output_mode->flags &
DRM_MODE_FLAG_INTERLACE), false);
}
		/* CVT sync placement is sometimes unsuitable here, so force
		 * the encoder's horizontal timings for very narrow or very
		 * wide output modes, and its vertical timings for tall ones.
		 */
if (output_mode->hdisplay <= 720
|| output_mode->hdisplay >= 1920) {
mode->htotal = output_mode->htotal;
mode->hsync_start = (mode->hdisplay + (mode->htotal
- mode->hdisplay) * 9 / 10) & ~7;
mode->hsync_end = mode->hsync_start + 8;
}
if (output_mode->vdisplay >= 1024) {
mode->vtotal = output_mode->vtotal;
mode->vsync_start = output_mode->vsync_start;
mode->vsync_end = output_mode->vsync_end;
}
mode->type |= DRM_MODE_TYPE_DRIVER;
drm_mode_probed_add(connector, mode);
n++;
}
return n;
}
static int nv17_tv_get_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
if (tv_norm->kind == CTV_ENC_MODE)
return nv17_tv_get_hd_modes(encoder, connector);
else
return nv17_tv_get_ld_modes(encoder, connector);
}
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
if (tv_norm->kind == CTV_ENC_MODE) {
struct drm_display_mode *output_mode =
&tv_norm->ctv_enc_mode.mode;
if (mode->clock > 400000)
return MODE_CLOCK_HIGH;
if (mode->hdisplay > output_mode->hdisplay ||
mode->vdisplay > output_mode->vdisplay)
return MODE_BAD;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
(output_mode->flags & DRM_MODE_FLAG_INTERLACE))
return MODE_NO_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
} else {
const int vsync_tolerance = 600;
if (mode->clock > 70000)
return MODE_CLOCK_HIGH;
if (abs(drm_mode_vrefresh(mode) * 1000 -
tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
return MODE_VSYNC;
/* The encoder takes care of the actual interlacing */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
}
return MODE_OK;
}
static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
if (nv04_dac_in_use(encoder))
return false;
if (tv_norm->kind == CTV_ENC_MODE)
adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
else
adjusted_mode->clock = 90000;
return true;
}
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
if (nouveau_encoder(encoder)->last_dpms == mode)
return;
nouveau_encoder(encoder)->last_dpms = mode;
NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
mode, nouveau_encoder(encoder)->dcb->index);
regs->ptv_200 &= ~1;
if (tv_norm->kind == CTV_ENC_MODE) {
nv04_dfp_update_fp_control(encoder, mode);
} else {
nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
if (mode == DRM_MODE_DPMS_ON)
regs->ptv_200 |= 1;
}
nv_load_ptv(dev, regs, 200);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
static void nv17_tv_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
int head = nouveau_crtc(encoder->crtc)->index;
uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
NV_CIO_CRE_LCD__INDEX];
uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder);
uint32_t dacclk;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_disable(dev, head);
/* Unbind any FP encoders from this head if we need the FP
* stuff enabled. */
if (tv_norm->kind == CTV_ENC_MODE) {
struct drm_encoder *enc;
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
struct dcb_output *dcb = nouveau_encoder(enc)->dcb;
if ((dcb->type == DCB_OUTPUT_TMDS ||
dcb->type == DCB_OUTPUT_LVDS) &&
!enc->crtc &&
nv04_dfp_get_bound_head(dev, dcb) == head) {
nv04_dfp_bind_head(dev, dcb, head ^ 1,
drm->vbios.fp.dual_link);
}
}
}
if (tv_norm->kind == CTV_ENC_MODE)
*cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
dacclk |= 0x1a << 16;
if (tv_norm->kind == CTV_ENC_MODE) {
dacclk |= 0x20;
if (head)
dacclk |= 0x100;
else
dacclk &= ~0x100;
} else {
dacclk |= 0x10;
}
NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
}
static void nv17_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *drm_mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
int i;
regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
regs->tv_setup = 1;
regs->ramdac_8c0 = 0x0;
if (tv_norm->kind == TV_ENC_MODE) {
tv_regs->ptv_200 = 0x13111100;
if (head)
tv_regs->ptv_200 |= 0x10;
tv_regs->ptv_20c = 0x808010;
tv_regs->ptv_304 = 0x2d00000;
tv_regs->ptv_600 = 0x0;
tv_regs->ptv_60c = 0x0;
tv_regs->ptv_610 = 0x1e00000;
if (tv_norm->tv_enc_mode.vdisplay == 576) {
tv_regs->ptv_508 = 0x1200000;
tv_regs->ptv_614 = 0x33;
} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
tv_regs->ptv_508 = 0xf00000;
tv_regs->ptv_614 = 0x13;
}
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
tv_regs->ptv_500 = 0xe8e0;
tv_regs->ptv_504 = 0x1710;
tv_regs->ptv_604 = 0x0;
tv_regs->ptv_608 = 0x0;
} else {
if (tv_norm->tv_enc_mode.vdisplay == 576) {
tv_regs->ptv_604 = 0x20;
tv_regs->ptv_608 = 0x10;
tv_regs->ptv_500 = 0x19710;
tv_regs->ptv_504 = 0x68f0;
} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
tv_regs->ptv_604 = 0x10;
tv_regs->ptv_608 = 0x20;
tv_regs->ptv_500 = 0x4b90;
tv_regs->ptv_504 = 0x1b480;
}
}
for (i = 0; i < 0x40; i++)
tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
} else {
struct drm_display_mode *output_mode =
&tv_norm->ctv_enc_mode.mode;
		/* The registers in PRAMDAC+0xc00 control some timings and CSC
		 * parameters for the CTV encoder (it's only used for "HD" TV
		 * modes, and I don't have enough of it working to guess what
		 * they mean exactly...); it's probably connected at the
		 * output of the FP encoder, but it also needs the analog
		 * encoder in its OR enabled and routed to the head it's
		 * using. It's enabled with the DACCLK register, bits [5:4].
		 */
for (i = 0; i < 38; i++)
regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
regs->fp_horiz_regs[FP_SYNC_START] =
output_mode->hsync_start - 1;
regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
max((output_mode->hdisplay-600)/40 - 1, 1);
regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
regs->fp_vert_regs[FP_SYNC_START] =
output_mode->vsync_start - 1;
regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
regs->fp_debug_2 = 0;
regs->fp_margin_color = 0x801080;
}
}
static void nv17_tv_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
nv17_tv_update_rescaler(encoder);
nv17_tv_update_properties(encoder);
} else {
nv17_ctv_update_rescaler(encoder);
}
nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
/* This could use refinement for flatpanels, but it should work */
if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
nv04_dac_output_offset(encoder),
0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
nv04_dac_output_offset(encoder),
0x00100000);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv17_tv_save(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
nouveau_encoder(encoder)->restore.output =
NVReadRAMDAC(dev, 0,
NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder));
nv17_tv_state_save(dev, &tv_enc->saved_state);
tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
}
static void nv17_tv_restore(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder),
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
static int nv17_tv_create_resources(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
NUM_LD_TV_NORMS;
int i;
if (nouveau_tv_norm) {
i = match_string(nv17_tv_norm_names, num_tv_norms,
nouveau_tv_norm);
if (i < 0)
NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
nouveau_tv_norm);
else
tv_enc->tv_norm = i;
}
drm_mode_create_tv_properties_legacy(dev, num_tv_norms, nv17_tv_norm_names);
drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
tv_enc->select_subconnector);
drm_object_attach_property(&connector->base,
conf->tv_subconnector_property,
tv_enc->subconnector);
drm_object_attach_property(&connector->base,
conf->legacy_tv_mode_property,
tv_enc->tv_norm);
drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
tv_enc->flicker);
drm_object_attach_property(&connector->base,
conf->tv_saturation_property,
tv_enc->saturation);
drm_object_attach_property(&connector->base,
conf->tv_hue_property,
tv_enc->hue);
drm_object_attach_property(&connector->base,
conf->tv_overscan_property,
tv_enc->overscan);
return 0;
}
static int nv17_tv_set_property(struct drm_encoder *encoder,
struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_mode_config *conf = &encoder->dev->mode_config;
struct drm_crtc *crtc = encoder->crtc;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
bool modes_changed = false;
if (property == conf->tv_overscan_property) {
tv_enc->overscan = val;
if (encoder->crtc) {
if (tv_norm->kind == CTV_ENC_MODE)
nv17_ctv_update_rescaler(encoder);
else
nv17_tv_update_rescaler(encoder);
}
} else if (property == conf->tv_saturation_property) {
if (tv_norm->kind != TV_ENC_MODE)
return -EINVAL;
tv_enc->saturation = val;
nv17_tv_update_properties(encoder);
} else if (property == conf->tv_hue_property) {
if (tv_norm->kind != TV_ENC_MODE)
return -EINVAL;
tv_enc->hue = val;
nv17_tv_update_properties(encoder);
} else if (property == conf->tv_flicker_reduction_property) {
if (tv_norm->kind != TV_ENC_MODE)
return -EINVAL;
tv_enc->flicker = val;
if (encoder->crtc)
nv17_tv_update_rescaler(encoder);
} else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
tv_enc->tv_norm = val;
modes_changed = true;
} else if (property == conf->tv_select_subconnector_property) {
if (tv_norm->kind != TV_ENC_MODE)
return -EINVAL;
tv_enc->select_subconnector = val;
nv17_tv_update_properties(encoder);
} else {
return -EINVAL;
}
if (modes_changed) {
drm_helper_probe_single_connector_modes(connector, 0, 0);
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
if (crtc)
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->primary->fb);
}
return 0;
}
static void nv17_tv_destroy(struct drm_encoder *encoder)
{
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
drm_encoder_cleanup(encoder);
kfree(tv_enc);
}
static const struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.dpms = nv17_tv_dpms,
.mode_fixup = nv17_tv_mode_fixup,
.prepare = nv17_tv_prepare,
.commit = nv17_tv_commit,
.mode_set = nv17_tv_mode_set,
.detect = nv17_tv_detect,
};
static const struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
.get_modes = nv17_tv_get_modes,
.mode_valid = nv17_tv_mode_valid,
.create_resources = nv17_tv_create_resources,
.set_property = nv17_tv_set_property,
};
static const struct drm_encoder_funcs nv17_tv_funcs = {
.destroy = nv17_tv_destroy,
};
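/*
 * Create and register the on-chip TV-out encoder for a DCB entry, with
 * PAL and mid-range picture settings as the defaults.
 */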
int
nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
struct nv17_tv_encoder *tv_enc = NULL;
tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
if (!tv_enc)
return -ENOMEM;
tv_enc->overscan = 50;
tv_enc->flicker = 50;
tv_enc->saturation = 50;
tv_enc->hue = 0;
tv_enc->tv_norm = TV_NORM_PAL;
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
tv_enc->pin_mask = 0;
encoder = to_drm_encoder(&tv_enc->base);
tv_enc->base.dcb = entry;
tv_enc->base.or = ffs(entry->or) - 1;
drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC,
NULL);
drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
tv_enc->base.enc_save = nv17_tv_save;
tv_enc->base.enc_restore = nv17_tv_restore;
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
nv17_tv_create_resources(encoder, connector);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/tvnv17.c |
/*
* Copyright 2003 NVIDIA, Corporation
* Copyright 2006 Dave Airlie
* Copyright 2007 Maarten Maathuis
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_modeset_helper_vtables.h>
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include "nvreg.h"
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
#include <nvif/timer.h>
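/*
 * Translate an encoder's DCB OR bitmask into the offset of its DAC's
 * register block within PRAMDAC (the secondary DACs appear to live at
 * +0x68 and +0x2000 from the primary one).
 */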
int nv04_dac_output_offset(struct drm_encoder *encoder)
{
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
int offset = 0;
if (dcb->or & (8 | DCB_OUTPUT_C))
offset += 0x68;
if (dcb->or & (8 | DCB_OUTPUT_B))
offset += 0x2000;
return offset;
}
/*
* arbitrary limit to number of sense oscillations tolerated in one sample
* period (observed to be at least 13 in "nvidia")
*/
#define MAX_HBLANK_OSC 20
/*
* arbitrary limit to number of conflicting sample pairs to tolerate at a
* voltage step (observed to be at least 5 in "nvidia")
*/
#define MAX_SAMPLE_PAIRS 10
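/*
 * Take two debounced readings of the DAC sense comparator, each one
 * synchronised to the leading edge of hblank; returns 0 on success or
 * -EBUSY if the CRTC never toggles its blanking state.
 */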
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
int i;
for (i = 0; i < 2; i++) {
bool sense_a, sense_b, sense_b_prime;
int j = 0;
/*
* wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
* then wait for transition 0x4->0x5->0x4: enter hblank, leave
* hblank again
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
if (nvif_msec(&drm->client.device, 10,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
return -EBUSY;
if (nvif_msec(&drm->client.device, 10,
if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
return -EBUSY;
if (nvif_msec(&drm->client.device, 10,
if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
break;
) < 0)
return -EBUSY;
udelay(100);
/* when level triggers, sense is _LO_ */
sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
/* take another reading until it agrees with sense_a... */
do {
udelay(100);
sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_a != sense_b) {
sense_b_prime =
nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_b == sense_b_prime) {
/* ... unless two consecutive subsequent
* samples agree; sense_a is replaced */
sense_a = sense_b;
/* force mis-match so we loop */
sense_b = !sense_a;
}
}
} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
if (j == MAX_HBLANK_OSC)
/* with so much oscillation, default to sense:LO */
sense[i] = false;
else
sense[i] = sense_a;
}
return 0;
}
static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
int i;
uint8_t blue;
bool sense = true;
/*
* for this detection to work, there needs to be a mode set up on the
* CRTC. this is presumed to be the case
*/
if (nv_two_heads(dev))
/* only implemented for head A for now */
NVSetOwner(dev, 0);
saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
msleep(10);
saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
for (i = 0; i < 3; i++)
saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA);
saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK);
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
(saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
blue = 8; /* start of test range */
do {
bool sense_pair[2];
nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
/* testing blue won't find monochrome monitors. I don't care */
nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
i = 0;
/* take sample pairs until both samples in the pair agree */
do {
if (sample_load_twice(dev, sense_pair))
goto out;
} while ((sense_pair[0] != sense_pair[1]) &&
++i < MAX_SAMPLE_PAIRS);
if (i == MAX_SAMPLE_PAIRS)
/* too much oscillation defaults to LO */
sense = false;
else
sense = sense_pair[0];
/*
* if sense goes LO before blue ramps to 0x18, monitor is not connected.
* ergo, if blue gets to 0x18, monitor must be connected
*/
} while (++blue < 0x18 && sense);
out:
nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
for (i = 0; i < 3; i++)
nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
NV_DEBUG(drm, "Load detected on head A\n");
return connector_status_connected;
}
return connector_status_disconnected;
}
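/*
 * Perform load detection on a DAC (gf4+) by driving a test voltage onto
 * the RGB outputs through the test-point registers and sampling the sense
 * comparators.  Returns the raw NV_PRAMDAC_TEST_CONTROL value; callers
 * extract the sense bits they care about (e.g. bits 28-31 for TV-out).
 */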
uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput;
int head;
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
if (dcb->type == DCB_OUTPUT_TV) {
testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
if (drm->vbios.tvdactestval)
testval = drm->vbios.tvdactestval;
} else {
testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
if (drm->vbios.dactestval)
testval = drm->vbios.dactestval;
}
saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2);
nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
if (regoffset == 0x68) {
saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4);
nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
if (gpio) {
saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
}
msleep(4);
saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (saved_routput & 0x100) >> 8;
/* if there's a spare crtc, using it will minimise flicker */
if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0))
head ^= 1;
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
if (dcb->type == DCB_OUTPUT_TV)
routput |= 0x1a << 16;
else
routput &= ~(0x1a << 16);
}
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
msleep(1);
temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
msleep(5);
sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
/* do it again just in case it's a residual current */
sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
/* bios does something more complex for restoring, but I think this is good enough */
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
if (regoffset == 0x68)
nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
if (gpio) {
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
}
return sample;
}
static enum drm_connector_status
nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
if (nv04_dac_in_use(encoder))
return connector_status_disconnected;
if (nv17_dac_sample_load(encoder) &
NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
NV_DEBUG(drm, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
} else {
return connector_status_disconnected;
}
}
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (nv04_dac_in_use(encoder))
return false;
return true;
}
static void nv04_dac_prepare(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_disable(dev, head);
}
static void nv04_dac_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
int head = nouveau_crtc(encoder->crtc)->index;
if (nv_gf4_disp_arch(dev)) {
struct drm_encoder *rebind;
uint32_t dac_offset = nv04_dac_output_offset(encoder);
uint32_t otherdac;
		/* bits 16-19 are set on some G70 cards,
		 * but don't seem to have much effect */
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
/* force any other vga encoders to bind to the other crtc */
list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
if (rebind == encoder
|| nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG)
continue;
dac_offset = nv04_dac_output_offset(rebind);
otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
(otherdac & ~0x0100) | (head ^ 1) << 8);
}
}
/* This could use refinement for flatpanels, but it should work this way */
if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
}
static void nv04_dac_commit(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
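/*
 * Enable or disable the DACCLK for this encoder's OR, reference-counting
 * users in dac_users so the clock is only gated once no encoder on that
 * OR still needs it (gf4+ only).
 */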
void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
if (nv_gf4_disp_arch(dev)) {
uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1];
int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
if (enable) {
*dac_users |= 1 << dcb->index;
NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
} else {
*dac_users &= ~(1 << dcb->index);
if (!*dac_users)
NVWriteRAMDAC(dev, 0, dacclk_off,
dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
}
}
}
/* Check if the DAC corresponding to 'encoder' is being used by
* someone else. */
bool nv04_dac_in_use(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
return nv_gf4_disp_arch(encoder->dev) &&
(nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
}
static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
if (nv_encoder->last_dpms == mode)
return;
nv_encoder->last_dpms = mode;
NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
mode, nv_encoder->dcb->index);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
static void nv04_dac_save(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
if (nv_gf4_disp_arch(dev))
nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder));
}
static void nv04_dac_restore(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
nv_encoder->restore.output);
nv_encoder->last_dpms = NV_DPMS_CLEARED;
}
static void nv04_dac_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
}
static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
.dpms = nv04_dac_dpms,
.mode_fixup = nv04_dac_mode_fixup,
.prepare = nv04_dac_prepare,
.commit = nv04_dac_commit,
.mode_set = nv04_dac_mode_set,
.detect = nv04_dac_detect
};
static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
.dpms = nv04_dac_dpms,
.mode_fixup = nv04_dac_mode_fixup,
.prepare = nv04_dac_prepare,
.commit = nv04_dac_commit,
.mode_set = nv04_dac_mode_set,
.detect = nv17_dac_detect
};
static const struct drm_encoder_funcs nv04_dac_funcs = {
.destroy = nv04_dac_destroy,
};
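/*
 * Create and register a VGA DAC encoder for a DCB entry; gf4+ chips get
 * the nv17 helper set, which includes working load detection.
 */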
int
nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
{
const struct drm_encoder_helper_funcs *helper;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
encoder = to_drm_encoder(nv_encoder);
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
nv_encoder->enc_save = nv04_dac_save;
nv_encoder->enc_restore = nv04_dac_restore;
if (nv_gf4_disp_arch(dev))
helper = &nv17_dac_helper_funcs;
else
helper = &nv04_dac_helper_funcs;
drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC,
NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
drm_connector_attach_encoder(connector, encoder);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/dac.c |
/*
* Copyright 2009 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Ben Skeggs
*/
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_bo.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include <nvif/if0004.h>
struct nouveau_connector *
nv04_encoder_get_connector(struct nouveau_encoder *encoder)
{
struct drm_device *dev = to_drm_encoder(encoder)->dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct nouveau_connector *nv_connector = NULL;
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder == to_drm_encoder(encoder))
nv_connector = nouveau_connector(connector);
}
drm_connector_list_iter_end(&conn_iter);
return nv_connector;
}
static void
nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_crtc *crtc;
/* Disable flip completion events. */
nvif_event_block(&disp->flip);
/* Disable vblank interrupts. */
NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
if (!runtime)
cancel_work_sync(&drm->hpd_work);
if (!suspend)
return;
/* Un-pin FB and cursors so they'll be evicted to system memory. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_framebuffer *fb = crtc->primary->fb;
struct nouveau_bo *nvbo;
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
nouveau_bo_unpin(nvbo);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (nv_crtc->cursor.nvbo) {
if (nv_crtc->cursor.set_offset)
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
}
}
static int
nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
struct drm_crtc *crtc;
int ret;
	/* meh.. modeset apparently doesn't set up all the regs and depends
	 * on pre-existing state, so for now load the state of the card
	 * *before* nouveau was loaded, and then do a modeset.
	 *
	 * the best thing to do is probably to make the save/restore routines
	 * not save/restore "pre-load" state, but more general so we can
	 * save on suspend too.
	 */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nv_crtc->save(&nv_crtc->base);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
encoder->enc_save(&encoder->base.base);
/* Enable flip completion events. */
nvif_event_allow(&disp->flip);
if (!resume)
return 0;
/* Re-pin FB/cursors. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_framebuffer *fb = crtc->primary->fb;
struct nouveau_bo *nvbo;
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc->cursor.nvbo)
continue;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
}
/* Force CLUT to get re-loaded during modeset. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nv_crtc->lut.depth = 0;
}
/* This should ensure we don't hit a locking problem when someone
* wakes us up via a connector. We should never go into suspend
* while the display is on anyways.
*/
if (runtime)
return 0;
/* Restore mode. */
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc->cursor.nvbo)
continue;
if (nv_crtc->cursor.set_offset)
nv_crtc->cursor.set_offset(nv_crtc,
nv_crtc->cursor.nvbo->offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
}
return 0;
}
static void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
struct nouveau_crtc *nv_crtc;
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
encoder->enc_restore(&encoder->base.base);
list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
nv_crtc->restore(&nv_crtc->base);
nouveau_hw_save_vga_fonts(dev, 0);
nvif_event_dtor(&disp->flip);
nouveau_display(dev)->priv = NULL;
vfree(disp);
nvif_object_unmap(&drm->client.device.object);
}
int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_crtc *crtc;
struct nv04_display *disp;
int i, ret;
disp = vzalloc(sizeof(*disp));
if (!disp)
return -ENOMEM;
disp->drm = drm;
nvif_object_map(&drm->client.device.object, NULL, 0);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
nouveau_display(dev)->fini = nv04_display_fini;
/* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
dev->driver_features &= ~DRIVER_ATOMIC;
/* Request page flip completion event. */
if (drm->channel) {
ret = nvif_event_ctor(&drm->channel->nvsw, "kmsFlip", 0, nv04_flip_complete,
true, NULL, 0, &disp->flip);
if (ret)
return ret;
}
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
if (nv_two_heads(dev))
nv04_crtc_create(dev, 1);
for (i = 0; i < dcb->entries; i++) {
struct dcb_output *dcbent = &dcb->entry[i];
connector = nouveau_connector_create(dev, dcbent);
if (IS_ERR(connector))
continue;
switch (dcbent->type) {
case DCB_OUTPUT_ANALOG:
ret = nv04_dac_create(connector, dcbent);
break;
case DCB_OUTPUT_LVDS:
case DCB_OUTPUT_TMDS:
ret = nv04_dfp_create(connector, dcbent);
break;
case DCB_OUTPUT_TV:
if (dcbent->location == DCB_LOC_ON_CHIP)
ret = nv17_tv_create(connector, dcbent);
else
ret = nv04_tv_create(connector, dcbent);
break;
default:
NV_WARN(drm, "DCB type %d not known\n", dcbent->type);
continue;
}
if (ret)
continue;
}
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
if (!connector->possible_encoders) {
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
}
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
nv_encoder->i2c = bus ? &bus->i2c : NULL;
}
/* Save previous state */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
crtc->save(&crtc->base);
list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head)
nv_encoder->enc_save(&nv_encoder->base.base);
nouveau_overlay_init(dev);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/disp.c |
/*
* Copyright 2003 NVIDIA, Corporation
* Copyright 2006 Dave Airlie
* Copyright 2007 Maarten Maathuis
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include "nvreg.h"
#include <drm/i2c/sil164.h>
#include <subdev/i2c.h>
#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \
NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \
NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
static inline bool is_fpc_off(uint32_t fpc)
{
return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
FP_TG_CONTROL_OFF);
}
int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent)
{
/* special case of nv_read_tmds to find crtc associated with an output.
* this does not give a correct answer for off-chip dvi, but there's no
* use for such an answer anyway
*/
int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
}
void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
int head, bool dl)
{
/* The BIOS scripts don't do this for us, sadly
* Luckily we do know the values ;-)
*
* head < 0 indicates we wish to force a setting with the overrideval
* (for VT restore etc.)
*/
int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
uint8_t tmds04 = 0x80;
if (head != ramdac)
tmds04 = 0x88;
if (dcbent->type == DCB_OUTPUT_LVDS)
tmds04 |= 0x01;
nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
if (dl) /* dual link */
nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
}
void nv04_dfp_disable(struct drm_device *dev, int head)
{
struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
FP_TG_CONTROL_ON) {
		/* digital remnants must be cleaned before new crtc
		 * values are programmed. the delay gives the vga stuff
		 * time to realise it's in control again
		 */
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
FP_TG_CONTROL_OFF);
msleep(50);
}
/* don't inadvertently turn it on when state written later */
crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
~NV_CIO_CRE_LCD_ROUTE_MASK;
}
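/*
 * Switch the flat panel timing generator on or off for DPMS, tracking
 * active encoders per CRTC in fp_users and saving fp_control when cutting
 * the output so the "on" path can restore the last programmed value.
 */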
void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
uint32_t *fpc;
if (mode == DRM_MODE_DPMS_ON) {
nv_crtc = nouveau_crtc(encoder->crtc);
fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
if (is_fpc_off(*fpc)) {
/* using saved value is ok, as (is_digital && dpms_on &&
* fp_control==OFF) is (at present) *only* true when
* fpc's most recent change was by below "off" code
*/
*fpc = nv_crtc->dpms_saved_fp_control;
}
nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
} else {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
nv_crtc = nouveau_crtc(crtc);
fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
nv_crtc->dpms_saved_fp_control = *fpc;
/* cut the FP output */
*fpc &= ~FP_TG_CONTROL_ON;
*fpc |= FP_TG_CONTROL_OFF;
NVWriteRAMDAC(dev, nv_crtc->index,
NV_PRAMDAC_FP_TG_CONTROL, *fpc);
}
}
}
}
static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct drm_encoder *slave;
if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
return NULL;
/* Some BIOSes (e.g. the one in a Quadro FX1000) report several
* TMDS transmitters at the same I2C address, in the same I2C
* bus. This can still work because in that case one of them is
* always hard-wired to a reasonable configuration using straps,
* and the other one needs to be programmed.
*
* I don't think there's a way to know which is which, even the
* blob programs the one exposed via I2C for *both* heads, so
* let's do the same.
*/
list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
return slave;
}
return NULL;
}
static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *nv_connector =
nv04_encoder_get_connector(nv_encoder);
if (!nv_connector->native_mode ||
nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
mode->hdisplay > nv_connector->native_mode->hdisplay ||
mode->vdisplay > nv_connector->native_mode->vdisplay) {
nv_encoder->mode = *adjusted_mode;
} else {
nv_encoder->mode = *nv_connector->native_mode;
adjusted_mode->clock = nv_connector->native_mode->clock;
}
return true;
}
static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
struct nouveau_encoder *nv_encoder, int head)
{
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
return;
/* SEL_CLK is only used on the primary ramdac
* It toggles spread spectrum PLL output and sets the bindings of PLLs
* to heads on digital outputs
*/
if (head)
state->sel_clk |= bits1618;
else
state->sel_clk &= ~bits1618;
/* nv30:
* bit 0 NVClk spread spectrum on/off
* bit 2 MemClk spread spectrum on/off
* bit 4 PixClk1 spread spectrum on/off toggle
* bit 6 PixClk2 spread spectrum on/off toggle
*
* nv40 (observations from bios behaviour and mmio traces):
* bits 4&6 as for nv30
* bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6;
* maybe a different spread mode
* bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
* The logic behind turning spread spectrum on/off in the first place,
* and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
* entry has the necessary info)
*/
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) {
int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1;
state->sel_clk &= ~0xf0;
state->sel_clk |= (head ? 0x40 : 0x10) << shift;
}
}
static void nv04_dfp_prepare(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct drm_device *dev = encoder->dev;
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
helper->dpms(encoder, DRM_MODE_DPMS_OFF);
nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
*cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
if (nv_two_heads(dev)) {
if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
*cr_lcd |= head ? 0x0 : 0x8;
else {
*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
*cr_lcd |= 0x30;
if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
/* avoid being connected to both crtcs */
*cr_lcd_oth &= ~0x30;
NVWriteVgaCrtc(dev, head ^ 1,
NV_CIO_CRE_LCD__INDEX,
*cr_lcd_oth);
}
}
}
}
static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
struct drm_connector *connector = &nv_connector->base;
const struct drm_framebuffer *fb = encoder->crtc->primary->fb;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(output_mode);
/* Initialize the FP registers in this CRTC. */
regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
if (!nv_gf4_disp_arch(dev) ||
(output_mode->hsync_start - output_mode->hdisplay) >=
drm->vbios.digital_min_front_porch)
regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
else
regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1;
regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
regp->fp_vert_regs[FP_VALID_START] = 0;
regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
	/* bit26: a bit seen on some g7x, with no discernible purpose as yet */
regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
(savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
/* Deal with vsync/hsync polarity */
/* LVDS screens do set this, but modes with +ve syncs are very rare */
if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
/* panel scaling first, as native would get set otherwise */
if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
else /* gpu needs to scale */
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
output_mode->clock > 165000)
regp->fp_control |= (2 << 24);
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
bool duallink = false, dummy;
if (nv_connector->edid &&
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
duallink = (((u8 *)nv_connector->edid)[121] == 2);
} else {
nouveau_bios_parse_lvds_table(dev, output_mode->clock,
&duallink, &dummy);
}
if (duallink)
regp->fp_control |= (8 << 28);
} else
if (output_mode->clock > 165000)
regp->fp_control |= (8 << 28);
regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
/* We want automatic scaling */
regp->fp_debug_1 = 0;
/* This can override HTOTAL and VTOTAL */
regp->fp_debug_2 = 0;
/* Use 20.12 fixed point format to avoid floats */
mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
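	/* e.g. a 1024x768 mode gives mode_ratio = 4096 * 1024 / 768 = 5461,
	 * i.e. ~1.333 in 20.12 fixed point */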
/* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
* get treated the same as SCALE_FULLSCREEN */
if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
mode_ratio != panel_ratio) {
uint32_t diff, scale;
bool divide_by_2 = nv_gf4_disp_arch(dev);
if (mode_ratio < panel_ratio) {
/* vertical needs to expand to glass size (automatic)
* horizontal needs to be scaled at vertical scale factor
* to maintain aspect */
scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
/* restrict area of screen used, horizontally */
diff = output_mode->hdisplay -
output_mode->vdisplay * mode_ratio / (1 << 12);
regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
}
if (mode_ratio > panel_ratio) {
/* horizontal needs to expand to glass size (automatic)
* vertical needs to be scaled at horizontal scale factor
* to maintain aspect */
scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
/* restrict area of screen used, vertically */
diff = output_mode->vdisplay -
(1 << 12) * output_mode->hdisplay / mode_ratio;
regp->fp_vert_regs[FP_VALID_START] += diff / 2;
regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
}
}
/* Output property. */
if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
(nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
fb->format->depth > connector->display_info.bpc * 3)) {
if (drm->client.device.info.chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
int i;
regp->dither = savep->dither | 0x00000001;
for (i = 0; i < 3; i++) {
regp->dither_regs[i] = 0xe4e4e4e4;
regp->dither_regs[i + 3] = 0x44444444;
}
}
} else {
if (drm->client.device.info.chipset != 0x11) {
/* reset them */
int i;
for (i = 0; i < 3; i++) {
regp->dither_regs[i] = savep->dither_regs[i];
regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
}
}
regp->dither = savep->dither;
}
regp->fp_margin_color = 0;
}
static void nv04_dfp_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
const struct drm_encoder_helper_funcs *helper = encoder->helper_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_output *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
struct drm_encoder *slave_encoder;
if (dcbe->type == DCB_OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
else if (dcbe->type == DCB_OUTPUT_LVDS)
call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
/* update fp_control state for any changes made by scripts,
* so the correct value is written when DPMS switches the output on */
nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
/* This could use refinement for flatpanels, but it should work this way */
if (drm->client.device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
/* Init external transmitters */
slave_encoder = get_tmds_slave(encoder);
if (slave_encoder)
get_slave_funcs(slave_encoder)->mode_set(
slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
{
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
struct pci_dev *pdev = to_pci_dev(dev->dev);
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
*/
if (pdev->device == 0x0174 || pdev->device == 0x0179 ||
pdev->device == 0x0189 || pdev->device == 0x0329) {
if (mode == DRM_MODE_DPMS_ON) {
nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
} else {
nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
}
}
#endif
}
static inline bool is_powersaving_dpms(int mode)
{
return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
if (nv_encoder->last_dpms == mode)
return;
nv_encoder->last_dpms = mode;
NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
if (was_powersaving && is_powersaving_dpms(mode))
return;
if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
/* when removing an output, crtc may not be set, but PANEL_OFF
* must still be run
*/
int head = crtc ? nouveau_crtc(crtc)->index :
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
if (mode == DRM_MODE_DPMS_ON) {
call_lvds_script(dev, nv_encoder->dcb, head,
LVDS_PANEL_ON, nv_encoder->mode.clock);
} else
/* pxclk of 0 is fine for PANEL_OFF, and for a
* disconnected LVDS encoder there is no native_mode
*/
call_lvds_script(dev, nv_encoder->dcb, head,
LVDS_PANEL_OFF, 0);
}
nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
if (mode == DRM_MODE_DPMS_ON)
nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
else {
nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
nv04_display(dev)->mode_reg.sel_clk &= ~0xf0;
}
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
}
static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->last_dpms == mode)
return;
nv_encoder->last_dpms = mode;
NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
}
static void nv04_dfp_save(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
if (nv_two_heads(dev))
nv_encoder->restore.head =
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
}
static void nv04_dfp_restore(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
int head = nv_encoder->restore.head;
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
struct nouveau_connector *connector =
nv04_encoder_get_connector(nv_encoder);
if (connector && connector->native_mode)
call_lvds_script(dev, nv_encoder->dcb, head,
LVDS_PANEL_ON,
connector->native_mode->clock);
} else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
int clock = nouveau_hw_pllvals_to_clk
(&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
run_tmds_table(dev, nv_encoder->dcb, head, clock);
}
nv_encoder->last_dpms = NV_DPMS_CLEARED;
}
static void nv04_dfp_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (get_slave_funcs(encoder))
get_slave_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
}
static void nv04_tmds_slave_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
struct nvkm_i2c_bus_probe info[] = {
{
{
.type = "sil164",
.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
.platform_data = &(struct sil164_encoder_params) {
SIL164_INPUT_EDGE_RISING
}
}, 0
},
{ }
};
int type;
if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
return;
type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
if (type < 0)
return;
drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
&bus->i2c, &info[type].dev);
}
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
.dpms = nv04_lvds_dpms,
.mode_fixup = nv04_dfp_mode_fixup,
.prepare = nv04_dfp_prepare,
.commit = nv04_dfp_commit,
.mode_set = nv04_dfp_mode_set,
.detect = NULL,
};
static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
.dpms = nv04_tmds_dpms,
.mode_fixup = nv04_dfp_mode_fixup,
.prepare = nv04_dfp_prepare,
.commit = nv04_dfp_commit,
.mode_set = nv04_dfp_mode_set,
.detect = NULL,
};
static const struct drm_encoder_funcs nv04_dfp_funcs = {
.destroy = nv04_dfp_destroy,
};
int
nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
{
const struct drm_encoder_helper_funcs *helper;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
int type;
switch (entry->type) {
case DCB_OUTPUT_TMDS:
type = DRM_MODE_ENCODER_TMDS;
helper = &nv04_tmds_helper_funcs;
break;
case DCB_OUTPUT_LVDS:
type = DRM_MODE_ENCODER_LVDS;
helper = &nv04_lvds_helper_funcs;
break;
default:
return -EINVAL;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->enc_save = nv04_dfp_save;
nv_encoder->enc_restore = nv04_dfp_restore;
encoder = to_drm_encoder(nv_encoder);
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
if (entry->type == DCB_OUTPUT_TMDS &&
entry->location != DCB_LOC_ON_CHIP)
nv04_tmds_slave_init(encoder);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/dfp.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "hw.h"
#include "tvnv17.h"
const char * const nv17_tv_norm_names[NUM_TV_NORMS] = {
[TV_NORM_PAL] = "PAL",
[TV_NORM_PAL_M] = "PAL-M",
[TV_NORM_PAL_N] = "PAL-N",
[TV_NORM_PAL_NC] = "PAL-Nc",
[TV_NORM_NTSC_M] = "NTSC-M",
[TV_NORM_NTSC_J] = "NTSC-J",
[TV_NORM_HD480I] = "hd480i",
[TV_NORM_HD480P] = "hd480p",
[TV_NORM_HD576I] = "hd576i",
[TV_NORM_HD576P] = "hd576p",
[TV_NORM_HD720P] = "hd720p",
[TV_NORM_HD1080I] = "hd1080i"
};
/* TV standard specific parameters */
struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
[TV_NORM_PAL] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 576, 50000, {
0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
} } } },
[TV_NORM_PAL_M] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 480, 59940, {
0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
} } } },
[TV_NORM_PAL_N] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 576, 50000, {
0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
} } } },
[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 576, 50000, {
0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
} } } },
[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 480, 59940, {
0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
} } } },
[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 480, 59940, {
0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
} } } },
[TV_NORM_HD480I] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 480, 59940, {
0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
} } } },
[TV_NORM_HD576I] = { TV_ENC_MODE, {
.tv_enc_mode = { 720, 576, 50000, {
0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
} } } },
[TV_NORM_HD480P] = { CTV_ENC_MODE, {
.ctv_enc_mode = {
.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
0x354003a, 0x40000, 0x6f0344, 0x18100000,
0x10160004, 0x10060005, 0x1006000c, 0x10060020,
0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
0x10000fff, 0x10000fff, 0x10000fff, 0x70,
0x3ff0000, 0x57, 0x2e001e, 0x258012c,
0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
} } } },
[TV_NORM_HD576P] = { CTV_ENC_MODE, {
.ctv_enc_mode = {
.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
0x354003a, 0x40000, 0x6f0344, 0x18100000,
0x10060001, 0x10060009, 0x10060026, 0x10060027,
0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
0x10000fff, 0x10000fff, 0x10000fff, 0x69,
0x3ff0000, 0x57, 0x2e001e, 0x258012c,
0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
} } } },
[TV_NORM_HD720P] = { CTV_ENC_MODE, {
.ctv_enc_mode = {
.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
0x66b0021, 0x6004a, 0x1210626, 0x8170000,
0x70004, 0x70016, 0x70017, 0x40f0018,
0x702e8, 0x81702ed, 0xfff, 0xfff,
0xfff, 0xfff, 0xfff, 0xfff,
0xfff, 0xfff, 0xfff, 0x0,
0x2e40001, 0x58, 0x2e001e, 0x258012c,
0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
} } } },
[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
.ctv_enc_mode = {
.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
| DRM_MODE_FLAG_INTERLACE) },
.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
0x8940028, 0x60054, 0xe80870, 0xbf70000,
0xbc70004, 0x70005, 0x70012, 0x70013,
0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
0x1c70237, 0x70238, 0x70244, 0x70245,
0x40f0246, 0x70462, 0x1f70464, 0x0,
0x2e40001, 0x58, 0x2e001e, 0x258012c,
0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
} } } }
};
/*
* The following is some guesswork on how the TV encoder flicker
* filter/rescaler works:
*
* It seems to use some sort of resampling filter. It is controlled
* through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
* control the horizontal and vertical stages respectively. There is
* also NV_PTV_HFILTER2, which the blob fills identically to
* NV_PTV_HFILTER but which seems to do nothing; a rough guess is that
* it could be used to control the filtering of each interlaced field
* independently, though it is not clear how that would be enabled.
* The whole filtering process seems to be disabled with bits 26:27 of
* PTV_200, but we aren't doing that.
*
* The layout of both register sets is the same:
*
* A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
* B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
*
* Each coefficient is stored in bits [31],[15:9] in two's complement
* format. They seem to be some kind of weights used in a low-pass
* filter. Both A and B coefficients are applied to the 14 nearest
* samples on each side (listed from nearest to furthest; they
* roughly cover 2 framebuffer pixels on each side). They are
* probably multiplied by some more hardwired weights before being
* used: B-coefficients are applied the same on both sides,
* A-coefficients are inverted before being applied to the opposite
* side.
*
* After all the hassle, I got the following formula by empirical
* means...
*/
#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
#define id1 (1LL << 8)
#define id2 (1LL << 16)
#define id3 (1LL << 24)
#define id4 (1LL << 32)
#define id5 (1LL << 48)
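/*
* The id* values are fixed-point scale factors for the polynomial
* coefficients below; the per-term products are summed into a wide
* accumulator and normalized by the (c + id5/2) >> 39 rounding step
* in tv_setup_filter(). Note the uneven progression: id4 is 1<<32
* but id5 jumps to 1<<48.
*/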
static struct filter_params{
int64_t k1;
int64_t ki;
int64_t ki2;
int64_t ki3;
int64_t kr;
int64_t kir;
int64_t ki2r;
int64_t ki3r;
int64_t kf;
int64_t kif;
int64_t ki2f;
int64_t ki3f;
int64_t krf;
int64_t kirf;
int64_t ki2rf;
int64_t ki3rf;
} fparams[2][4] = {
/* Horizontal filter parameters */
{
{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
-8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
-37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
-41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
-154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
},
/* Vertical filter parameters */
{
{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
-3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
-9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
-2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
-38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
-17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
}
};
static void tv_setup_filter(struct drm_encoder *encoder)
{
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
struct drm_display_mode *mode = &encoder->crtc->mode;
uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
&tv_enc->state.vfilter};
int i, j, k;
int32_t overscan = calc_overscan(tv_enc->overscan);
int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
uint64_t rs[] = {mode->hdisplay * id3,
mode->vdisplay * id3};
do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
for (k = 0; k < 2; k++) {
rs[k] = max((int64_t)rs[k], id2);
for (j = 0; j < 4; j++) {
struct filter_params *p = &fparams[k][j];
for (i = 0; i < 7; i++) {
int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
p->ki3*i*i*i)
+ (p->kr + p->kir*i + p->ki2r*i*i +
p->ki3r*i*i*i) * rs[k]
+ (p->kf + p->kif*i + p->ki2f*i*i +
p->ki3f*i*i*i) * flicker
+ (p->krf + p->kirf*i + p->ki2rf*i*i +
p->ki3rf*i*i*i) * flicker * rs[k];
(*filters[k])[j][i] = (c + id5/2) >> 39
& (0x1 << 31 | 0x7f << 9);
}
}
}
}
/* Hardware state saving/restoring */
static void tv_save_filter(struct drm_device *dev, uint32_t base,
uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
for (i = 0; i < 4; i++) {
for (j = 0; j < 7; j++)
regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
}
}
static void tv_load_filter(struct drm_device *dev, uint32_t base,
uint32_t regs[4][7])
{
int i, j;
uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
for (i = 0; i < 4; i++) {
for (j = 0; j < 7; j++)
nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
}
}
void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
{
int i;
for (i = 0; i < 0x40; i++)
state->tv_enc[i] = nv_read_tv_enc(dev, i);
tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
nv_save_ptv(dev, state, 200);
nv_save_ptv(dev, state, 204);
nv_save_ptv(dev, state, 208);
nv_save_ptv(dev, state, 20c);
nv_save_ptv(dev, state, 304);
nv_save_ptv(dev, state, 500);
nv_save_ptv(dev, state, 504);
nv_save_ptv(dev, state, 508);
nv_save_ptv(dev, state, 600);
nv_save_ptv(dev, state, 604);
nv_save_ptv(dev, state, 608);
nv_save_ptv(dev, state, 60c);
nv_save_ptv(dev, state, 610);
nv_save_ptv(dev, state, 614);
}
void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
{
int i;
for (i = 0; i < 0x40; i++)
nv_write_tv_enc(dev, i, state->tv_enc[i]);
tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
nv_load_ptv(dev, state, 200);
nv_load_ptv(dev, state, 204);
nv_load_ptv(dev, state, 208);
nv_load_ptv(dev, state, 20c);
nv_load_ptv(dev, state, 304);
nv_load_ptv(dev, state, 500);
nv_load_ptv(dev, state, 504);
nv_load_ptv(dev, state, 508);
nv_load_ptv(dev, state, 600);
nv_load_ptv(dev, state, 604);
nv_load_ptv(dev, state, 608);
nv_load_ptv(dev, state, 60c);
nv_load_ptv(dev, state, 610);
nv_load_ptv(dev, state, 614);
/* This is required for some settings to kick in. */
nv_write_tv_enc(dev, 0x3e, 1);
nv_write_tv_enc(dev, 0x3e, 0);
}
/* Timings similar to the ones the blob sets */
const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
| DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
| DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
| DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
{}
};
void nv17_tv_update_properties(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct nv17_tv_state *regs = &tv_enc->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
int subconnector = tv_enc->select_subconnector ?
tv_enc->select_subconnector :
tv_enc->subconnector;
switch (subconnector) {
case DRM_MODE_SUBCONNECTOR_Composite:
{
regs->ptv_204 = 0x2;
/* The composite connector may be found on either pin. */
if (tv_enc->pin_mask & 0x4)
regs->ptv_204 |= 0x010000;
else if (tv_enc->pin_mask & 0x2)
regs->ptv_204 |= 0x100000;
else
regs->ptv_204 |= 0x110000;
regs->tv_enc[0x7] = 0x10;
break;
}
case DRM_MODE_SUBCONNECTOR_SVIDEO:
regs->ptv_204 = 0x11012;
regs->tv_enc[0x7] = 0x18;
break;
case DRM_MODE_SUBCONNECTOR_Component:
regs->ptv_204 = 0x111333;
regs->tv_enc[0x7] = 0x14;
break;
case DRM_MODE_SUBCONNECTOR_SCART:
regs->ptv_204 = 0x111012;
regs->tv_enc[0x7] = 0x18;
break;
}
regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
255, tv_enc->saturation);
regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
255, tv_enc->saturation);
regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
nv_load_ptv(dev, regs, 204);
nv_load_tv_enc(dev, regs, 7);
nv_load_tv_enc(dev, regs, 20);
nv_load_tv_enc(dev, regs, 22);
nv_load_tv_enc(dev, regs, 25);
}
void nv17_tv_update_rescaler(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct nv17_tv_state *regs = &tv_enc->state;
regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
tv_setup_filter(encoder);
nv_load_ptv(dev, regs, 208);
tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
}
void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
int head = nouveau_crtc(encoder->crtc)->index;
struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
struct drm_display_mode *output_mode =
&get_tv_norm(encoder)->ctv_enc_mode.mode;
int overscan, hmargin, vmargin, hratio, vratio;
/* The rescaler doesn't do the right thing for interlaced modes. */
if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
overscan = 100;
else
overscan = tv_enc->overscan;
hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
hmargin, overscan);
vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
vmargin, overscan);
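/* 0x800 appears to represent a 1:1 scale factor here; the vertical
* ratio is additionally rounded down to a multiple of 4 by the ~3
* mask below. */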
hratio = crtc_mode->hdisplay * 0x800 /
(output_mode->hdisplay - 2*hmargin);
vratio = crtc_mode->vdisplay * 0x800 /
(output_mode->vdisplay - 2*vmargin) & ~3;
regs->fp_horiz_regs[FP_VALID_START] = hmargin;
regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
regs->fp_vert_regs[FP_VALID_START] = vmargin;
regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
regs->fp_horiz_regs[FP_VALID_START]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
regs->fp_horiz_regs[FP_VALID_END]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
regs->fp_vert_regs[FP_VALID_START]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
regs->fp_vert_regs[FP_VALID_END]);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c |
/*
* Copyright 2013 Ilia Mirkin
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Implementation based on the pre-KMS implementation in xf86-video-nouveau,
* written by Arthur Huillet.
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_connector.h"
#include "nouveau_display.h"
#include "nouveau_gem.h"
#include "nvreg.h"
#include "disp.h"
struct nouveau_plane {
struct drm_plane base;
bool flip;
struct nouveau_bo *cur;
struct {
struct drm_property *colorkey;
struct drm_property *contrast;
struct drm_property *brightness;
struct drm_property *hue;
struct drm_property *saturation;
} props;
int colorkey;
int contrast;
int brightness;
int hue;
int saturation;
enum drm_color_encoding color_encoding;
void (*set_params)(struct nouveau_plane *);
};
static uint32_t formats[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
};
/* Sine can be approximated with Bhaskara I's sine approximation formula
* (http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula):
* sin(x degrees) ~= 4 * x * (180 - x) / (40500 - x * (180 - x))
* Note that this only works for the range [0, 180].
* Also note that sin(x) == -sin(x - 180)
*/
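/* Worked example: sin(30 degrees) ~= 4 * 30 * 150 / (40500 - 30 * 150)
* = 18000 / 36000 = 0.5, which happens to be exact; the maximum
* absolute error of the approximation is roughly 0.2% over [0, 180].
*/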
static inline int
sin_mul(int degrees, int factor)
{
if (degrees > 180) {
degrees -= 180;
factor *= -1;
}
return factor * 4 * degrees * (180 - degrees) /
(40500 - degrees * (180 - degrees));
}
/* cos(x) = sin(x + 90) */
static inline int
cos_mul(int degrees, int factor)
{
return sin_mul((degrees + 90) % 360, factor);
}
static int
verify_scaling(const struct drm_framebuffer *fb, uint8_t shift,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
uint32_t crtc_w, uint32_t crtc_h)
{
if (crtc_w < (src_w >> shift) || crtc_h < (src_h >> shift)) {
DRM_DEBUG_KMS("Unsuitable framebuffer scaling: %dx%d -> %dx%d\n",
src_w, src_h, crtc_w, crtc_h);
return -ERANGE;
}
if (src_x != 0 || src_y != 0) {
DRM_DEBUG_KMS("Unsuitable framebuffer offset: %d,%d\n",
src_x, src_y);
return -ERANGE;
}
return 0;
}
static int
nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nvif_object *dev = &drm->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
struct nouveau_bo *nvbo;
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
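/* Maximum supported downscale factor: pre-NV30 overlays can shrink
* the source by up to 8x (shift 3), NV30+ only by 2x (shift 1);
* verify_scaling() rejects anything smaller. */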
unsigned shift = drm->client.device.info.chipset >= 0x30 ? 1 : 3;
unsigned format = 0;
int ret;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
src_y >>= 16;
src_w >>= 16;
src_h >>= 16;
ret = verify_scaling(fb, shift, 0, 0, src_w, src_h, crtc_w, crtc_h);
if (ret)
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
nv_plane->cur = nvbo;
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->offset);
nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
if (fb->format->format == DRM_FORMAT_YUYV ||
fb->format->format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
if (fb->format->format == DRM_FORMAT_NV12 ||
fb->format->format == DRM_FORMAT_NV21)
format |= NV_PVIDEO_FORMAT_PLANAR;
if (nv_plane->color_encoding == DRM_COLOR_YCBCR_BT709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nvbo->offset + fb->offsets[1]);
}
nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
/* TODO: wait for vblank? */
nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
nv_plane->flip = !flip;
if (cur)
nouveau_bo_unpin(cur);
return 0;
}
static int
nv10_disable_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
nvif_wr32(dev, NV_PVIDEO_STOP, 1);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
}
return 0;
}
static void
nv_destroy_plane(struct drm_plane *plane)
{
drm_plane_force_disable(plane);
drm_plane_cleanup(plane);
kfree(plane);
}
static void
nv10_set_params(struct nouveau_plane *plane)
{
struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object;
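/* Pack brightness (0..1024, 512 = neutral) into the high half and
* contrast into the low half of the luminance registers; chroma
* packs saturation-scaled sine/cosine of the hue for the color
* matrix, using the integer approximations above. */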
u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
(cos_mul(plane->hue, plane->saturation) & 0xffff);
u32 format = 0;
nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
if (plane->cur) {
if (plane->color_encoding == DRM_COLOR_YCBCR_BT709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
format);
}
}
static int
nv_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
if (property == nv_plane->props.colorkey)
nv_plane->colorkey = value;
else if (property == nv_plane->props.contrast)
nv_plane->contrast = value;
else if (property == nv_plane->props.brightness)
nv_plane->brightness = value;
else if (property == nv_plane->props.hue)
nv_plane->hue = value;
else if (property == nv_plane->props.saturation)
nv_plane->saturation = value;
else if (property == nv_plane->base.color_encoding_property)
nv_plane->color_encoding = value;
else
return -EINVAL;
if (nv_plane->set_params)
nv_plane->set_params(nv_plane);
return 0;
}
static const struct drm_plane_funcs nv10_plane_funcs = {
.update_plane = nv10_update_plane,
.disable_plane = nv10_disable_plane,
.set_property = nv_set_property,
.destroy = nv_destroy_plane,
};
static void
nv10_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
unsigned int num_formats = ARRAY_SIZE(formats);
int ret;
if (!plane)
return;
switch (drm->client.device.info.chipset) {
case 0x10:
case 0x11:
case 0x15:
case 0x1a:
case 0x20:
num_formats = 2;
break;
}
ret = drm_universal_plane_init(device, &plane->base, 3 /* both crtc's */,
&nv10_plane_funcs,
formats, num_formats, NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
/* Set up the plane properties */
plane->props.colorkey = drm_property_create_range(
device, 0, "colorkey", 0, 0x01ffffff);
plane->props.contrast = drm_property_create_range(
device, 0, "contrast", 0, 8192 - 1);
plane->props.brightness = drm_property_create_range(
device, 0, "brightness", 0, 1024);
plane->props.hue = drm_property_create_range(
device, 0, "hue", 0, 359);
plane->props.saturation = drm_property_create_range(
device, 0, "saturation", 0, 8192 - 1);
if (!plane->props.colorkey ||
!plane->props.contrast ||
!plane->props.brightness ||
!plane->props.hue ||
!plane->props.saturation)
goto cleanup;
plane->colorkey = 0;
drm_object_attach_property(&plane->base.base,
plane->props.colorkey, plane->colorkey);
plane->contrast = 0x1000;
drm_object_attach_property(&plane->base.base,
plane->props.contrast, plane->contrast);
plane->brightness = 512;
drm_object_attach_property(&plane->base.base,
plane->props.brightness, plane->brightness);
plane->hue = 0;
drm_object_attach_property(&plane->base.base,
plane->props.hue, plane->hue);
plane->saturation = 0x1000;
drm_object_attach_property(&plane->base.base,
plane->props.saturation, plane->saturation);
plane->color_encoding = DRM_COLOR_YCBCR_BT601;
drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
plane->set_params = nv10_set_params;
nv10_set_params(plane);
drm_plane_force_disable(&plane->base);
return;
cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
NV_ERROR(drm, "Failed to create plane\n");
}
static int
nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
struct nouveau_bo *cur = nv_plane->cur;
struct nouveau_bo *nvbo;
uint32_t overlay = 1;
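/* Map the 0..1024 brightness property (512 = neutral) onto the
* CSC offset range used below, e.g. 1024 -> +62 and 0 -> -62. */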
int brightness = (nv_plane->brightness - 512) * 62 / 512;
int ret, i;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
src_y >>= 16;
src_w >>= 16;
src_h >>= 16;
ret = verify_scaling(fb, 0, src_x, src_y, src_w, src_h, crtc_w, crtc_h);
if (ret)
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
nv_plane->cur = nvbo;
nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
nvbo->offset);
nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
}
nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
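/* Scaling step with 11 fractional bits: ((in - 1) << 11) / (out - 1),
* so an unscaled blit yields 0x800 per axis. */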
nvif_wr32(dev, NV_PVIDEO_STEP_SIZE,
(uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 |
(uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
/* It should be possible to convert hue/contrast to this */
nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
if (nv_plane->colorkey & (1 << 24))
overlay |= 0x10;
if (fb->format->format == DRM_FORMAT_YUYV)
overlay |= 0x100;
nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
if (cur)
nouveau_bo_unpin(cur);
return 0;
}
static int
nv04_disable_plane(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
}
return 0;
}
static const struct drm_plane_funcs nv04_plane_funcs = {
.update_plane = nv04_update_plane,
.disable_plane = nv04_disable_plane,
.set_property = nv_set_property,
.destroy = nv_destroy_plane,
};
static void
nv04_overlay_init(struct drm_device *device)
{
struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int ret;
if (!plane)
return;
ret = drm_universal_plane_init(device, &plane->base, 1 /* single crtc */,
&nv04_plane_funcs, formats, 2, NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
/* Set up the plane properties */
plane->props.colorkey = drm_property_create_range(
device, 0, "colorkey", 0, 0x01ffffff);
plane->props.brightness = drm_property_create_range(
device, 0, "brightness", 0, 1024);
if (!plane->props.colorkey ||
!plane->props.brightness)
goto cleanup;
plane->colorkey = 0;
drm_object_attach_property(&plane->base.base,
plane->props.colorkey, plane->colorkey);
plane->brightness = 512;
drm_object_attach_property(&plane->base.base,
plane->props.brightness, plane->brightness);
drm_plane_force_disable(&plane->base);
return;
cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
NV_ERROR(drm, "Failed to create plane\n");
}
void
nouveau_overlay_init(struct drm_device *device)
{
struct nvif_device *dev = &nouveau_drm(device)->client.device;
if (dev->info.chipset < 0x10)
nv04_overlay_init(device);
else if (dev->info.chipset <= 0x40)
nv10_overlay_init(device);
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/overlay.c |
// SPDX-License-Identifier: MIT
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
#include "hw.h"
static void
nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
}
static void
nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
}
static void
nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
NV_PRAMDAC_CU_START_POS,
XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
}
static void
crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
{
NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
crtcstate->CRTC[index]);
}
static void
nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
struct drm_device *dev = nv_crtc->base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct drm_crtc *crtc = &nv_crtc->base;
regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
MASK(NV_CIO_CRE_HCUR_ASI) |
XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
int
nv04_cursor_init(struct nouveau_crtc *crtc)
{
crtc->cursor.set_offset = nv04_cursor_set_offset;
crtc->cursor.set_pos = nv04_cursor_set_pos;
crtc->cursor.hide = nv04_cursor_hide;
crtc->cursor.show = nv04_cursor_show;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/dispnv04/cursor.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include "amdgpu_smu.h"
#include "amdgpu_dpm_internal.h"
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
adev->pm.dpm.new_active_crtcs = 0;
adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
}
}
}
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
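/*
* vblank time in us = (htotal * vblank lines) * 1000 / clock (kHz).
* E.g. a hypothetical 1080p CEA timing: 2200 * 45 pixels at a
* 148500 kHz pixel clock gives 99000 * 1000 / 148500 ~= 666 us.
*/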
vblank_in_pixels =
amdgpu_crtc->hw_mode.crtc_htotal *
(amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2));
vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
}
return vblank_time_us;
}
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vrefresh = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
break;
}
}
}
return vrefresh;
}
| linux-master | drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <[email protected]>
* Alex Deucher <[email protected]>
*/
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
static const struct hwmon_temp_label {
enum PP_HWMON_TEMP channel;
const char *label;
} temp_label[] = {
{PP_TEMP_EDGE, "edge"},
{PP_TEMP_JUNCTION, "junction"},
{PP_TEMP_MEM, "mem"},
};
const char * const amdgpu_pp_profile_name[] = {
"BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
"COMPUTE",
"CUSTOM",
"WINDOW_3D",
"CAPPED",
"UNCAPPED",
};
/**
* DOC: power_dpm_state
*
* The power_dpm_state file is a legacy interface and is only provided for
* backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
* certain power-related parameters. The file power_dpm_state is used for this.
* It accepts the following arguments:
*
* - battery
*
* - balanced
*
* - performance
*
* battery
*
* On older GPUs, the vbios provided a special power state for battery
* operation. Selecting battery switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
* balanced
*
* On older GPUs, the vbios provided a special power state for balanced
* operation. Selecting balanced switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
* performance
*
* On older GPUs, the vbios provided a special power state for performance
* operation. Selecting performance switched to this state. This is no
* longer provided on newer GPUs so the option does nothing in that case.
*
*/
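/*
* Illustrative usage from userspace (the card0 sysfs path is an
* assumption and may differ per system):
*
*   $ cat /sys/class/drm/card0/device/power_dpm_state
*   performance
*   $ echo battery > /sys/class/drm/card0/device/power_dpm_state
*/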
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type pm;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_dpm_get_current_power_state(adev, &pm);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0)
state = POWER_STATE_TYPE_PERFORMANCE;
else
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_dpm_set_power_state(adev, state);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
}
/**
* DOC: power_dpm_force_performance_level
*
* The amdgpu driver provides a sysfs API for adjusting certain
* power-related parameters. The file power_dpm_force_performance_level is
* used for this. It accepts the following arguments:
*
* - auto
*
* - low
*
* - high
*
* - manual
*
* - profile_standard
*
* - profile_min_sclk
*
* - profile_min_mclk
*
* - profile_peak
*
* auto
*
* When auto is selected, the driver will attempt to dynamically select
* the optimal power profile for current conditions in the driver.
*
* low
*
* When low is selected, the clocks are forced to the lowest power state.
*
* high
*
* When high is selected, the clocks are forced to the highest power state.
*
* manual
*
* When manual is selected, the user can manually adjust which power states
* are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
* and pp_dpm_pcie files and adjust the power state transition heuristics
* via the pp_power_profile_mode sysfs file.
*
* profile_standard
* profile_min_sclk
* profile_min_mclk
* profile_peak
*
* When the profiling modes are selected, clock and power gating are
* disabled and the clocks are set for different profiling cases. This
* mode is recommended for profiling specific workloads where you do
* not want clock gating, power gating, or clock fluctuation to interfere
* with your results. profile_standard sets the clocks to a fixed clock
* level which varies from asic to asic. profile_min_sclk forces the sclk
* to the lowest level. profile_min_mclk forces the mclk to the lowest level.
* profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
*
*/
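/*
* Illustrative usage from userspace (hypothetical card0 path):
* pin the clocks for a profiling run, then hand control back:
*
*   $ echo profile_peak > /sys/class/drm/card0/device/power_dpm_force_performance_level
*   $ echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level
*/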
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_dpm_forced_level level = 0xff;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
level = amdgpu_dpm_get_performance_level(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
(level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
"unknown");
}
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_dpm_forced_level level;
int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
level = AMD_DPM_FORCED_LEVEL_HIGH;
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = AMD_DPM_FORCED_LEVEL_AUTO;
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
level = AMD_DPM_FORCED_LEVEL_MANUAL;
} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
} else {
return -EINVAL;
}
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
/* override whatever a user ctx may have set */
adev->pm.stable_pstate_ctx = NULL;
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
}
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct pp_states_info data;
uint32_t i;
int buf_len, ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
for (i = 0; i < data.nums; i++)
buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return ret;
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
}
if (i == data.nums)
i = -EINVAL;
return sysfs_emit(buf, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
return sysfs_emit(buf, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state = 0;
struct pp_states_info data;
unsigned long idx;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
adev->pm.pp_force_state_enabled = false;
if (strlen(buf) == 1)
return count;
ret = kstrtoul(buf, 0, &idx);
if (ret || idx >= ARRAY_SIZE(data.states))
return -EINVAL;
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
if (ret)
goto err_out;
state = data.states[idx];
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
ret = amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_ENABLE_USER_STATE, &state);
if (ret)
goto err_out;
adev->pm.pp_force_state_enabled = true;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
err_out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
/**
* DOC: pp_table
*
* The amdgpu driver provides a sysfs API for uploading new powerplay
* tables. The file pp_table is used for this. Reading the file
* will dump the current powerplay table. Writing to the file
* will attempt to upload a new powerplay table and re-initialize
* powerplay using that new table.
*
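* A usage sketch (card0 assumed; the table is raw binary data, so use
* tools that preserve the bytes exactly):
*
* .. code-block:: bash
*
* # dump the current table
* cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
* # upload a (possibly edited) table
* cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
*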
*/
static ssize_t amdgpu_get_pp_table(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
char *table = NULL;
int size, ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
size = amdgpu_dpm_get_pp_table(adev, &table);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (size <= 0)
return size;
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
memcpy(buf, table, size);
return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return ret;
return count;
}
/**
* DOC: pp_od_clk_voltage
*
* The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
* in each power level within a power state. The pp_od_clk_voltage is used for
* this.
*
* Note that the actual memory controller clock rate is exposed, not
* the effective memory clock of the DRAMs. To translate it, use the
* following formula:
*
* Clock conversion (MHz):
*
* HBM: effective_memory_clock = memory_controller_clock * 1
*
* G5: effective_memory_clock = memory_controller_clock * 1
*
* G6: effective_memory_clock = memory_controller_clock * 2
*
* DRAM data rate (MT/s):
*
* HBM: effective_memory_clock * 2 = data_rate
*
* G5: effective_memory_clock * 4 = data_rate
*
* G6: effective_memory_clock * 8 = data_rate
*
* Bandwidth (MB/s):
*
* data_rate * vram_bit_width / 8 = memory_bandwidth
*
* Some examples:
*
* G5 on RX460:
*
* memory_controller_clock = 1750 MHz
*
* effective_memory_clock = 1750 MHz * 1 = 1750 MHz
*
* data rate = 1750 * 4 = 7000 MT/s
*
* memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
*
* G6 on RX5700:
*
* memory_controller_clock = 875 MHz
*
* effective_memory_clock = 875 MHz * 2 = 1750 MHz
*
* data rate = 1750 * 8 = 14000 MT/s
*
* memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
*
* < For Vega10 and previous ASICs >
*
* Reading the file will display:
*
* - a list of engine clock levels and voltages labeled OD_SCLK
*
* - a list of memory clock levels and voltages labeled OD_MCLK
*
* - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
*
* To manually adjust these settings, first select manual using
* power_dpm_force_performance_level. Enter a new value for each
* level by writing a string that contains "s/m level clock voltage" to
* the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
* at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
* 810 mV. When you have edited all of the states as needed, write
* "c" (commit) to the file to commit your changes. If you want to reset to the
* default power levels, write "r" (reset) to the file to reset them.
*
*
* < For Vega20 and newer ASICs >
*
* Reading the file will display:
*
* - minimum and maximum engine clock labeled OD_SCLK
*
* - minimum (not available for Vega20 and Navi1x) and maximum memory
* clock labeled OD_MCLK
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
* They can be used to calibrate the sclk voltage curve. This is
* available for Vega20 and NV1X.
*
* - voltage offset for the six anchor points of the v/f curve labeled
* OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
* is only available for some SMU13 ASICs.
*
* - voltage offset (in mV) applied on target voltage calculation.
* This is available for Sienna Cichlid, Navy Flounder and Dimgrey
* Cavefish. For these ASICs, the target voltage calculation can be
* illustrated by "voltage = voltage calculated from v/f curve +
* overdrive vddgfx offset"
*
* - a list of valid ranges for sclk, mclk, and voltage curve points
* labeled OD_RANGE
*
* < For APUs >
*
* Reading the file will display:
*
* - minimum and maximum engine clock labeled OD_SCLK
*
* - a list of valid ranges for sclk labeled OD_RANGE
*
* < For VanGogh >
*
* Reading the file will display:
*
* - minimum and maximum engine clock labeled OD_SCLK
* - minimum and maximum core clocks labeled OD_CCLK
*
* - a list of valid ranges for sclk and cclk labeled OD_RANGE
*
* To manually adjust these settings:
*
* - First select manual using power_dpm_force_performance_level
*
* - For clock frequency setting, enter a new value by writing a
* string that contains "s/m index clock" to the file. The index
* should be 0 to set the minimum clock and 1 to set the maximum
* clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz,
* and "m 1 800" will update the maximum mclk to 800 MHz. For core
* clocks on VanGogh, the string contains "p core index clock".
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800 MHz.
*
* For sclk voltage curve,
* - For NV1X, enter the new values by writing a string that
* contains "vc point clock voltage" to the file. The points
* are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
* point1 with the clock set to 300 MHz and the voltage to 600 mV.
* "vc 2 1000 1000" will update point3 with the clock set to
* 1000 MHz and the voltage to 1000 mV.
* - For SMU13 ASICs, enter the new values by writing a string that
* contains "vc anchor_point_index voltage_offset" to the file.
* There are total six anchor points defined on the v/f curve with
* index as 0 - 5.
* - "vc 0 10" will update the voltage offset for point1 as 10mv.
* - "vc 5 -10" will update the voltage offset for point6 as -10mv.
*
* To update the voltage offset applied for gfxclk/voltage calculation,
* enter the new value by writing a string that contains "vo offset".
* This is supported by Sienna Cichlid, Navy Flounder and Dimgrey
* Cavefish; the offset can be a positive or negative value.
*
* - When you have edited all of the states as needed, write "c" (commit)
* to the file to commit your changes
*
* - If you want to reset to the default power levels, write "r" (reset)
* to the file to reset them
*
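* A complete example session for the Vega10-style interface above
* (card0 assumed; the values are taken from the examples in this text):
*
* .. code-block:: bash
*
* cd /sys/class/drm/card0/device
* echo "manual" > power_dpm_force_performance_level
* echo "s 1 500 820" > pp_od_clk_voltage
* echo "m 0 350 810" > pp_od_clk_voltage
* echo "c" > pp_od_clk_voltage
*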
*/
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t parameter_size = 0;
long parameter[64];
char buf_cpy[128];
char *tmp_str;
char *sub_str;
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (count > 127)
return -EINVAL;
if (*buf == 's')
type = PP_OD_EDIT_SCLK_VDDC_TABLE;
else if (*buf == 'p')
type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
type = PP_OD_COMMIT_DPM_TABLE;
else if (!strncmp(buf, "vc", 2))
type = PP_OD_EDIT_VDDC_CURVE;
else if (!strncmp(buf, "vo", 2))
type = PP_OD_EDIT_VDDGFX_OFFSET;
else
return -EINVAL;
memcpy(buf_cpy, buf, count+1);
tmp_str = buf_cpy;
if ((type == PP_OD_EDIT_VDDC_CURVE) ||
(type == PP_OD_EDIT_VDDGFX_OFFSET))
tmp_str++;
while (isspace(*++tmp_str));
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
if (strlen(sub_str) == 0)
continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
type,
parameter,
parameter_size))
goto err_out;
if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size))
goto err_out;
if (type == PP_OD_COMMIT_DPM_TABLE) {
if (amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL))
goto err_out;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
err_out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return -EINVAL;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int size = 0;
int ret;
enum pp_clock_type od_clocks[6] = {
OD_SCLK,
OD_MCLK,
OD_VDDC_CURVE,
OD_RANGE,
OD_VDDGFX_OFFSET,
OD_CCLK,
};
uint clk_index;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
if (ret)
break;
}
if (ret == -ENOENT) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
}
if (size == 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
/**
* DOC: pp_features
*
* The amdgpu driver provides a sysfs API for adjusting which powerplay
* features are enabled. The file pp_features is used for this, and it
* is only available for Vega10 and later dGPUs.
*
* Reading back the file will show you the following:
* - Current ppfeature masks
* - List of all the supported powerplay features with their names,
* bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
*
* To manually enable or disable a specific feature, just set or clear
* the corresponding bit from original ppfeature masks and input the
* new ppfeature masks.
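*
* A usage sketch (card0 assumed; the mask value below is purely
* illustrative):
*
* .. code-block:: bash
*
* cat /sys/class/drm/card0/device/pp_features
* echo 0x0000000019f0e9ff > /sys/class/drm/card0/device/pp_features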
*/
static ssize_t amdgpu_set_pp_features(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t featuremask;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
return count;
}
static ssize_t amdgpu_get_pp_features(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
/**
* DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
* pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
* this.
*
* pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
* Vega10 and later ASICs.
* pp_dpm_fclk interface is only available for Vega20 and later ASICs.
*
* Reading back the files will show you the available power levels within
* the power state and the clock information for those levels.
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
* Secondly, enter a new value for each level by writing a string that
* contains the desired level indices to the file, i.e.
* "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
* E.g.,
*
* .. code-block:: bash
*
* echo "4 5 6" > pp_dpm_sclk
*
* will enable sclk levels 4, 5, and 6.
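*
* Reading the file back then shows the available levels again, with
* the currently active level marked by a "*" (a sketch; the clock
* values shown are illustrative and vary by asic):
*
* .. code-block:: bash
*
* cat pp_dpm_sclk
* 0: 300Mhz
* 1: 600Mhz *
* 2: 900Mhz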
*
* NOTE: changing the dcefclk max dpm level is not currently supported
*/
static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
enum pp_clock_type type,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int size = 0;
int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
size = amdgpu_dpm_print_clock_levels(adev, type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
/*
* Worst case: 32 bits individually specified, in octal at 12 characters
* per value (+1 for the separator).
*/
#define AMDGPU_MASK_BUF_MAX (32 * 13)
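/*
* Example: a buf of "0 2 31" parses to mask = BIT(0) | BIT(2) | BIT(31)
* = 0x80000005, i.e. levels 0, 2 and 31 selected.
*/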
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
const char delimiter[3] = {' ', '\n', '\0'};
size_t bytes;
*mask = 0;
bytes = min(count, sizeof(buf_cpy) - 1);
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtoul(sub_str, 0, &level);
if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else
break;
}
return 0;
}
static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
enum pp_clock_type type,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
return -EINVAL;
return count;
}
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}
static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}
static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t value = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
value = amdgpu_dpm_get_sclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
long int value;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t value = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
value = amdgpu_dpm_get_mclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
long int value;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
}
/**
* DOC: pp_power_profile_mode
*
* The amdgpu driver provides a sysfs API for adjusting the heuristics
* related to switching between power levels in a power state. The file
* pp_power_profile_mode is used for this.
*
* Reading this file outputs a list of all of the predefined power profiles
* and the relevant heuristics settings for that profile.
*
* To select a profile or create a custom profile, first select manual using
* power_dpm_force_performance_level. Writing the number of a predefined
* profile to pp_power_profile_mode will enable those heuristics. To
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families,
* the heuristic parameters vary from family to family.
*
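* A usage sketch (card0 assumed; profile indices and their meaning
* differ per asic, so read the file first to see the list):
*
* .. code-block:: bash
*
* cat /sys/class/drm/card0/device/pp_power_profile_mode
* echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
* echo "1" > /sys/class/drm/card0/device/pp_power_profile_mode
*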
*/
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int ret;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t parameter_size = 0;
long parameter[64];
char *sub_str, buf_cpy[128];
char *tmp_str;
uint32_t i = 0;
char tmp[2];
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
return -EINVAL;
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
return -EINVAL;
while (isspace(*++buf))
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
if (strlen(sub_str) == 0)
continue;
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (!ret)
return count;
return -EINVAL;
}
static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
enum amd_pp_sensors sensor,
void *query)
{
int r, size = sizeof(uint32_t);
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
/**
* DOC: gpu_busy_percent
*
* The amdgpu driver provides a sysfs API for reading how busy the GPU
* is as a percentage. The file gpu_busy_percent is used for this.
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
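*
* A usage sketch (card0 assumed):
*
* .. code-block:: bash
*
* cat /sys/class/drm/card0/device/gpu_busy_percent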
*/
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
unsigned int value;
int r;
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
return sysfs_emit(buf, "%d\n", value);
}
/**
* DOC: mem_busy_percent
*
* The amdgpu driver provides a sysfs API for reading how busy the VRAM
* is as a percentage. The file mem_busy_percent is used for this.
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
unsigned int value;
int r;
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
return sysfs_emit(buf, "%d\n", value);
}
/**
* DOC: pcie_bw
*
* The amdgpu driver provides a sysfs API for estimating how much data
* has been received and sent by the GPU in the last second through PCIe.
* The file pcie_bw is used for this.
* The Perf counters count the number of received and sent messages and return
* those values, as well as the maximum payload size of a PCIe packet (mps).
* Note that it is not possible to easily and quickly obtain the size of each
* packet transmitted, so we output the max payload size (mps) to allow for
* quick estimation of the PCIe bandwidth usage.
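*
* The file returns three values: "count0 count1 mps". A rough upper
* bound on usage (a sketch; it assumes every counted message carried a
* full-size payload, and that count0/count1 are the received/sent
* counts as described above):
*
* .. code-block:: bash
*
* read rcvd sent mps < /sys/class/drm/card0/device/pcie_bw
* echo "received <= $((rcvd * mps)) B/s, sent <= $((sent * mps)) B/s"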
*/
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t count0 = 0, count1 = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (adev->flags & AMD_IS_APU)
return -ENODATA;
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%llu %llu %i\n",
count0, count1, pcie_get_mps(adev->pdev));
}
/**
* DOC: unique_id
*
* The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
* The file unique_id is used for this.
* This will provide a unique ID that will persist from machine to machine.
*
* NOTE: This will only work for GFX9 and newer. This file will be absent
* on unsupported ASICs (GFX8 and older).
*/
static ssize_t amdgpu_get_unique_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (adev->unique_id)
return sysfs_emit(buf, "%016llx\n", adev->unique_id);
return 0;
}
/**
* DOC: thermal_throttling_logging
*
* Thermal throttling pulls down the clock frequency and thus the performance.
* It's a useful mechanism to protect the chip from overheating. Since it
* impacts performance, the user controls whether it is enabled and if so,
* the log frequency.
*
* Reading back the file shows you the status (enabled or disabled) and
* the interval (in seconds) between each thermal logging event.
*
* Writing an integer to the file sets a new logging interval in seconds.
* The value should be between 1 and 3600. If the value is less than 1,
* thermal logging is disabled. Values greater than 3600 are rejected.
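*
* A usage sketch (card0 assumed):
*
* .. code-block:: bash
*
* # log throttling events at most once every 60 seconds
* echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
* # disable throttling logging entirely
* echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging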
*/
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
adev_to_drm(adev)->unique,
atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
adev->throttling_logging_rs.interval / HZ + 1);
}
static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
if (ret)
return ret;
if (throttling_logging_interval > 3600)
return -EINVAL;
if (throttling_logging_interval > 0) {
raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
adev->throttling_logging_rs.interval =
(throttling_logging_interval - 1) * HZ;
adev->throttling_logging_rs.begin = 0;
adev->throttling_logging_rs.printed = 0;
adev->throttling_logging_rs.missed = 0;
raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
}
return count;
}
/**
* DOC: apu_thermal_cap
*
* The amdgpu driver provides a sysfs API for retrieving/updating thermal
* limit temperature in millidegrees Celsius
*
* Reading back the file shows you the core limit value.
*
* Writing an integer to the file sets a new thermal limit. The value
* should be between 0 and 100; values outside that range are rejected
* with an error.
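*
* A usage sketch (card0 assumed):
*
* .. code-block:: bash
*
* cat /sys/class/drm/card0/device/apu_thermal_cap
* echo 95 > /sys/class/drm/card0/device/apu_thermal_cap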
*/
static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int ret, size;
u32 limit;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
size = sysfs_emit(buf, "%u\n", limit);
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int ret;
u32 value;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
ret = kstrtou32(buf, 10, &value);
if (ret)
return ret;
if (value > 100) {
dev_err(dev, "Invalid argument !\n");
return -EINVAL;
}
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
dev_err(dev, "failed to update thermal limit\n");
/* drop the runtime PM reference taken above before returning */
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
}
/**
* DOC: gpu_metrics
*
* The amdgpu driver provides a sysfs API for retrieving current gpu
* metrics data. The file gpu_metrics is used for this. Reading the
* file will dump all the current gpu metrics data.
*
* The data include temperature, frequency, engine utilization,
* power consumption, throttler status, fan speed and cpu core
* statistics (available for APUs only). That is, it gives a snapshot
* of all sensors at the same time.
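*
* The contents are a binary, versioned gpu_metrics structure, so dump
* them with a tool that handles raw bytes (a sketch; card0 assumed):
*
* .. code-block:: bash
*
* xxd /sys/class/drm/card0/device/gpu_metrics | head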
*/
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
void *gpu_metrics;
ssize_t size = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
goto out;
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
memcpy(buf, gpu_metrics, size);
out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
}
static int amdgpu_show_powershift_percent(struct device *dev,
char *buf, enum amd_pp_sensors sensor)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t ss_power;
int r = 0, i;
r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
mutex_lock(&mgpu_info.mutex);
for (i = 0; i < mgpu_info.num_gpu; i++) {
if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
adev = mgpu_info.gpu_ins[i].adev;
break;
}
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
if (r)
return r;
return sysfs_emit(buf, "%u%%\n", ss_power);
}
/**
* DOC: smartshift_apu_power
*
* The amdgpu driver provides a sysfs API for reporting APU power
* shift in percentage if the platform supports smartshift. A value of 0
* means that there is no power shift, and values in [1-100] mean that
* power is shifted to the APU; the percentage of boost is with respect
* to the APU power limit on the platform.
*/
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
char *buf)
{
return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
}
/**
* DOC: smartshift_dgpu_power
*
* The amdgpu driver provides a sysfs API for reporting dGPU power
* shift in percentage if the platform supports smartshift. A value of 0
* means that there is no power shift, and values in [1-100] mean that
* power is shifted to the dGPU; the percentage of boost is with respect
* to the dGPU power limit on the platform.
*/
static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
char *buf)
{
return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
}
/**
* DOC: smartshift_bias
*
* The amdgpu driver provides a sysfs API for reporting the
* smartshift (SS2.0) bias level. The value ranges from -100 to 100
* and the default is 0. -100 sets maximum preference to the APU
* and 100 sets maximum preference to the dGPU.
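*
* A usage sketch (card0 assumed):
*
* .. code-block:: bash
*
* # strongly prefer shifting power to the dGPU
* echo 100 > /sys/class/drm/card0/device/smartshift_bias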
*/
static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int r = 0;
r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
return r;
}
static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int r = 0;
int bias = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return r;
}
r = kstrtoint(buf, 10, &bias);
if (r)
goto out;
if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
amdgpu_smartshift_bias = bias;
r = count;
/* TODO: update bias level with SMU message */
out:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return r;
}
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
uint32_t ss_power;
if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
*states = ATTR_STATE_UNSUPPORTED;
else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
(void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
(void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
.attr_update = ss_bias_attr_update),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
const char *attr_name = dev_attr->attr.name;
if (!(attr->flags & mask)) {
*states = ATTR_STATE_UNSUPPORTED;
return 0;
}
#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
if (gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
if (gc_ver < IP_VERSION(9, 0, 0) ||
!amdgpu_device_has_display_hardware(adev))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (mp1_ver < IP_VERSION(10, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
if (adev->flags & AMD_IS_APU)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
switch (gc_ver) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
*states = ATTR_STATE_UNSUPPORTED;
}
} else if (DEVICE_ATTR_IS(pp_features)) {
if ((adev->flags & AMD_IS_APU &&
gc_ver != IP_VERSION(9, 4, 3)) ||
gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3) ||
gc_ver == IP_VERSION(9, 4, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
*states = ATTR_STATE_UNSUPPORTED;
}
switch (gc_ver) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
DEVICE_ATTR_IS(pp_dpm_socclk) ||
DEVICE_ATTR_IS(pp_dpm_fclk)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
break;
case IP_VERSION(10, 3, 0):
if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
amdgpu_sriov_vf(adev)) {
dev_attr->attr.mode &= ~0222;
dev_attr->store = NULL;
}
break;
default:
break;
}
if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
/* SMU MP1 does not support dcefclk level setting */
if (gc_ver >= IP_VERSION(10, 0, 0)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
}
/* setting should not be allowed from VF if not in one VF mode */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
#undef DEVICE_ATTR_IS
return 0;
}
static int amdgpu_device_attr_create(struct amdgpu_device *adev,
struct amdgpu_device_attr *attr,
uint32_t mask, struct list_head *attr_list)
{
int ret = 0;
enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
struct amdgpu_device_attr_entry *attr_entry;
struct device_attribute *dev_attr;
const char *name;
int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
if (!attr)
return -EINVAL;
dev_attr = &attr->dev_attr;
name = dev_attr->attr.name;
attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
ret = attr_update(adev, attr, mask, &attr_states);
if (ret) {
dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
name, ret);
return ret;
}
if (attr_states == ATTR_STATE_UNSUPPORTED)
return 0;
ret = device_create_file(adev->dev, dev_attr);
if (ret) {
dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
name, ret);
}
attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
if (!attr_entry)
return -ENOMEM;
attr_entry->attr = attr;
INIT_LIST_HEAD(&attr_entry->entry);
list_add_tail(&attr_entry->entry, attr_list);
return ret;
}
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
struct device_attribute *dev_attr = &attr->dev_attr;
device_remove_file(adev->dev, dev_attr);
}
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
struct list_head *attr_list);
static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
struct amdgpu_device_attr *attrs,
uint32_t counts,
uint32_t mask,
struct list_head *attr_list)
{
int ret = 0;
uint32_t i = 0;
for (i = 0; i < counts; i++) {
ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
if (ret)
goto failed;
}
return 0;
failed:
amdgpu_device_attr_remove_groups(adev, attr_list);
return ret;
}
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
struct list_head *attr_list)
{
struct amdgpu_device_attr_entry *entry, *entry_tmp;
if (list_empty(attr_list))
return;
list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
amdgpu_device_attr_remove(adev, entry->attr);
list_del(&entry->entry);
kfree(entry);
}
}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
(void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
(void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
(void *)&temp);
break;
default:
r = -EINVAL;
break;
}
if (r)
return r;
return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
if (hyst)
temp = adev->pm.dpm.thermal.min_temp;
else
temp = adev->pm.dpm.thermal.max_temp;
return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
if (hyst)
temp = adev->pm.dpm.thermal.min_hotspot_temp;
else
temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
if (hyst)
temp = adev->pm.dpm.thermal.min_mem_temp;
else
temp = adev->pm.dpm.thermal.max_mem_crit_temp;
return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int channel = to_sensor_dev_attr(attr)->index;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
return sysfs_emit(buf, "%s\n", temp_label[channel].label);
}
static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int channel = to_sensor_dev_attr(attr)->index;
int temp = 0;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
switch (channel) {
case PP_TEMP_JUNCTION:
temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
break;
case PP_TEMP_EDGE:
temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
break;
case PP_TEMP_MEM:
temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
break;
}
return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return sysfs_emit(buf, "%u\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err, ret;
int value;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = kstrtoint(buf, 10, &value);
if (err)
return err;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
ret = amdgpu_dpm_set_fan_control_mode(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 value;
u32 pwm_mode;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
err = -EINVAL;
goto out;
}
err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return count;
}
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return sysfs_emit(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 min_rpm = 0;
int r;
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm);
if (r)
return r;
return sysfs_emit(buf, "%d\n", min_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 max_rpm = 0;
int r;
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm);
if (r)
return r;
return sysfs_emit(buf, "%d\n", max_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 rpm = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return sysfs_emit(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 value;
u32 pwm_mode;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
err = -ENODATA;
goto out;
}
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return count;
}
static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
int ret;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
int value;
u32 pwm_mode;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
err = kstrtoint(buf, 10, &value);
if (err)
return err;
if (value == 0)
pwm_mode = AMD_FAN_CTRL_AUTO;
else if (value == 1)
pwm_mode = AMD_FAN_CTRL_MANUAL;
else
return -EINVAL;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return -EINVAL;
return count;
}
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 vddgfx;
int r;
/* get the voltage */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx);
if (r)
return r;
return sysfs_emit(buf, "%d\n", vddgfx);
}
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "vddgfx\n");
}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 vddnb;
int r;
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* get the voltage */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb);
if (r)
return r;
return sysfs_emit(buf, "%d\n", vddnb);
}
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "vddnb\n");
}
static ssize_t amdgpu_hwmon_get_power(struct device *dev,
enum amd_pp_sensors sensor)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
unsigned int uw;
u32 query = 0;
int r;
r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
/* convert to microwatts */
uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
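	/*
	 * Worked example (assumed reading): query = 0x2a32 has 0x2a (42) in
	 * the high byte and 0x32 (50) in the low byte, so this computes
	 * 42 * 1000000 + 50 * 1000 = 42050000 microwatts.
	 */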
return uw;
}
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
struct device_attribute *attr,
char *buf)
{
	ssize_t val;
val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
if (val < 0)
return val;
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
struct device_attribute *attr,
char *buf)
{
	ssize_t val;
val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
if (val < 0)
return val;
return sysfs_emit(buf, "%u\n", val);
}
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
struct device_attribute *attr,
char *buf,
enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
uint32_t limit;
ssize_t size;
int r;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
if (!r)
size = sysfs_emit(buf, "%u\n", limit * 1000000);
else
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return size;
}
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}
static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}
static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
if (gc_ver == IP_VERSION(10, 3, 1))
return sysfs_emit(buf, "%s\n",
to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
"fastPPT" : "slowPPT");
else
return sysfs_emit(buf, "PPT\n");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
	value = value / 1000000; /* convert microwatts to watts */
value |= limit_type << 24;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
err = amdgpu_dpm_set_power_limit(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
return count;
}
static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t sclk;
int r;
/* get the sclk */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk);
if (r)
return r;
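	/*
	 * The sensor reports sclk in 10 kHz units; multiplying by 10 * 1000
	 * yields the hertz value hwmon expects. For example, an assumed
	 * reading of 135000 corresponds to 1350000000 Hz (1.35 GHz).
	 */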
return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "sclk\n");
}
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t mclk;
int r;
	/* get the mclk */
r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk);
if (r)
return r;
return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "mclk\n");
}
/**
* DOC: hwmon
*
* The amdgpu driver exposes the following sensor interfaces:
*
* - GPU temperature (via the on-die sensor)
*
* - GPU voltage
*
* - Northbridge voltage (APUs only)
*
* - GPU power
*
* - GPU fan
*
* - GPU gfx/compute engine clock
*
* - GPU memory clock (dGPU only)
*
* hwmon interfaces for GPU temperature:
*
* - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
* - temp2_input and temp3_input are supported on SOC15 dGPUs only
*
* - temp[1-3]_label: temperature channel label
* - temp2_label and temp3_label are supported on SOC15 dGPUs only
*
* - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
* - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
*
* - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
* - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
*
* - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
* - these are supported on SOC15 dGPUs only
*
* hwmon interfaces for GPU voltage:
*
* - in0_input: the voltage on the GPU in millivolts
*
* - in1_input: the voltage on the Northbridge in millivolts
*
* hwmon interfaces for GPU power:
*
* - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
*
* - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
*
* - power1_cap_min: minimum cap supported in microWatts
*
* - power1_cap_max: maximum cap supported in microWatts
*
* - power1_cap: selected power cap in microWatts
*
* hwmon interfaces for GPU fan:
*
* - pwm1: pulse width modulation fan level (0-255)
*
* - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
*
* - pwm1_min: pulse width modulation fan control minimum level (0)
*
* - pwm1_max: pulse width modulation fan control maximum level (255)
*
 * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM)
 *
 * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM)
*
* - fan1_input: fan speed in RPM
*
 * - fan[1-\*]_target: desired fan speed. Unit: revolutions/min (RPM)
 *
 * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable, 0: Disable
*
* NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
 * Setting both at the same time causes the former to be overridden.
*
* hwmon interfaces for GPU clocks:
*
* - freq1_input: the gfx/compute clock in hertz
*
* - freq2_input: the memory clock in hertz
*
* You can use hwmon tools like sensors to view this information on your system.
*
*/
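/*
 * Illustrative userspace sketch (not part of the driver): reading one of
 * the attributes documented above. The hwmon0 path is an assumption; the
 * actual index varies per system and should be found by matching the
 * "name" attribute against "amdgpu".
 *
 * #include <stdio.h>
 *
 * int main(void)
 * {
 *	// power1_average reports microwatts, per the documentation above
 *	FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
 *	unsigned long long uw;
 *
 *	if (!f)
 *		return 1;
 *	if (fscanf(f, "%llu", &uw) == 1)
 *		printf("average GPU power: %llu.%06llu W\n",
 *		       uw / 1000000, uw % 1000000);
 *	fclose(f);
 *	return 0;
 * }
 */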
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_crit.dev_attr.attr,
&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_emergency.dev_attr.attr,
&sensor_dev_attr_temp2_emergency.dev_attr.attr,
&sensor_dev_attr_temp3_emergency.dev_attr.attr,
&sensor_dev_attr_temp1_label.dev_attr.attr,
&sensor_dev_attr_temp2_label.dev_attr.attr,
&sensor_dev_attr_temp3_label.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_max.dev_attr.attr,
&sensor_dev_attr_fan1_target.dev_attr.attr,
&sensor_dev_attr_fan1_enable.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
&sensor_dev_attr_power1_cap_default.dev_attr.attr,
&sensor_dev_attr_power1_label.dev_attr.attr,
&sensor_dev_attr_power2_average.dev_attr.attr,
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
&sensor_dev_attr_power2_cap.dev_attr.attr,
&sensor_dev_attr_power2_cap_default.dev_attr.attr,
&sensor_dev_attr_power2_label.dev_attr.attr,
&sensor_dev_attr_freq1_input.dev_attr.attr,
&sensor_dev_attr_freq1_label.dev_attr.attr,
&sensor_dev_attr_freq2_input.dev_attr.attr,
&sensor_dev_attr_freq2_label.dev_attr.attr,
NULL
};
static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = kobj_to_dev(kobj);
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
uint32_t tmp;
/* under multi-vf mode, the hwmon attributes are all not supported */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
/* under pp one vf mode manage of hwmon attributes is not supported */
if (amdgpu_sriov_is_pp_one_vf(adev))
effective_mode &= ~S_IWUSR;
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
/* Skip fan attributes on APU */
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
/* Skip crit temp on APU */
if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
(gc_ver != IP_VERSION(9, 4, 3)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
	/* not implemented yet for APUs older than GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0;
/* not all products support both average and instantaneous */
if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
return 0;
if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* hide max/min values if we can't both query and manage the fan */
if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
(amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
(amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
(amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
(amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
(attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
return 0;
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
(gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
	/* only APUs other than gc 9.4.3 have vddnb */
if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
	/* no mclk on APUs other than gc 9.4.3 */
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
return 0;
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
(gc_ver != IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
return 0;
	/* hotspot temperature for gc 9.4.3 */
if ((gc_ver == IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
return 0;
/* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
return 0;
/* only Vangogh has fast PPT limit and power labels */
if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
(attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
.is_visible = hwmon_attributes_visible,
};
static const struct attribute_group *hwmon_groups[] = {
&hwmon_attrgroup,
NULL
};
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
int ret;
uint32_t mask = 0;
if (adev->pm.sysfs_initialized)
return 0;
INIT_LIST_HEAD(&adev->pm.pm_attr_list);
if (adev->pm.dpm_enabled == 0)
return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
if (IS_ERR(adev->pm.int_hwmon_dev)) {
ret = PTR_ERR(adev->pm.int_hwmon_dev);
dev_err(adev->dev,
"Unable to register hwmon device: %d\n", ret);
return ret;
}
switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
case SRIOV_VF_MODE_ONE_VF:
mask = ATTR_FLAG_ONEVF;
break;
case SRIOV_VF_MODE_MULTI_VF:
mask = 0;
break;
case SRIOV_VF_MODE_BARE_METAL:
default:
mask = ATTR_FLAG_MASK_ALL;
break;
}
ret = amdgpu_device_attr_create_groups(adev,
amdgpu_device_attrs,
ARRAY_SIZE(amdgpu_device_attrs),
mask,
&adev->pm.pm_attr_list);
if (ret)
return ret;
adev->pm.sysfs_initialized = true;
return 0;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
struct amdgpu_device *adev)
{
uint16_t *p_val;
uint32_t size;
int i;
uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;
		size = sizeof(uint16_t) * num_cpu_cores;
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
(void *)p_val, &size)) {
for (i = 0; i < num_cpu_cores; i++)
seq_printf(m, "\t%u MHz (CPU%d)\n",
*(p_val + i), i);
}
kfree(p_val);
}
}
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
uint32_t value;
uint64_t value64 = 0;
uint32_t query = 0;
int size;
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
amdgpu_debugfs_prints_cpu_info(m, adev);
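	/*
	 * Clock sensors report 10 kHz units, hence the value/100 below to
	 * print MHz; e.g. an assumed reading of 135000 prints as 1350 MHz.
	 */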
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDGFX)\n", value);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
/* GPU Temp */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
seq_printf(m, "GPU Temperature: %u C\n", value/1000);
/* GPU Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
seq_printf(m, "GPU Load: %u %%\n", value);
/* MEM Load */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
seq_printf(m, "MEM Load: %u %%\n", value);
seq_printf(m, "\n");
/* SMC feature mask */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
	/* ASICs newer than CHIP_VEGA20 support these sensors */
if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
/* VCN clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
if (!value) {
seq_printf(m, "VCN: Disabled\n");
} else {
seq_printf(m, "VCN: Enabled\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
}
}
seq_printf(m, "\n");
} else {
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
seq_printf(m, "UVD: Disabled\n");
} else {
seq_printf(m, "UVD: Enabled\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
}
}
seq_printf(m, "\n");
/* VCE clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
if (!value) {
seq_printf(m, "VCE: Disabled\n");
} else {
seq_printf(m, "VCE: Enabled\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
}
}
}
return 0;
}
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
{0, NULL},
};
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
int i;
for (i = 0; clocks[i].flag; i++)
seq_printf(m, "\t%s: %s\n", clocks[i].name,
(flags & clocks[i].flag) ? "On" : "Off");
}
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct drm_device *dev = adev_to_drm(adev);
u64 flags = 0;
int r;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(dev->dev);
return r;
}
if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
if (r)
goto out;
}
amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
*
* Reads debug memory region allocated to PMFW
*/
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
size_t smu_prv_buf_size;
void *smu_prv_buf;
int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
if (ret)
return ret;
if (!smu_prv_buf || !smu_prv_buf_size)
return -EINVAL;
return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
smu_prv_buf_size);
}
static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = amdgpu_pm_prv_buffer_read,
.llseek = default_llseek,
};
#endif
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
if (!adev->pm.dpm_enabled)
return;
debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
&amdgpu_debugfs_pm_info_fops);
if (adev->pm.smu_prv_buffer_size > 0)
debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
adev,
&amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size);
amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
| linux-master | drivers/gpu/drm/amd/pm/amdgpu_pm.c |
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_sclk)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
low);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_mclk)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
low);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
}
mutex_lock(&adev->pm.mutex);
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
break;
default:
break;
}
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_set_gfx_power_up_by_imu(smu);
mutex_unlock(&adev->pm.mutex);
msleep(10);
return ret;
}
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
int ret = 0;
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
int ret = 0;
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (pp_funcs && pp_funcs->set_mp1_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
mp1_state);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
bool baco_cap;
int ret = 0;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
	/* Don't use BACO for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
* seems to cause reboots or hangs.
* This might be related to the fact that BACO controls
* power to the whole GPU including devices like audio and USB.
* Powering down/up everything may adversely affect these other
* devices. Needs more investigation.
*/
if (adev->in_s3)
return false;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_asic_baco_capability(pp_handle,
&baco_cap);
mutex_unlock(&adev->pm.mutex);
return ret ? false : baco_cap;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
int ret = 0;
if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->asic_reset_mode_2(pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
int ret = 0;
if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
int ret = 0;
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
if (ret)
goto out;
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
out:
mutex_unlock(&adev->pm.mutex);
return ret;
}
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
bool support_mode1_reset = false;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
support_mode1_reset = smu_mode1_reset_is_support(smu);
mutex_unlock(&adev->pm.mutex);
}
return support_mode1_reset;
}
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = -EOPNOTSUPP;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_mode1_reset(smu);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
if (pp_funcs && pp_funcs->switch_power_profile) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->switch_power_profile(
adev->powerplay.pp_handle, type, en);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (pp_funcs && pp_funcs->set_xgmi_pstate) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
pstate);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
if (pp_funcs && pp_funcs->set_df_cstate) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_allow_xgmi_power_down(smu, en);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs =
adev->powerplay.pp_funcs;
int ret = 0;
if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
uint32_t msg_id)
{
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs =
adev->powerplay.pp_funcs;
int ret = 0;
if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_clockgating_by_smu(pp_handle,
msg_id);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
bool acquire)
{
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs =
adev->powerplay.pp_funcs;
int ret = -EOPNOTSUPP;
if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->smu_i2c_bus_access(pp_handle,
acquire);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
if (is_support_sw_smu(adev))
smu_set_ac_dc(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = -EINVAL;
if (!data || !size)
return -EINVAL;
if (pp_funcs && pp_funcs->read_sensor) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
sensor,
data,
size);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = -EINVAL;
if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = -EINVAL;
if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int i;
if (!adev->pm.dpm_enabled)
return;
if (!pp_funcs->pm_compute_clocks)
return;
if (adev->mode_info.num_crtc)
amdgpu_display_bandwidth_update(adev);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->sched.ready)
amdgpu_fence_wait_empty(ring);
}
mutex_lock(&adev->pm.mutex);
pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
if (adev->family == AMDGPU_FAMILY_SI) {
mutex_lock(&adev->pm.mutex);
if (enable) {
adev->pm.dpm.uvd_active = true;
adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
} else {
adev->pm.dpm.uvd_active = false;
}
mutex_unlock(&adev->pm.mutex);
amdgpu_dpm_compute_clocks(adev);
return;
}
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
if (adev->family == AMDGPU_FAMILY_SI) {
mutex_lock(&adev->pm.mutex);
if (enable) {
adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
} else {
adev->pm.dpm.vce_active = false;
}
mutex_unlock(&adev->pm.mutex);
amdgpu_dpm_compute_clocks(adev);
return;
}
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
}
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int r = 0;
if (!pp_funcs || !pp_funcs->load_firmware)
return 0;
mutex_lock(&adev->pm.mutex);
r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
if (r) {
pr_err("smu firmware loading failed\n");
goto out;
}
if (smu_version)
*smu_version = adev->pm.fw_version;
out:
mutex_unlock(&adev->pm.mutex);
return r;
}
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
enable);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_pages_num(smu, size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_channel_flag(smu, size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
uint32_t *max)
{
int ret = 0;
if (type != PP_SCLK)
return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
SMU_SCLK,
min,
max);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t min,
uint32_t max)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (type != PP_SCLK)
return -EINVAL;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_set_soft_freq_range(smu,
SMU_SCLK,
min,
max);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return 0;
mutex_lock(&adev->pm.mutex);
ret = smu_write_watermarks_table(smu);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
enum smu_event_type event,
uint64_t event_arg)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_wait_for_event(smu, event, event_arg);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_set_residency_gfxoff(smu, value);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_get_residency_gfxoff(smu, value);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_get_entrycount_gfxoff(smu, value);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_get_status_gfxoff(smu, value);
mutex_unlock(&adev->pm.mutex);
return ret;
}
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev))
return 0;
return atomic64_read(&smu->throttle_int_counter);
}
/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 - sGpuChangeState_D0Entry and 2 - sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
enum gfx_change_state state)
{
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->gfx_state_change_set)
((adev)->powerplay.pp_funcs->gfx_state_change_set(
(adev)->powerplay.pp_handle, state));
mutex_unlock(&adev->pm.mutex);
}
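/*
 * Illustrative call site (assumed, not from this file): an ACPI power
 * notification handler could request D0 entry with
 *	amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
 * using the state values documented above.
 */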
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
void *umc_ecc)
{
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
if (!is_support_sw_smu(adev))
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = smu_get_ecc_info(smu, umc_ecc);
mutex_unlock(&adev->pm.mutex);
return ret;
}
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
uint32_t idx)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct amd_vce_state *vstate = NULL;
if (!pp_funcs->get_vce_clock_state)
return NULL;
mutex_lock(&adev->pm.mutex);
vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
idx);
mutex_unlock(&adev->pm.mutex);
return vstate;
}
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
enum amd_pm_state_type *state)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
mutex_lock(&adev->pm.mutex);
if (!pp_funcs->get_current_power_state) {
*state = adev->pm.dpm.user_state;
goto out;
}
*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
if (*state < POWER_STATE_TYPE_DEFAULT ||
*state > POWER_STATE_TYPE_INTERNAL_3DPERF)
*state = adev->pm.dpm.user_state;
out:
mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
enum amd_pm_state_type state)
{
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
if (is_support_sw_smu(adev))
return;
if (amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_ENABLE_USER_STATE,
&state) == -EOPNOTSUPP)
amdgpu_dpm_compute_clocks(adev);
}
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
if (!pp_funcs)
return AMD_DPM_FORCED_LEVEL_AUTO;
mutex_lock(&adev->pm.mutex);
if (pp_funcs->get_performance_level)
level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
else
level = adev->pm.dpm.forced_level;
mutex_unlock(&adev->pm.mutex);
return level;
}
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level current_level;
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
if (!pp_funcs || !pp_funcs->force_performance_level)
return 0;
if (adev->pm.dpm.thermal_active)
return -EINVAL;
current_level = amdgpu_dpm_get_performance_level(adev);
if (current_level == level)
return 0;
if (adev->asic_type == CHIP_RAVEN) {
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
level == AMD_DPM_FORCED_LEVEL_MANUAL)
amdgpu_gfx_off_ctrl(adev, false);
else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
level != AMD_DPM_FORCED_LEVEL_MANUAL)
amdgpu_gfx_off_ctrl(adev, true);
}
}
if (!(current_level & profile_mode_mask) &&
(level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
return -EINVAL;
if (!(current_level & profile_mode_mask) &&
(level & profile_mode_mask)) {
/* enter UMD Pstate */
amdgpu_device_ip_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
amdgpu_device_ip_set_clockgating_state(adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
} else if ((current_level & profile_mode_mask) &&
!(level & profile_mode_mask)) {
/* exit UMD Pstate */
amdgpu_device_ip_set_clockgating_state(adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_GATE);
amdgpu_device_ip_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_GATE);
}
mutex_lock(&adev->pm.mutex);
if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
level)) {
mutex_unlock(&adev->pm.mutex);
return -EINVAL;
}
adev->pm.dpm.forced_level = level;
mutex_unlock(&adev->pm.mutex);
return 0;
}
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
struct pp_states_info *states)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_pp_num_states)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
states);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->dispatch_tasks)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
task_id,
user_state);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_pp_table)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
uint32_t type,
long *input,
uint32_t size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_fine_grain_clk_vol)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
type,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
uint32_t type,
long *input,
uint32_t size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->odn_edit_dpm_table)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
type,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
enum pp_clock_type type,
char *buf)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->print_clock_levels)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
type,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
enum pp_clock_type type,
char *buf,
int *offset)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->emit_clock_levels)
return -ENOENT;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
type,
buf,
offset);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
uint64_t ppfeature_masks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_ppfeature_status)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
ppfeature_masks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_ppfeature_status)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t mask)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->force_clock_level)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
type,
mask);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_sclk_od)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
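/*
 * Apply an sclk overdrive value and ask the backend to readjust the
 * power state; legacy backends without dispatch_tasks fall back to
 * recomputing clocks from the boot state.
 */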
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (is_support_sw_smu(adev))
return 0;
mutex_lock(&adev->pm.mutex);
if (pp_funcs->set_sclk_od)
pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
mutex_unlock(&adev->pm.mutex);
if (amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL) == -EOPNOTSUPP) {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_dpm_compute_clocks(adev);
}
return 0;
}
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_mclk_od)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (is_support_sw_smu(adev))
return 0;
mutex_lock(&adev->pm.mutex);
if (pp_funcs->set_mclk_od)
pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
mutex_unlock(&adev->pm.mutex);
if (amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL) == -EOPNOTSUPP) {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_dpm_compute_clocks(adev);
}
return 0;
}
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
char *buf)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_power_profile_mode)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_power_profile_mode)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_gpu_metrics)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_fan_control_mode)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
fan_mode);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t speed)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_fan_speed_pwm)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t *speed)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_fan_speed_pwm)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t *speed)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_fan_speed_rpm)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t speed)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_fan_speed_rpm)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
uint32_t mode)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_fan_control_mode)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
mode);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_power_limit)
return -ENODATA;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
limit,
pp_limit_level,
power_type);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_power_limit)
return -EINVAL;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
limit);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
bool cclk_dpm_supported = false;
if (!is_support_sw_smu(adev))
return false;
mutex_lock(&adev->pm.mutex);
cclk_dpm_supported = is_support_cclk_dpm(adev);
mutex_unlock(&adev->pm.mutex);
return (int)cclk_dpm_supported;
}
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!pp_funcs->debugfs_print_current_performance_level)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
m);
mutex_unlock(&adev->pm.mutex);
return 0;
}
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
void **addr,
size_t *size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_smu_prv_buf_details)
return -ENOSYS;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
addr,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
if (is_support_sw_smu(adev)) {
struct smu_context *smu = adev->powerplay.pp_handle;
return (smu->od_enabled || smu->is_apu);
} else {
struct pp_hwmgr *hwmgr;
/*
* DPM on some legacy ASICs doesn't carry the od_enabled member,
* as its pp_handle is cast directly from adev.
*/
if (amdgpu_dpm_is_legacy_dpm(adev))
return false;
hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
return hwmgr->od_enabled;
}
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_pp_table)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
buf,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
struct smu_context *smu = adev->powerplay.pp_handle;
if (!is_support_sw_smu(adev))
return INT_MAX;
return smu->cpu_core_num;
}
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
if (!is_support_sw_smu(adev))
return;
amdgpu_smu_stb_debug_fs_init(adev);
}
int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
const struct amd_pp_display_configuration *input)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->display_configuration_change)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
input);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_clock_by_type)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
type,
clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
struct amd_pp_simple_clock_info *clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_display_mode_validation_clocks)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_clock_by_type_with_latency)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
type,
clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_clock_by_type_with_voltage)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
type,
clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
void *clock_ranges)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_watermarks_for_clocks_ranges)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
clock_ranges);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
struct pp_display_clock_request *clock)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->display_clock_voltage_request)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
clock);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
struct amd_pp_clock_info *clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_current_clocks)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!pp_funcs->notify_smu_enable_pwe)
return;
mutex_lock(&adev->pm.mutex);
pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}
int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
uint32_t count)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_active_display_count)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
count);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
uint32_t clock)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->set_min_deep_sleep_dcefclk)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
clock);
mutex_unlock(&adev->pm.mutex);
return ret;
}
void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
uint32_t clock)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!pp_funcs->set_hard_min_dcefclk_by_freq)
return;
mutex_lock(&adev->pm.mutex);
pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
clock);
mutex_unlock(&adev->pm.mutex);
}
void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
uint32_t clock)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!pp_funcs->set_hard_min_fclk_by_freq)
return;
mutex_lock(&adev->pm.mutex);
pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
clock);
mutex_unlock(&adev->pm.mutex);
}
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
bool disable_memory_clock_switch)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->display_disable_memory_clock_switch)
return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
disable_memory_clock_switch);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
struct pp_smu_nv_clock_table *max_clocks)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_max_sustainable_clocks_by_dc)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
max_clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
unsigned int *clock_values_in_khz,
unsigned int *num_states)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_uclk_dpm_states)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
clock_values_in_khz,
num_states);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
struct dpm_clocks *clock_table)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
if (!pp_funcs->get_dpm_clock_table)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
clock_table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
| linux-master | drivers/gpu/drm/amd/pm/amdgpu_dpm.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "amd_pcie.h"
#include "legacy_dpm.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"
#define amdgpu_dpm_pre_set_power_state(adev) \
((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_post_set_power_state(adev) \
((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_display_configuration_changed(adev) \
((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
#define amdgpu_dpm_print_power_state(adev, ps) \
((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
#define amdgpu_dpm_vblank_too_short(adev) \
((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
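/*
 * Decode the ATOM_PPLIB classification flags of a power state into a
 * human-readable kernel log dump.
 */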
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
const char *s;
switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
default:
s = "none";
break;
case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
s = "battery";
break;
case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
s = "balanced";
break;
case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
s = "performance";
break;
}
printk("\tui class: %s\n", s);
printk("\tinternal class:");
if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
(class2 == 0))
pr_cont(" none");
else {
if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
pr_cont(" boot");
if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
pr_cont(" thermal");
if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
pr_cont(" limited_pwr");
if (class & ATOM_PPLIB_CLASSIFICATION_REST)
pr_cont(" rest");
if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
pr_cont(" forced");
if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
pr_cont(" 3d_perf");
if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
pr_cont(" ovrdrv");
if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
pr_cont(" uvd");
if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
pr_cont(" 3d_low");
if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
pr_cont(" acpi");
if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
pr_cont(" uvd_hd2");
if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
pr_cont(" uvd_hd");
if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
pr_cont(" uvd_sd");
if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
pr_cont(" limited_pwr2");
if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
pr_cont(" ulv");
if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
pr_cont(" uvd_mvc");
}
pr_cont("\n");
}
void amdgpu_dpm_print_cap_info(u32 caps)
{
printk("\tcaps:");
if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
pr_cont(" single_disp");
if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
pr_cont(" video");
if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
pr_cont(" no_dc");
pr_cont("\n");
}
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
printk("\tstatus:");
if (rps == adev->pm.dpm.current_ps)
pr_cont(" c");
if (rps == adev->pm.dpm.requested_ps)
pr_cont(" r");
if (rps == adev->pm.dpm.boot_ps)
pr_cont(" b");
pr_cont("\n");
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
int i;
if (adev->powerplay.pp_funcs->print_power_state == NULL)
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
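/*
 * Cache the PPLib platform caps and the backbias/voltage response times
 * from the VBIOS PowerPlayInfo data table.
 */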
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
return 0;
}
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan;
struct _ATOM_PPLIB_FANTABLE2 fan2;
struct _ATOM_PPLIB_FANTABLE3 fan3;
};
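/*
 * Copy an ATOM clock/voltage dependency table into driver format.  Each
 * clock is stored as a split 24-bit value: the low 16 bits in usClockLow
 * and the high 8 bits in ucClockHigh, i.e.
 * clk = le16_to_cpu(usClockLow) | (ucClockHigh << 16).
 */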
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
u32 size = atom_table->ucNumEntries *
sizeof(struct amdgpu_clock_voltage_dependency_entry);
int i;
ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
if (!amdgpu_table->entries)
return -ENOMEM;
entry = &atom_table->entries[0];
for (i = 0; i < atom_table->ucNumEntries; i++) {
amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
(entry->ucClockHigh << 16);
amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
}
amdgpu_table->count = atom_table->ucNumEntries;
return 0;
}
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
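/*
 * Parse the optional PPLib tables (fan table, clock/voltage dependency
 * and phase shedding tables, CAC data, and the extended-header sub-tables
 * such as VCE/UVD/SAMU/ACP limits) from the VBIOS PowerPlayInfo block.
 */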
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
union power_info *power_info;
union fan_info *fan_info;
ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
int ret, i;
if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
/* fan table */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
if (power_info->pplib3.usFanTableOffset) {
fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib3.usFanTableOffset));
adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
if (fan_info->fan.ucFanTableFormat >= 2)
adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
else
adev->pm.dpm.fan.t_max = 10900;
adev->pm.dpm.fan.cycle_delay = 100000;
if (fan_info->fan.ucFanTableFormat >= 3) {
adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
adev->pm.dpm.fan.default_max_fan_pwm =
le16_to_cpu(fan_info->fan3.usFanPWMMax);
adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
adev->pm.dpm.fan.fan_output_sensitivity =
le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
}
adev->pm.dpm.fan.ucode_fan_control = true;
}
}
/* clock dependency tables, phase shedding tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
dep_table);
if (ret) {
amdgpu_free_extended_power_table(adev);
return ret;
}
}
if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
dep_table);
if (ret) {
amdgpu_free_extended_power_table(adev);
return ret;
}
}
if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
dep_table);
if (ret) {
amdgpu_free_extended_power_table(adev);
return ret;
}
}
if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
dep_table);
if (ret) {
amdgpu_free_extended_power_table(adev);
return ret;
}
}
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
if (clk_v->ucNumEntries) {
adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
le16_to_cpu(clk_v->entries[0].usSclkLow) |
(clk_v->entries[0].ucSclkHigh << 16);
adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
le16_to_cpu(clk_v->entries[0].usMclkLow) |
(clk_v->entries[0].ucMclkHigh << 16);
adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
le16_to_cpu(clk_v->entries[0].usVddc);
adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
le16_to_cpu(clk_v->entries[0].usVddci);
}
}
if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
(ATOM_PPLIB_PhaseSheddingLimits_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kcalloc(psl->ucNumEntries,
sizeof(struct amdgpu_phase_shedding_limits_entry),
GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
}
adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
psl->ucNumEntries;
}
}
/* cac data */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
if (adev->pm.dpm.tdp_od_limit)
adev->pm.dpm.power_control = true;
else
adev->pm.dpm.power_control = false;
adev->pm.dpm.tdp_adjustment = 0;
adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
if (power_info->pplib5.usCACLeakageTableOffset) {
ATOM_PPLIB_CAC_Leakage_Table *cac_table =
(ATOM_PPLIB_CAC_Leakage_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
le16_to_cpu(entry->usVddc1);
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
le16_to_cpu(entry->usVddc2);
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
le16_to_cpu(entry->usVddc3);
} else {
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
le16_to_cpu(entry->usVddc);
adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
le32_to_cpu(entry->ulLeakageValue);
}
entry = (ATOM_PPLIB_CAC_Leakage_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
}
adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
}
}
/* ext tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
ext_hdr->usVCETableOffset) {
VCEClockInfoArray *array = (VCEClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
1 + array->ucNumEntries * sizeof(VCEClockInfo));
ATOM_PPLIB_VCE_State_Table *states =
(ATOM_PPLIB_VCE_State_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
ATOM_PPLIB_VCE_State_Record *state_entry;
VCEClockInfo *vce_clk;
u32 size = limits->numEntries *
sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
state_entry = &states->entries[0];
for (i = 0; i < limits->numEntries; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
}
adev->pm.dpm.num_of_vce_states =
states->numEntries > AMD_MAX_VCE_LEVELS ?
AMD_MAX_VCE_LEVELS : states->numEntries;
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
vce_clk = (VCEClockInfo *)
((u8 *)&array->entries[0] +
(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
adev->pm.dpm.vce_states[i].evclk =
le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
adev->pm.dpm.vce_states[i].ecclk =
le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
adev->pm.dpm.vce_states[i].clk_idx =
state_entry->ucClockInfoIndex & 0x3f;
adev->pm.dpm.vce_states[i].pstate =
(state_entry->ucClockInfoIndex & 0xc0) >> 6;
state_entry = (ATOM_PPLIB_VCE_State_Record *)
((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
}
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
ext_hdr->usUVDTableOffset) {
UVDClockInfoArray *array = (UVDClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
u32 size = limits->numEntries *
sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
for (i = 0; i < limits->numEntries; i++) {
UVDClockInfo *uvd_clk = (UVDClockInfo *)
((u8 *)&array->entries[0] +
(entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
}
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
ext_hdr->usSAMUTableOffset) {
ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
u32 size = limits->numEntries *
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
for (i = 0; i < limits->numEntries; i++) {
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
}
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
ext_hdr->usPPMTableOffset) {
ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPPMTableOffset));
adev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.ppm_table) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
le16_to_cpu(ppm->usCpuCoreNumber);
adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
le32_to_cpu(ppm->ulPlatformTDP);
adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
le32_to_cpu(ppm->ulSmallACPlatformTDP);
adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
le32_to_cpu(ppm->ulPlatformTDC);
adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
le32_to_cpu(ppm->ulSmallACPlatformTDC);
adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
le32_to_cpu(ppm->ulApuTDP);
adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
le32_to_cpu(ppm->ulDGpuTDP);
adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
le32_to_cpu(ppm->ulDGpuUlvPower);
adev->pm.dpm.dyn_state.ppm_table->tj_max =
le32_to_cpu(ppm->ulTjmax);
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
ext_hdr->usACPTableOffset) {
ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
u32 size = limits->numEntries *
sizeof(struct amdgpu_clock_voltage_dependency_entry);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
kzalloc(size, GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
limits->numEntries;
entry = &limits->entries[0];
for (i = 0; i < limits->numEntries; i++) {
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
}
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
ext_hdr->usPowerTuneTableOffset) {
u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
ATOM_PowerTune_Table *pt;
adev->pm.dpm.dyn_state.cac_tdp_table =
kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
if (rev > 0) {
ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
ppt->usMaximumPowerDeliveryLimit;
pt = &ppt->power_tune_table;
} else {
ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
pt = &ppt->power_tune_table;
}
adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
le16_to_cpu(pt->usConfigurableTDP);
adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
le16_to_cpu(pt->usBatteryPowerLimit);
adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
le16_to_cpu(pt->usSmallPowerLimit);
adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
le16_to_cpu(pt->usLowCACLeakage);
adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
le16_to_cpu(pt->usHighCACLeakage);
}
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
ext_hdr->usSclkVddgfxTableOffset) {
dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
ret = amdgpu_parse_clk_voltage_dep_table(
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
dep_table);
if (ret) {
kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
return ret;
}
}
}
return 0;
}
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
kfree(dyn_state->vddc_dependency_on_sclk.entries);
kfree(dyn_state->vddci_dependency_on_mclk.entries);
kfree(dyn_state->vddc_dependency_on_mclk.entries);
kfree(dyn_state->mvdd_dependency_on_mclk.entries);
kfree(dyn_state->cac_leakage_table.entries);
kfree(dyn_state->phase_shedding_limits_table.entries);
kfree(dyn_state->ppm_table);
kfree(dyn_state->cac_tdp_table);
kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}
static const char *pp_lib_thermal_controller_names[] = {
"NONE",
"lm63",
"adm1032",
"adm1030",
"max6649",
"lm64",
"f75375",
"RV6xx",
"RV770",
"adt7473",
"NONE",
"External GPIO",
"Evergreen",
"emc2103",
"Sumo",
"Northern Islands",
"Southern Islands",
"lm96163",
"Sea Islands",
"Kaveri/Kabini",
};
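/*
 * Detect the thermal/fan controller described in the PowerPlay table and,
 * for external I2C controllers, register the matching i2c client device.
 */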
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
ATOM_PPLIB_POWERPLAYTABLE *power_table;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
ATOM_PPLIB_THERMALCONTROLLER *controller;
struct amdgpu_i2c_bus_rec i2c_bus;
u16 data_offset;
u8 frev, crev;
if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return;
power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
(mode_info->atom_context->bios + data_offset);
controller = &power_table->sThermalController;
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
adev->pm.no_fan = true;
adev->pm.fan_pulses_per_revolution =
controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
if (adev->pm.fan_pulses_per_revolution) {
adev->pm.fan_min_rpm = controller->ucFanMinRPM;
adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
DRM_INFO("External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
if (adev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
}
}
}
struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
return NULL;
}
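/*
 * Select the best matching power state for the requested state type,
 * honoring single-display-only state caps and falling back through
 * related state types when no direct match exists.
 */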
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
enum amd_pm_state_type dpm_state)
{
int i;
struct amdgpu_ps *ps;
u32 ui_class;
bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev))
single_display = false;
}
/* certain older ASICs have a separate 3D performance state,
 * so try that first if the user selected performance
 */
if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
/* Pick the best power state based on current conditions */
for (i = 0; i < adev->pm.dpm.num_ps; i++) {
ps = &adev->pm.dpm.ps[i];
ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
switch (dpm_state) {
/* user states */
case POWER_STATE_TYPE_BATTERY:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
case POWER_STATE_TYPE_BALANCED:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
case POWER_STATE_TYPE_PERFORMANCE:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
/* internal states */
case POWER_STATE_TYPE_INTERNAL_UVD:
if (adev->pm.dpm.uvd_ps)
return adev->pm.dpm.uvd_ps;
else
break;
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_BOOT:
return adev->pm.dpm.boot_ps;
case POWER_STATE_TYPE_INTERNAL_THERMAL:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_ACPI:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_ULV:
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_3DPERF:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
return ps;
break;
default:
break;
}
}
/* use a fallback state if we didn't match */
switch (dpm_state) {
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
goto restart_search;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
if (adev->pm.dpm.uvd_ps) {
return adev->pm.dpm.uvd_ps;
} else {
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
goto restart_search;
}
case POWER_STATE_TYPE_INTERNAL_THERMAL:
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
goto restart_search;
case POWER_STATE_TYPE_INTERNAL_ACPI:
dpm_state = POWER_STATE_TYPE_BATTERY;
goto restart_search;
case POWER_STATE_TYPE_BATTERY:
case POWER_STATE_TYPE_BALANCED:
case POWER_STATE_TYPE_INTERNAL_3DPERF:
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
goto restart_search;
default:
break;
}
return NULL;
}
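/*
 * Transition to the power state matching the current (possibly
 * overridden) DPM state: pick a candidate, skip the switch when it is
 * equal to the current state, and reapply the forced performance level
 * afterwards.
 */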
static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
bool equal = false;
/* bail early if dpm init failed */
if (!adev->pm.dpm_enabled)
return 0;
if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
/* add other state override checks here */
if ((!adev->pm.dpm.thermal_active) &&
(!adev->pm.dpm.uvd_active))
adev->pm.dpm.state = adev->pm.dpm.user_state;
}
dpm_state = adev->pm.dpm.state;
ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
if (ps)
adev->pm.dpm.requested_ps = ps;
else
return -EINVAL;
if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
printk("switching to power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
if (pp_funcs->display_configuration_changed)
amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return ret;
if (pp_funcs->check_state_equal) {
if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
equal = false;
}
if (equal)
return 0;
if (pp_funcs->set_power_state)
pp_funcs->set_power_state(adev->powerplay.pp_handle);
amdgpu_dpm_post_set_power_state(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
if (pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
/* save the user's level */
adev->pm.dpm.forced_level = level;
} else {
/* otherwise, user selected level */
pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
}
}
return 0;
}
void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_get_active_displays(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
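/*
 * Thermal interrupt work handler: enter the internal thermal state while
 * the GPU temperature is above the configured minimum and drop back to
 * the user state once it cools down.
 */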
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device,
pm.dpm.thermal.work);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
/* switch to the thermal state */
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
if (!adev->pm.dpm_enabled)
return;
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
&size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
dpm_state = adev->pm.dpm.user_state;
} else {
if (adev->pm.dpm.thermal.high_to_low)
/* switch back the user state */
dpm_state = adev->pm.dpm.user_state;
}
if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
adev->pm.dpm.thermal_active = true;
else
adev->pm.dpm.thermal_active = false;
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
}
| linux-master | drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c |
/*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>
#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "legacy_dpm.h"
#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
static const struct amd_pm_funcs kv_dpm_funcs;
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps,
struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
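/*
 * Convert a 2-bit voltage index into a 7-bit VID.  The vddc-vs-sclk
 * dependency table is preferred when populated (its .v entries already
 * hold the VID); otherwise fall back to the VID mapping table.  Out of
 * range indices clamp to the last entry.
 */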
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_2bit)
{
struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
u32 i;
if (vddc_sclk_table && vddc_sclk_table->count) {
if (vid_2bit < vddc_sclk_table->count)
return vddc_sclk_table->entries[vid_2bit].v;
else
return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
} else {
for (i = 0; i < vid_mapping_table->num_entries; i++) {
if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
return vid_mapping_table->entries[i].vid_7bit;
}
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
}
static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_7bit)
{
struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
u32 i;
if (vddc_sclk_table && vddc_sclk_table->count) {
for (i = 0; i < vddc_sclk_table->count; i++) {
if (vddc_sclk_table->entries[i].v == vid_7bit)
return i;
}
return vddc_sclk_table->count - 1;
} else {
for (i = 0; i < vid_mapping_table->num_entries; i++) {
if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
return vid_mapping_table->entries[i].vid_2bit;
}
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
}
}
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
* Clear the bit to let atom handle it.
* Set it to let the driver handle it.
* For now we just let atom handle it.
*/
#if 0
u32 v = RREG32(mmDOUT_SCRATCH3);
if (enable)
v |= 0x4;
else
v &= 0xFFFFFFFB;
WREG32(mmDOUT_SCRATCH3, v);
#endif
}
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
ATOM_AVAILABLE_SCLK_LIST *table)
{
u32 i;
u32 n = 0;
u32 prev_sclk = 0;
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK > prev_sclk) {
sclk_voltage_mapping_table->entries[n].sclk_frequency =
table[i].ulSupportedSCLK;
sclk_voltage_mapping_table->entries[n].vid_2bit =
table[i].usVoltageIndex;
prev_sclk = table[i].ulSupportedSCLK;
n++;
}
}
sclk_voltage_mapping_table->num_max_dpm_entries = n;
}
static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
struct sumo_vid_mapping_table *vid_mapping_table,
ATOM_AVAILABLE_SCLK_LIST *table)
{
u32 i, j;
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
table[i].usVoltageIndex;
}
}
for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
if (vid_mapping_table->entries[i].vid_7bit == 0) {
for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
if (vid_mapping_table->entries[j].vid_7bit != 0) {
vid_mapping_table->entries[i] =
vid_mapping_table->entries[j];
vid_mapping_table->entries[j].vid_7bit = 0;
break;
}
}
if (j == SUMO_MAX_NUMBER_VOLTAGES)
break;
}
}
vid_mapping_table->num_entries = i;
}
#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 1, 4, 1 },
{ 2, 5, 1 },
{ 3, 4, 2 },
{ 4, 1, 1 },
{ 5, 5, 2 },
{ 6, 6, 1 },
{ 7, 9, 2 },
{ 0xffffffff }
};
static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 1, 4, 1 },
{ 2, 5, 1 },
{ 3, 4, 1 },
{ 4, 1, 1 },
{ 5, 5, 1 },
{ 6, 6, 1 },
{ 7, 9, 1 },
{ 8, 4, 1 },
{ 9, 2, 1 },
{ 10, 3, 1 },
{ 11, 6, 1 },
{ 12, 8, 2 },
{ 13, 1, 1 },
{ 14, 2, 1 },
{ 15, 3, 1 },
{ 16, 1, 1 },
{ 17, 4, 1 },
{ 18, 3, 1 },
{ 19, 1, 1 },
{ 20, 8, 1 },
{ 21, 5, 1 },
{ 22, 1, 1 },
{ 23, 1, 1 },
{ 24, 4, 1 },
{ 27, 6, 1 },
{ 28, 1, 1 },
{ 0xffffffff }
};
static const struct kv_lcac_config_reg sx0_cac_config_reg[] = {
{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_lcac_config_reg mc0_cac_config_reg[] = {
{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_lcac_config_reg mc1_cac_config_reg[] = {
{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_lcac_config_reg mc2_cac_config_reg[] = {
{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_lcac_config_reg mc3_cac_config_reg[] = {
{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
static const struct kv_lcac_config_reg cpl_cac_config_reg[] = {
{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif
static const struct kv_pt_config_reg didt_config_kv[] = {
{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0xFFFFFFFF }
};
static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
struct kv_ps *ps = rps->ps_priv;
return ps;
}
static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
struct kv_power_info *pi = adev->pm.dpm.priv;
return pi;
}
#if 0
static void kv_program_local_cac_table(struct amdgpu_device *adev,
const struct kv_lcac_config_values *local_cac_table,
const struct kv_lcac_config_reg *local_cac_reg)
{
u32 i, count, data;
const struct kv_lcac_config_values *values = local_cac_table;
while (values->block_id != 0xffffffff) {
count = values->signal_id;
for (i = 0; i < count; i++) {
data = ((values->block_id << local_cac_reg->block_shift) &
local_cac_reg->block_mask);
data |= ((i << local_cac_reg->signal_shift) &
local_cac_reg->signal_mask);
data |= ((values->t << local_cac_reg->t_shift) &
local_cac_reg->t_mask);
data |= ((1 << local_cac_reg->enable_shift) &
local_cac_reg->enable_mask);
WREG32_SMC(local_cac_reg->cntl, data);
}
values++;
}
}
#endif
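/*
 * Walk a power-throttle register table (terminated by an offset of
 * 0xFFFFFFFF).  CACHE-typed entries only accumulate field values,
 * which are OR'd into the next real register write; the register space
 * (SMC indirect, DIDT indirect or plain MMIO) is selected per entry.
 */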
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
const struct kv_pt_config_reg *cac_config_regs)
{
const struct kv_pt_config_reg *config_regs = cac_config_regs;
u32 data;
u32 cache = 0;
if (config_regs == NULL)
return -EINVAL;
while (config_regs->offset != 0xFFFFFFFF) {
if (config_regs->type == KV_CONFIGREG_CACHE) {
cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
} else {
switch (config_regs->type) {
case KV_CONFIGREG_SMC_IND:
data = RREG32_SMC(config_regs->offset);
break;
case KV_CONFIGREG_DIDT_IND:
data = RREG32_DIDT(config_regs->offset);
break;
default:
data = RREG32(config_regs->offset);
break;
}
data &= ~config_regs->mask;
data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
data |= cache;
cache = 0;
switch (config_regs->type) {
case KV_CONFIGREG_SMC_IND:
WREG32_SMC(config_regs->offset, data);
break;
case KV_CONFIGREG_DIDT_IND:
WREG32_DIDT(config_regs->offset, data);
break;
default:
WREG32(config_regs->offset, data);
break;
}
}
config_regs++;
}
return 0;
}
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 data;
if (pi->caps_sq_ramping) {
data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
if (enable)
data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
else
data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
}
if (pi->caps_db_ramping) {
data = RREG32_DIDT(ixDIDT_DB_CTRL0);
if (enable)
data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
else
data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
WREG32_DIDT(ixDIDT_DB_CTRL0, data);
}
if (pi->caps_td_ramping) {
data = RREG32_DIDT(ixDIDT_TD_CTRL0);
if (enable)
data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
else
data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
WREG32_DIDT(ixDIDT_TD_CTRL0, data);
}
if (pi->caps_tcp_ramping) {
data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
if (enable)
data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
else
data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
}
}
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
if (pi->caps_sq_ramping ||
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return ret;
}
}
kv_do_enable_didt(adev, enable);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
}
#if 0
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
if (pi->caps_cac) {
WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
}
}
#endif
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret = 0;
if (pi->caps_cac) {
if (enable) {
ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
if (ret)
pi->cac_enabled = false;
else
pi->cac_enabled = true;
} else if (pi->cac_enabled) {
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
pi->cac_enabled = false;
}
}
return ret;
}
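/*
 * Read the DPM table and soft-register offsets out of the firmware
 * header the SMU leaves in SRAM; all later table uploads are placed
 * relative to dpm_table_start.
 */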
static int kv_process_firmware_header(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 tmp;
int ret;
ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU7_Firmware_Header, DpmTable),
&tmp, pi->sram_end);
if (ret == 0)
pi->dpm_table_start = tmp;
ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU7_Firmware_Header, SoftRegisters),
&tmp, pi->sram_end);
if (ret == 0)
pi->soft_regs_start = tmp;
return ret;
}
static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
pi->graphics_voltage_change_enable = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
&pi->graphics_voltage_change_enable,
sizeof(u8), pi->sram_end);
return ret;
}
static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
pi->graphics_interval = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
&pi->graphics_interval,
sizeof(u8), pi->sram_end);
return ret;
}
static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
&pi->graphics_boot_level,
sizeof(u8), pi->sram_end);
return ret;
}
static void kv_program_vc(struct amdgpu_device *adev)
{
WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}
static void kv_clear_vc(struct amdgpu_device *adev)
{
WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
static int kv_set_divider_value(struct amdgpu_device *adev,
u32 index, u32 sclk)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct atom_clock_dividers dividers;
int ret;
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
if (ret)
return ret;
pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
return 0;
}
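/*
 * Decode an 8-bit SVI voltage index.  The result appears to be in
 * 0.25 mV units: 6200 corresponds to 1.55 V and each VID step
 * subtracts 6.25 mV.
 */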
static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
u16 voltage)
{
return 6200 - (voltage * 25);
}
static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
u32 vid_2bit)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
&pi->sys_info.vid_mapping_table,
vid_2bit);
return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}
static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
pi->graphics_level[index].MinVddNb =
cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
return 0;
}
static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->graphics_level[index].AT = cpu_to_be16((u16)at);
return 0;
}
static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
u32 index, bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
static void kv_start_dpm(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
WREG32_SMC(ixGENERAL_PWRMGT, tmp);
amdgpu_kv_smc_dpm_enable(adev, true);
}
static void kv_stop_dpm(struct amdgpu_device *adev)
{
amdgpu_kv_smc_dpm_enable(adev, false);
}
static void kv_start_am(struct amdgpu_device *adev)
{
u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
static void kv_reset_am(struct amdgpu_device *adev)
{
u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
return amdgpu_kv_notify_message_to_smu(adev, freeze ?
PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
return kv_force_dpm_lowest(adev);
}
static int kv_unforce_levels(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
else
return kv_set_enabled_levels(adev);
}
static int kv_update_sclk_t(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 low_sclk_interrupt_t = 0;
int ret = 0;
if (pi->caps_sclk_throttle_low_notification) {
low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
(u8 *)&low_sclk_interrupt_t,
sizeof(u32), pi->sram_end);
}
return ret;
}
static int kv_program_bootup_state(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].clk == pi->boot_pl.sclk)
break;
}
pi->graphics_boot_level = (u8)i;
kv_dpm_power_level_enable(adev, i, true);
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
if (table->num_max_dpm_entries == 0)
return -EINVAL;
for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
break;
}
pi->graphics_boot_level = (u8)i;
kv_dpm_power_level_enable(adev, i, true);
}
return 0;
}
static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
pi->graphics_therm_throttle_enable = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
&pi->graphics_therm_throttle_enable,
sizeof(u8), pi->sram_end);
return ret;
}
static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
(u8 *)&pi->graphics_level,
sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
pi->sram_end);
if (ret)
return ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
&pi->graphics_dpm_level_count,
sizeof(u8), pi->sram_end);
return ret;
}
static u32 kv_get_clock_difference(u32 a, u32 b)
{
return (a >= b) ? a - b : b - a;
}
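/*
 * Pick a DFS bypass divider code for clocks within 2 MHz of a known
 * bypass frequency (clocks are in 10 kHz units, so 40000 is 400 MHz).
 * Anything else gets 0, i.e. no bypass.
 */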
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 value;
if (pi->caps_enable_dfs_bypass) {
if (kv_get_clock_difference(clk, 40000) < 200)
value = 3;
else if (kv_get_clock_difference(clk, 30000) < 200)
value = 2;
else if (kv_get_clock_difference(clk, 20000) < 200)
value = 7;
else if (kv_get_clock_difference(clk, 15000) < 200)
value = 6;
else if (kv_get_clock_difference(clk, 10000) < 200)
value = 8;
else
value = 0;
} else {
value = 0;
}
return value;
}
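/*
 * Build the SMU UVD level table from the UVD clock-voltage dependency
 * table, stopping once an entry exceeds the high-voltage threshold,
 * then upload the level count, sampling interval and levels to SMC
 * SRAM.  The VCE/SAMU/ACP populate routines below follow the same
 * pattern.
 */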
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_uvd_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
struct atom_clock_dividers dividers;
int ret;
u32 i;
if (table == NULL || table->count == 0)
return 0;
pi->uvd_level_count = 0;
for (i = 0; i < table->count; i++) {
if (pi->high_voltage_t &&
(pi->high_voltage_t < table->entries[i].v))
break;
pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
pi->uvd_level[i].VClkBypassCntl =
(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
pi->uvd_level[i].DClkBypassCntl =
(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 table->entries[i].vclk, false, &dividers);
if (ret)
return ret;
pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 table->entries[i].dclk, false, &dividers);
if (ret)
return ret;
pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
pi->uvd_level_count++;
}
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
(u8 *)&pi->uvd_level_count,
sizeof(u8), pi->sram_end);
if (ret)
return ret;
pi->uvd_interval = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, UVDInterval),
&pi->uvd_interval,
sizeof(u8), pi->sram_end);
if (ret)
return ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, UvdLevel),
(u8 *)&pi->uvd_level,
sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
pi->sram_end);
return ret;
}
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
u32 i;
struct amdgpu_vce_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
struct atom_clock_dividers dividers;
if (table == NULL || table->count == 0)
return 0;
pi->vce_level_count = 0;
for (i = 0; i < table->count; i++) {
if (pi->high_voltage_t &&
pi->high_voltage_t < table->entries[i].v)
break;
pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
pi->vce_level[i].ClkBypassCntl =
(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 table->entries[i].evclk, false, &dividers);
if (ret)
return ret;
pi->vce_level[i].Divider = (u8)dividers.post_div;
pi->vce_level_count++;
}
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
(u8 *)&pi->vce_level_count,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
pi->vce_interval = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, VCEInterval),
(u8 *)&pi->vce_interval,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, VceLevel),
(u8 *)&pi->vce_level,
sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
pi->sram_end);
return ret;
}
static int kv_populate_samu_table(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
struct atom_clock_dividers dividers;
int ret;
u32 i;
if (table == NULL || table->count == 0)
return 0;
pi->samu_level_count = 0;
for (i = 0; i < table->count; i++) {
if (pi->high_voltage_t &&
pi->high_voltage_t < table->entries[i].v)
break;
pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
pi->samu_level[i].ClkBypassCntl =
(u8)kv_get_clk_bypass(adev, table->entries[i].clk);
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 table->entries[i].clk, false, &dividers);
if (ret)
return ret;
pi->samu_level[i].Divider = (u8)dividers.post_div;
pi->samu_level_count++;
}
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
(u8 *)&pi->samu_level_count,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
pi->samu_interval = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
(u8 *)&pi->samu_interval,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, SamuLevel),
(u8 *)&pi->samu_level,
sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
pi->sram_end);
	return ret;
}
static int kv_populate_acp_table(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
struct atom_clock_dividers dividers;
int ret;
u32 i;
if (table == NULL || table->count == 0)
return 0;
pi->acp_level_count = 0;
for (i = 0; i < table->count; i++) {
pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 table->entries[i].clk, false, &dividers);
if (ret)
return ret;
pi->acp_level[i].Divider = (u8)dividers.post_div;
pi->acp_level_count++;
}
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
(u8 *)&pi->acp_level_count,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
pi->acp_interval = 1;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, ACPInterval),
(u8 *)&pi->acp_interval,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, AcpLevel),
(u8 *)&pi->acp_level,
sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
pi->sram_end);
	return ret;
}
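/*
 * Same mapping as kv_get_clk_bypass(), but applied to every graphics
 * DPM level (and with a slightly different frequency set for the
 * 7 and 6 divider codes).
 */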
static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
for (i = 0; i < pi->graphics_dpm_level_count; i++) {
if (pi->caps_enable_dfs_bypass) {
if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
pi->graphics_level[i].ClkBypassCntl = 3;
else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
pi->graphics_level[i].ClkBypassCntl = 2;
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
pi->graphics_level[i].ClkBypassCntl = 7;
else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
pi->graphics_level[i].ClkBypassCntl = 6;
else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
pi->graphics_level[i].ClkBypassCntl = 8;
else
pi->graphics_level[i].ClkBypassCntl = 0;
} else {
pi->graphics_level[i].ClkBypassCntl = 0;
}
}
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
for (i = 0; i < pi->graphics_dpm_level_count; i++) {
if (pi->caps_enable_dfs_bypass) {
if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
pi->graphics_level[i].ClkBypassCntl = 3;
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
pi->graphics_level[i].ClkBypassCntl = 2;
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
pi->graphics_level[i].ClkBypassCntl = 7;
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
pi->graphics_level[i].ClkBypassCntl = 6;
else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
pi->graphics_level[i].ClkBypassCntl = 8;
else
pi->graphics_level[i].ClkBypassCntl = 0;
} else {
pi->graphics_level[i].ClkBypassCntl = 0;
}
}
}
}
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
return amdgpu_kv_notify_message_to_smu(adev, enable ?
PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->acp_boot_level = 0xff;
}
static void kv_update_current_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
struct kv_ps *new_ps = kv_get_ps(rps);
struct kv_power_info *pi = kv_get_pi(adev);
pi->current_rps = *rps;
pi->current_ps = *new_ps;
pi->current_rps.ps_priv = &pi->current_ps;
adev->pm.dpm.current_ps = &pi->current_rps;
}
static void kv_update_requested_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
struct kv_ps *new_ps = kv_get_ps(rps);
struct kv_power_info *pi = kv_get_pi(adev);
pi->requested_rps = *rps;
pi->requested_ps = *new_ps;
pi->requested_rps.ps_priv = &pi->requested_ps;
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(void *handle, bool enable)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, enable);
if (ret)
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
}
}
static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
switch (sensor) {
case THERMAL_TYPE_KV:
return true;
case THERMAL_TYPE_NONE:
case THERMAL_TYPE_EXTERNAL:
case THERMAL_TYPE_EXTERNAL_GPIO:
default:
return false;
}
}
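/*
 * Bring DPM up in firmware order: parse the firmware header, program
 * the bootup level, upload the graphics/UVD/VCE/SAMU/ACP tables, then
 * enable the individual features (thermal throttling, voltage scaling,
 * ULV, DIDT, CAC) before hooking up the thermal interrupts.
 */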
static int kv_dpm_enable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
ret = kv_process_firmware_header(adev);
if (ret) {
DRM_ERROR("kv_process_firmware_header failed\n");
return ret;
}
kv_init_fps_limits(adev);
kv_init_graphics_levels(adev);
ret = kv_program_bootup_state(adev);
if (ret) {
DRM_ERROR("kv_program_bootup_state failed\n");
return ret;
}
kv_calculate_dfs_bypass_settings(adev);
ret = kv_upload_dpm_settings(adev);
if (ret) {
DRM_ERROR("kv_upload_dpm_settings failed\n");
return ret;
}
ret = kv_populate_uvd_table(adev);
if (ret) {
DRM_ERROR("kv_populate_uvd_table failed\n");
return ret;
}
ret = kv_populate_vce_table(adev);
if (ret) {
DRM_ERROR("kv_populate_vce_table failed\n");
return ret;
}
ret = kv_populate_samu_table(adev);
if (ret) {
DRM_ERROR("kv_populate_samu_table failed\n");
return ret;
}
ret = kv_populate_acp_table(adev);
if (ret) {
DRM_ERROR("kv_populate_acp_table failed\n");
return ret;
}
kv_program_vc(adev);
#if 0
kv_initialize_hardware_cac_manager(adev);
#endif
kv_start_am(adev);
if (pi->enable_auto_thermal_throttling) {
ret = kv_enable_auto_thermal_throttling(adev);
if (ret) {
DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
return ret;
}
}
ret = kv_enable_dpm_voltage_scaling(adev);
if (ret) {
DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
return ret;
}
ret = kv_set_dpm_interval(adev);
if (ret) {
DRM_ERROR("kv_set_dpm_interval failed\n");
return ret;
}
ret = kv_set_dpm_boot_state(adev);
if (ret) {
DRM_ERROR("kv_set_dpm_boot_state failed\n");
return ret;
}
ret = kv_enable_ulv(adev, true);
if (ret) {
DRM_ERROR("kv_enable_ulv failed\n");
return ret;
}
kv_start_dpm(adev);
ret = kv_enable_didt(adev, true);
if (ret) {
DRM_ERROR("kv_enable_didt failed\n");
return ret;
}
ret = kv_enable_smc_cac(adev, true);
if (ret) {
DRM_ERROR("kv_enable_smc_cac failed\n");
return ret;
}
kv_reset_acp_boot_level(adev);
ret = amdgpu_kv_smc_bapm_enable(adev, false);
if (ret) {
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
if (adev->irq.installed &&
kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
return ret;
}
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
}
return ret;
}
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
err = amdgpu_kv_smc_bapm_enable(adev, false);
if (err)
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
/* powerup blocks */
kv_dpm_powergate_acp(adev, false);
kv_dpm_powergate_samu(adev, false);
if (pi->caps_vce_pg) /* power on the VCE block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
if (pi->caps_uvd_pg) /* power on the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
kv_enable_smc_cac(adev, false);
kv_enable_didt(adev, false);
kv_clear_vc(adev);
kv_stop_dpm(adev);
kv_enable_ulv(adev, false);
kv_reset_am(adev);
kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}
#if 0
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
u16 reg_offset, u32 value)
{
struct kv_power_info *pi = kv_get_pi(adev);
return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
(u8 *)&value, sizeof(u16), pi->sram_end);
}
static int kv_read_smc_soft_register(struct amdgpu_device *adev,
u16 reg_offset, u32 *value)
{
struct kv_power_info *pi = kv_get_pi(adev);
return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
value, pi->sram_end);
}
#endif
static void kv_init_sclk_t(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->low_sclk_interrupt_t = 0;
}
static int kv_init_fps_limits(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret = 0;
if (pi->caps_fps) {
u16 tmp;
tmp = 45;
pi->fps_high_t = cpu_to_be16(tmp);
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, FpsHighT),
(u8 *)&pi->fps_high_t,
sizeof(u16), pi->sram_end);
tmp = 30;
pi->fps_low_t = cpu_to_be16(tmp);
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, FpsLowT),
(u8 *)&pi->fps_low_t,
sizeof(u16), pi->sram_end);
}
return ret;
}
static void kv_init_powergate_state(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->uvd_power_gated = false;
pi->vce_power_gated = false;
pi->samu_power_gated = false;
pi->acp_power_gated = false;
}
static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
return amdgpu_kv_notify_message_to_smu(adev, enable ?
PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}
static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
return amdgpu_kv_notify_message_to_smu(adev, enable ?
PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
return amdgpu_kv_notify_message_to_smu(adev, enable ?
PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}
static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
return amdgpu_kv_notify_message_to_smu(adev, enable ?
PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_uvd_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
int ret;
u32 mask;
if (!gate) {
if (table->count)
pi->uvd_boot_level = table->count - 1;
else
pi->uvd_boot_level = 0;
if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
mask = 1 << pi->uvd_boot_level;
} else {
mask = 0x1f;
}
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
(uint8_t *)&pi->uvd_boot_level,
sizeof(u8), pi->sram_end);
if (ret)
return ret;
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_UVDDPM_SetEnabledMask,
mask);
}
return kv_enable_uvd_dpm(adev, !gate);
}
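/* Find the first VCE level whose evclk satisfies the requested clock. */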
static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
u8 i;
struct amdgpu_vce_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
for (i = 0; i < table->count; i++) {
if (table->entries[i].evclk >= evclk)
break;
}
return i;
}
static int kv_update_vce_dpm(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
struct amdgpu_ps *amdgpu_current_state)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_vce_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
int ret;
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
(u8 *)&pi->vce_boot_level,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
if (pi->caps_stable_p_state)
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(1 << pi->vce_boot_level));
kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
}
return 0;
}
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
int ret;
if (!gate) {
if (pi->caps_stable_p_state)
pi->samu_boot_level = table->count - 1;
else
pi->samu_boot_level = 0;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
(u8 *)&pi->samu_boot_level,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
if (pi->caps_stable_p_state)
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SAMUDPM_SetEnabledMask,
(1 << pi->samu_boot_level));
}
return kv_enable_samu_dpm(adev, !gate);
}
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
return 0;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u8 acp_boot_level;
if (!pi->caps_stable_p_state) {
acp_boot_level = kv_get_acp_boot_level(adev);
if (acp_boot_level != pi->acp_boot_level) {
pi->acp_boot_level = acp_boot_level;
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_ACPDPM_SetEnabledMask,
(1 << pi->acp_boot_level));
}
}
}
static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
int ret;
if (!gate) {
if (pi->caps_stable_p_state)
pi->acp_boot_level = table->count - 1;
else
pi->acp_boot_level = kv_get_acp_boot_level(adev);
ret = amdgpu_kv_copy_bytes_to_smc(adev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
(u8 *)&pi->acp_boot_level,
sizeof(u8),
pi->sram_end);
if (ret)
return ret;
if (pi->caps_stable_p_state)
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_ACPDPM_SetEnabledMask,
(1 << pi->acp_boot_level));
}
return kv_enable_acp_dpm(adev, !gate);
}
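/*
 * UVD powergating: when gating, quiesce the IP block and disable UVD
 * DPM before asking the SMU to cut power; ungating runs the sequence
 * in reverse.
 */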
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
pi->uvd_power_gated = gate;
if (gate) {
/* stop the UVD block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
} else {
if (pi->caps_uvd_pg)
/* power on the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
/* re-init the UVD block */
kv_update_uvd_dpm(adev, gate);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
}
}
static void kv_dpm_powergate_vce(void *handle, bool gate)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
pi->vce_power_gated = gate;
if (gate) {
/* stop the VCE block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
kv_enable_vce_dpm(adev, false);
if (pi->caps_vce_pg) /* power off the VCE block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
} else {
if (pi->caps_vce_pg) /* power on the VCE block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
kv_enable_vce_dpm(adev, true);
/* re-init the VCE block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
}
}
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
if (pi->samu_power_gated == gate)
return;
pi->samu_power_gated = gate;
if (gate) {
kv_update_samu_dpm(adev, true);
if (pi->caps_samu_pg)
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
} else {
if (pi->caps_samu_pg)
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
kv_update_samu_dpm(adev, false);
}
}
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
if (pi->acp_power_gated == gate)
return;
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
return;
pi->acp_power_gated = gate;
if (gate) {
kv_update_acp_dpm(adev, true);
if (pi->caps_acp_pg)
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
} else {
if (pi->caps_acp_pg)
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
kv_update_acp_dpm(adev, false);
}
}
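/*
 * Derive the lowest/highest valid DPM level indices bracketing the
 * sclk range of the new power state; if the window inverts, collapse
 * both indices onto whichever side is closer to the requested clocks.
 */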
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps)
{
struct kv_ps *new_ps = kv_get_ps(new_rps);
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
for (i = 0; i < pi->graphics_dpm_level_count; i++) {
if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
(i == (pi->graphics_dpm_level_count - 1))) {
pi->lowest_valid = i;
break;
}
}
for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
break;
}
pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
(table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
pi->highest_valid = pi->lowest_valid;
else
pi->lowest_valid = pi->highest_valid;
}
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
i == (int)(pi->graphics_dpm_level_count - 1)) {
pi->lowest_valid = i;
break;
}
}
for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].sclk_frequency <=
new_ps->levels[new_ps->num_levels - 1].sclk)
break;
}
pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk -
table->entries[pi->highest_valid].sclk_frequency) >
(table->entries[pi->lowest_valid].sclk_frequency -
new_ps->levels[new_ps->num_levels - 1].sclk))
pi->highest_valid = pi->lowest_valid;
else
pi->lowest_valid = pi->highest_valid;
}
}
}
static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps)
{
struct kv_ps *new_ps = kv_get_ps(new_rps);
struct kv_power_info *pi = kv_get_pi(adev);
int ret = 0;
u8 clk_bypass_cntl;
if (pi->caps_enable_dfs_bypass) {
clk_bypass_cntl = new_ps->need_dfs_bypass ?
pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
ret = amdgpu_kv_copy_bytes_to_smc(adev,
(pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
(pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
&clk_bypass_cntl,
sizeof(u8), pi->sram_end);
}
return ret;
}
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret = 0;
if (enable) {
if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
if (ret == 0)
pi->nb_dpm_enabled = true;
}
} else {
if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
if (ret == 0)
pi->nb_dpm_enabled = false;
}
}
return ret;
}
static int kv_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
if (ret)
return ret;
} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
ret = kv_force_dpm_lowest(adev);
if (ret)
return ret;
} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
ret = kv_unforce_levels(adev);
if (ret)
return ret;
}
adev->pm.dpm.forced_level = level;
return 0;
}
static int kv_dpm_pre_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
kv_update_requested_ps(adev, new_ps);
kv_apply_state_adjust_rules(adev,
&pi->requested_rps,
&pi->current_rps);
return 0;
}
static int kv_dpm_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
int ret;
if (pi->bapm_enable) {
ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
if (ret) {
DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
return ret;
}
}
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
if (pi->enable_dpm) {
kv_set_valid_clock_range(adev, new_ps);
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
DRM_ERROR("kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
kv_calculate_dpm_settings(adev);
kv_force_lowest_valid(adev);
kv_enable_new_levels(adev);
kv_upload_dpm_settings(adev);
kv_program_nbps_index_settings(adev, new_ps);
kv_unforce_levels(adev);
kv_set_enabled_levels(adev);
kv_force_lowest_valid(adev);
kv_unforce_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
DRM_ERROR("kv_update_vce_dpm failed\n");
return ret;
}
kv_update_sclk_t(adev);
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, true);
}
} else {
if (pi->enable_dpm) {
kv_set_valid_clock_range(adev, new_ps);
kv_update_dfs_bypass_settings(adev, new_ps);
ret = kv_calculate_ds_divider(adev);
if (ret) {
DRM_ERROR("kv_calculate_ds_divider failed\n");
return ret;
}
kv_calculate_nbps_level_settings(adev);
kv_calculate_dpm_settings(adev);
kv_freeze_sclk_dpm(adev, true);
kv_upload_dpm_settings(adev);
kv_program_nbps_index_settings(adev, new_ps);
kv_freeze_sclk_dpm(adev, false);
kv_set_enabled_levels(adev);
ret = kv_update_vce_dpm(adev, new_ps, old_ps);
if (ret) {
DRM_ERROR("kv_update_vce_dpm failed\n");
return ret;
}
kv_update_acp_boot_level(adev);
kv_update_sclk_t(adev);
kv_enable_nb_dpm(adev, true);
}
}
return 0;
}
static void kv_dpm_post_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
kv_update_current_ps(adev, new_ps);
}
static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
sumo_take_smu_control(adev, true);
kv_init_powergate_state(adev);
kv_init_sclk_t(adev);
}
#if 0
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
kv_force_lowest_valid(adev);
kv_init_graphics_levels(adev);
kv_program_bootup_state(adev);
kv_upload_dpm_settings(adev);
kv_force_lowest_valid(adev);
kv_unforce_levels(adev);
} else {
kv_init_graphics_levels(adev);
kv_program_bootup_state(adev);
kv_freeze_sclk_dpm(adev, true);
kv_upload_dpm_settings(adev);
kv_freeze_sclk_dpm(adev, false);
kv_set_enabled_level(adev, pi->graphics_boot_level);
}
}
#endif
static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
struct amdgpu_clock_and_voltage_limits *table)
{
struct kv_power_info *pi = kv_get_pi(adev);
if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
table->sclk =
pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
table->vddc =
kv_convert_2bit_index_to_voltage(adev,
pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
}
table->mclk = pi->sys_info.nbp_memory_clock[0];
}
static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
int i;
struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
struct amdgpu_clock_voltage_dependency_table *samu_table =
&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
struct amdgpu_clock_voltage_dependency_table *acp_table =
&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
if (uvd_table->count) {
for (i = 0; i < uvd_table->count; i++)
uvd_table->entries[i].v =
kv_convert_8bit_index_to_voltage(adev,
uvd_table->entries[i].v);
}
if (vce_table->count) {
for (i = 0; i < vce_table->count; i++)
vce_table->entries[i].v =
kv_convert_8bit_index_to_voltage(adev,
vce_table->entries[i].v);
}
if (samu_table->count) {
for (i = 0; i < samu_table->count; i++)
samu_table->entries[i].v =
kv_convert_8bit_index_to_voltage(adev,
samu_table->entries[i].v);
}
if (acp_table->count) {
for (i = 0; i < acp_table->count; i++)
acp_table->entries[i].v =
kv_convert_8bit_index_to_voltage(adev,
acp_table->entries[i].v);
}
}
static void kv_construct_boot_state(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
pi->boot_pl.ds_divider_index = 0;
pi->boot_pl.ss_divider_index = 0;
pi->boot_pl.allow_gnb_slow = 1;
pi->boot_pl.force_nbp_state = 0;
pi->boot_pl.display_wm = 0;
pi->boot_pl.vce_wm = 0;
}
static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
int ret;
u32 enable_mask, i;
ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
if (ret)
return ret;
for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
if (enable_mask & (1 << i))
break;
}
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
else
return kv_set_enabled_level(adev, i);
}
static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
int ret;
u32 enable_mask, i;
ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
if (ret)
return ret;
for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
if (enable_mask & (1 << i))
break;
}
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
else
return kv_set_enabled_level(adev, i);
}
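/*
 * Return the largest deep-sleep divider id such that sclk >> id still
 * stays at or above both the caller's floor and
 * KV_MINIMUM_ENGINE_CLOCK; 0 (no deep sleep) if the clock is already
 * below the floor or deep sleep is unsupported.
 */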
static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
u32 sclk, u32 min_sclk_in_sr)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
u32 temp;
u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
if (sclk < min)
return 0;
if (!pi->caps_sclk_ds)
return 0;
for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
temp = sclk >> i;
if (temp >= min)
break;
}
return (u8)i;
}
static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
int i;
if (table && table->count) {
for (i = table->count - 1; i >= 0; i--) {
if (pi->high_voltage_t &&
(kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
pi->high_voltage_t)) {
*limit = i;
return 0;
}
}
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
if (pi->high_voltage_t &&
(kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
pi->high_voltage_t)) {
*limit = i;
return 0;
}
}
}
*limit = 0;
return 0;
}
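/*
 * Massage the requested power state before programming it: pick VCE
 * clocks, enforce a minimum (or stable-p-state) sclk, clamp levels
 * that would exceed the high-voltage limit, and choose NB p-state
 * ranges from the memory clock, active CRTC count and battery state.
 */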
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps,
struct amdgpu_ps *old_rps)
{
struct kv_ps *ps = kv_get_ps(new_rps);
struct kv_power_info *pi = kv_get_pi(adev);
u32 min_sclk = 10000; /* ??? */
u32 sclk, mclk = 0;
int i, limit;
bool force_high;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
u32 stable_p_state_sclk = 0;
struct amdgpu_clock_and_voltage_limits *max_limits =
&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
if (new_rps->vce_active) {
new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
} else {
new_rps->evclk = 0;
new_rps->ecclk = 0;
}
mclk = max_limits->mclk;
sclk = min_sclk;
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
}
}
if (i > 0)
stable_p_state_sclk = table->entries[0].clk;
sclk = stable_p_state_sclk;
}
if (new_rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
}
ps->need_dfs_bypass = true;
for (i = 0; i < ps->num_levels; i++) {
if (ps->levels[i].sclk < sclk)
ps->levels[i].sclk = sclk;
}
if (table && table->count) {
for (i = 0; i < ps->num_levels; i++) {
if (pi->high_voltage_t &&
(pi->high_voltage_t <
kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
kv_get_high_voltage_limit(adev, &limit);
ps->levels[i].sclk = table->entries[limit].clk;
}
}
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
for (i = 0; i < ps->num_levels; i++) {
if (pi->high_voltage_t &&
(pi->high_voltage_t <
kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
kv_get_high_voltage_limit(adev, &limit);
ps->levels[i].sclk = table->entries[limit].sclk_frequency;
}
}
}
if (pi->caps_stable_p_state) {
for (i = 0; i < ps->num_levels; i++) {
ps->levels[i].sclk = stable_p_state_sclk;
}
}
pi->video_start = new_rps->dclk || new_rps->vclk ||
new_rps->evclk || new_rps->ecclk;
if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
pi->battery_state = true;
else
pi->battery_state = false;
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
ps->dpm0_pg_nb_ps_lo = 0x1;
ps->dpm0_pg_nb_ps_hi = 0x0;
ps->dpmx_nb_ps_lo = 0x1;
ps->dpmx_nb_ps_hi = 0x0;
} else {
ps->dpm0_pg_nb_ps_lo = 0x3;
ps->dpm0_pg_nb_ps_hi = 0x0;
ps->dpmx_nb_ps_lo = 0x3;
ps->dpmx_nb_ps_hi = 0x0;
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpmx_nb_ps_hi = 0x2;
}
}
}
static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
u32 index, bool enable)
{
struct kv_power_info *pi = kv_get_pi(adev);
pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}
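/*
 * Program a deep-sleep divider for every valid graphics level; note the
 * SMU firmware stores SclkFrequency big-endian, hence the be32_to_cpu().
 */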
static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 sclk_in_sr = 10000; /* ??? */
u32 i;
if (pi->lowest_valid > pi->highest_valid)
return -EINVAL;
for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
pi->graphics_level[i].DeepSleepDivId =
kv_get_sleep_divider_id_from_clock(adev,
be32_to_cpu(pi->graphics_level[i].SclkFrequency),
sclk_in_sr);
}
return 0;
}
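/*
 * Set the per-level NB fields (GnbSlow / ForceNbPs1 / UpH).  Kabini and
 * Mullins use different heuristics from the other ASICs, keyed off the
 * memory clock, active display count, video playback and battery state.
 */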
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
bool force_high;
struct amdgpu_clock_and_voltage_limits *max_limits =
&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
u32 mclk = max_limits->mclk;
if (pi->lowest_valid > pi->highest_valid)
return -EINVAL;
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
pi->graphics_level[i].GnbSlow = 1;
pi->graphics_level[i].ForceNbPs1 = 0;
pi->graphics_level[i].UpH = 0;
}
if (!pi->sys_info.nb_dpm_enable)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
(adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
pi->graphics_level[i].GnbSlow = 0;
} else {
if (pi->battery_state)
pi->graphics_level[0].ForceNbPs1 = 1;
pi->graphics_level[1].GnbSlow = 0;
pi->graphics_level[2].GnbSlow = 0;
pi->graphics_level[3].GnbSlow = 0;
pi->graphics_level[4].GnbSlow = 0;
}
} else {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
pi->graphics_level[i].GnbSlow = 1;
pi->graphics_level[i].ForceNbPs1 = 0;
pi->graphics_level[i].UpH = 0;
}
if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
pi->graphics_level[pi->lowest_valid].UpH = 0x28;
pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
if (pi->lowest_valid != pi->highest_valid)
pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
}
}
return 0;
}
static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
if (pi->lowest_valid > pi->highest_valid)
return -EINVAL;
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
return 0;
}
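/*
 * Build the graphics DPM level table from the vddc-vs-sclk dependency
 * table when the BIOS provides one, otherwise from the sumo sclk/voltage
 * mapping table, stopping at the high-voltage threshold.  All levels are
 * left disabled here; kv_enable_new_levels() turns on the valid range.
 */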
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
u32 vid_2bit;
pi->graphics_dpm_level_count = 0;
for (i = 0; i < table->count; i++) {
if (pi->high_voltage_t &&
(pi->high_voltage_t <
kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
break;
kv_set_divider_value(adev, i, table->entries[i].clk);
vid_2bit = kv_convert_vid7_to_vid2(adev,
&pi->sys_info.vid_mapping_table,
table->entries[i].v);
kv_set_vid(adev, i, vid_2bit);
kv_set_at(adev, i, pi->at[i]);
kv_dpm_power_level_enabled_for_throttle(adev, i, true);
pi->graphics_dpm_level_count++;
}
} else {
struct sumo_sclk_voltage_mapping_table *table =
&pi->sys_info.sclk_voltage_mapping_table;
pi->graphics_dpm_level_count = 0;
for (i = 0; i < table->num_max_dpm_entries; i++) {
if (pi->high_voltage_t &&
pi->high_voltage_t <
kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
break;
kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
kv_set_vid(adev, i, table->entries[i].vid_2bit);
kv_set_at(adev, i, pi->at[i]);
kv_dpm_power_level_enabled_for_throttle(adev, i, true);
pi->graphics_dpm_level_count++;
}
}
for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
kv_dpm_power_level_enable(adev, i, false);
}
static void kv_enable_new_levels(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
if (i >= pi->lowest_valid && i <= pi->highest_valid)
kv_dpm_power_level_enable(adev, i, true);
}
}
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
u32 new_mask = (1 << level);
return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
new_mask);
}
static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
u32 i, new_mask = 0;
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
new_mask |= (1 << i);
return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
new_mask);
}
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps)
{
struct kv_ps *new_ps = kv_get_ps(new_rps);
struct kv_power_info *pi = kv_get_pi(adev);
u32 nbdpmconfig1;
if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
return;
if (pi->sys_info.nb_dpm_enable) {
nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
}
}
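/*
 * Program the thermal interrupt thresholds.  The DIG_THERM_INTH/INTL
 * fields encode the trip points in degrees C with a +49 offset,
 * mirroring the decode in kv_dpm_get_temp().
 */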
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
u32 tmp;
if (low_temp < min_temp)
low_temp = min_temp;
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
return 0;
}
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
union igp_info *igp_info;
u8 frev, crev;
u16 data_offset;
int i;
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
if (crev != 8) {
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
pi->sys_info.bootup_nb_voltage_index =
le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
if (igp_info->info_8.ucHtcTmpLmt == 0)
pi->sys_info.htc_tmp_lmt = 203;
else
pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
if (igp_info->info_8.ucHtcHystLmt == 0)
pi->sys_info.htc_hyst_lmt = 5;
else
pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
}
if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
pi->sys_info.nb_dpm_enable = true;
else
pi->sys_info.nb_dpm_enable = false;
for (i = 0; i < KV_NUM_NBPSTATES; i++) {
pi->sys_info.nbp_memory_clock[i] =
le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
pi->sys_info.nbp_n_clock[i] =
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(adev,
&pi->sys_info.sclk_voltage_mapping_table,
igp_info->info_8.sAvail_SCLK);
sumo_construct_vid_mapping_table(adev,
&pi->sys_info.vid_mapping_table,
igp_info->info_8.sAvail_SCLK);
kv_construct_max_power_limits_table(adev,
&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
}
return 0;
}
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
union pplib_clock_info {
struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};
union pplib_power_state {
struct _ATOM_PPLIB_STATE v1;
struct _ATOM_PPLIB_STATE_V2 v2;
};
static void kv_patch_boot_state(struct amdgpu_device *adev,
struct kv_ps *ps)
{
struct kv_power_info *pi = kv_get_pi(adev);
ps->num_levels = 1;
ps->levels[0] = pi->boot_pl;
}
static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
struct amdgpu_ps *rps,
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
u8 table_rev)
{
struct kv_ps *ps = kv_get_ps(rps);
rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
rps->class = le16_to_cpu(non_clock_info->usClassification);
rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
} else {
rps->vclk = 0;
rps->dclk = 0;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
adev->pm.dpm.boot_ps = rps;
kv_patch_boot_state(adev, ps);
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
adev->pm.dpm.uvd_ps = rps;
}
static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
struct amdgpu_ps *rps, int index,
union pplib_clock_info *clock_info)
{
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *ps = kv_get_ps(rps);
struct kv_pl *pl = &ps->levels[index];
u32 sclk;
sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
sclk |= clock_info->sumo.ucEngineClockHigh << 16;
pl->sclk = sclk;
pl->vddc_index = clock_info->sumo.vddcIndex;
ps->num_levels = index + 1;
if (pi->caps_sclk_ds) {
pl->ds_divider_index = 5;
pl->ss_divider_index = 5;
}
}
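/*
 * Parse the ATOM PPLib power-play table.  Each v2 state entry is two
 * header bytes (ucNumDPMLevels and nonClockInfoIndex) followed by one
 * clock-info index byte per DPM level, which is why the offset advances
 * by 2 + ucNumDPMLevels per state.
 */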
static int kv_parse_power_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
union pplib_power_state *power_state;
int i, j, k, non_clock_array_index, clock_array_index;
union pplib_clock_info *clock_info;
struct _StateArray *state_array;
struct _ClockInfoArray *clock_info_array;
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct kv_ps *ps;
if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
amdgpu_add_thermal_controller(adev);
state_array = (struct _StateArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset));
clock_info_array = (struct _ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
sizeof(struct amdgpu_ps),
GFP_KERNEL);
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(adev->pm.dpm.ps);
return -ENOMEM;
}
adev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
((u8 *)&clock_info_array->clockInfo[0] +
(clock_array_index * clock_info_array->ucEntrySize));
kv_parse_pplib_clock_info(adev,
&adev->pm.dpm.ps[i], k,
clock_info);
k++;
}
kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
non_clock_info_array->ucEntrySize);
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
sclk |= clock_info->sumo.ucEngineClockHigh << 16;
adev->pm.dpm.vce_states[i].sclk = sclk;
adev->pm.dpm.vce_states[i].mclk = 0;
}
return 0;
}
static int kv_dpm_init(struct amdgpu_device *adev)
{
struct kv_power_info *pi;
int ret, i;
pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
adev->pm.dpm.priv = pi;
ret = amdgpu_get_platform_caps(adev);
if (ret)
return ret;
ret = amdgpu_parse_extended_power_table(adev);
if (ret)
return ret;
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
pi->sram_end = SMC_RAM_END;
pi->enable_nb_dpm = true;
pi->caps_power_containment = true;
pi->caps_cac = true;
pi->enable_didt = false;
if (pi->enable_didt) {
pi->caps_sq_ramping = true;
pi->caps_db_ramping = true;
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (amdgpu_bapm == 0)
pi->bapm_enable = false;
else
pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true;
pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_stable_p_state = false;
ret = kv_parse_sys_info_table(adev);
if (ret)
return ret;
kv_patch_voltage_values(adev);
kv_construct_boot_state(adev);
ret = kv_parse_power_table(adev);
if (ret)
return ret;
pi->enable_dpm = true;
return 0;
}
static void
kv_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
u32 sclk, tmp;
u16 vddc;
if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
} else {
sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
seq_printf(m, "power level %d sclk: %u vddc: %u\n",
current_index, sclk, vddc);
}
}
static void
kv_dpm_print_power_state(void *handle, void *request_ps)
{
int i;
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
for (i = 0; i < ps->num_levels; i++) {
struct kv_pl *pl = &ps->levels[i];
printk("\t\tpower level %d sclk: %u vddc: %u\n",
i, pl->sclk,
kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
}
amdgpu_dpm_print_ps_status(adev, rps);
}
static void kv_dpm_fini(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->pm.dpm.num_ps; i++) {
kfree(adev->pm.dpm.ps[i].ps_priv);
}
kfree(adev->pm.dpm.ps);
kfree(adev->pm.dpm.priv);
amdgpu_free_extended_power_table(adev);
}
static void kv_dpm_display_configuration_changed(void *handle)
{
}
static u32 kv_dpm_get_sclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
if (low)
return requested_state->levels[0].sclk;
else
return requested_state->levels[requested_state->num_levels - 1].sclk;
}
static u32 kv_dpm_get_mclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
static int kv_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
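/* the status register reports eighths of a degree C with a 49 degC offset */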
temp = RREG32_SMC(0xC0300E0C);
if (temp)
actual_temp = (temp / 8) - 49;
else
actual_temp = 0;
actual_temp = actual_temp * 1000;
return actual_temp;
}
static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->powerplay.pp_funcs = &kv_dpm_funcs;
adev->powerplay.pp_handle = adev;
kv_dpm_set_irq_funcs(adev);
return 0;
}
static int kv_dpm_late_init(void *handle)
{
/* power down unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->pm.dpm_enabled)
return 0;
kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
return 0;
}
static int kv_dpm_sw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
&adev->pm.dpm.thermal.irq);
if (ret)
return ret;
/* default to balanced state */
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
adev->pm.default_sclk = adev->clock.default_sclk;
adev->pm.default_mclk = adev->clock.default_mclk;
adev->pm.current_sclk = adev->clock.default_sclk;
adev->pm.current_mclk = adev->clock.default_mclk;
adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
if (amdgpu_dpm == 0)
return 0;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
ret = kv_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
static int kv_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
flush_work(&adev->pm.dpm.thermal.work);
kv_dpm_fini(adev);
return 0;
}
static int kv_dpm_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!amdgpu_dpm)
return 0;
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
static int kv_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
return 0;
}
static int kv_dpm_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
}
return 0;
}
static int kv_dpm_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
static bool kv_dpm_is_idle(void *handle)
{
return true;
}
static int kv_dpm_wait_for_idle(void *handle)
{
return 0;
}
static int kv_dpm_soft_reset(void *handle)
{
return 0;
}
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 cg_thermal_int;
switch (type) {
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
break;
default:
break;
}
break;
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
bool queue_thermal = false;
if (entry == NULL)
return -EINVAL;
switch (entry->src_id) {
case 230: /* thermal low to high */
DRM_DEBUG("IH: thermal low to high\n");
adev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
DRM_DEBUG("IH: thermal high to low\n");
adev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
default:
break;
}
if (queue_thermal)
schedule_work(&adev->pm.dpm.thermal.work);
return 0;
}
static int kv_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int kv_dpm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
const struct kv_pl *kv_cpl2)
{
return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
(kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
(kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}
static int kv_check_state_equal(void *handle,
void *current_ps,
void *request_ps,
bool *equal)
{
struct kv_ps *kv_cps;
struct kv_ps *kv_rps;
int i;
struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
kv_cps = kv_get_ps(cps);
kv_rps = kv_get_ps(rps);
if (kv_cps == NULL) {
*equal = false;
return 0;
}
if (kv_cps->num_levels != kv_rps->num_levels) {
*equal = false;
return 0;
}
for (i = 0; i < kv_cps->num_levels; i++) {
if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
&(kv_rps->levels[i]))) {
*equal = false;
return 0;
}
}
/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
return 0;
}
static int kv_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
uint32_t sclk;
u32 pl_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
return -EINVAL;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
sclk = be32_to_cpu(
pi->graphics_level[pl_index].SclkFrequency);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
}
return -EINVAL;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = kv_dpm_get_temp(adev);
*size = 4;
return 0;
default:
return -EOPNOTSUPP;
}
}
static int kv_set_powergating_by_smu(void *handle,
uint32_t block_type, bool gate)
{
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
kv_dpm_powergate_uvd(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_VCE:
kv_dpm_powergate_vce(handle, gate);
break;
default:
break;
}
return 0;
}
static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init,
.late_init = kv_dpm_late_init,
.sw_init = kv_dpm_sw_init,
.sw_fini = kv_dpm_sw_fini,
.hw_init = kv_dpm_hw_init,
.hw_fini = kv_dpm_hw_fini,
.suspend = kv_dpm_suspend,
.resume = kv_dpm_resume,
.is_idle = kv_dpm_is_idle,
.wait_for_idle = kv_dpm_wait_for_idle,
.soft_reset = kv_dpm_soft_reset,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
};
const struct amdgpu_ip_block_version kv_smu_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &kv_dpm_ip_funcs,
};
static const struct amd_pm_funcs kv_dpm_funcs = {
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
.post_set_power_state = &kv_dpm_post_set_power_state,
.display_configuration_changed = &kv_dpm_display_configuration_changed,
.get_sclk = &kv_dpm_get_sclk,
.get_mclk = &kv_dpm_get_mclk,
.print_power_state = &kv_dpm_print_power_state,
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
.set_powergating_by_smu = kv_set_powergating_by_smu,
.enable_bapm = &kv_dpm_enable_bapm,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
.read_sensor = &kv_dpm_read_sensor,
.pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
.set = kv_dpm_set_interrupt_state,
.process = kv_dpm_process_interrupt,
};
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}
| linux-master | drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c |
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "sid.h"
#include "ppsmc.h"
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
if (smc_address & 3)
return -EINVAL;
if ((smc_address + 3) > limit)
return -EINVAL;
WREG32(SMC_IND_INDEX_0, smc_address);
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
return 0;
}
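/*
 * Copy a byte buffer into SMC SRAM through the indirect index/data
 * registers.  Whole dwords are packed big-endian; a trailing partial
 * dword is merged with the existing contents via read-modify-write so
 * the neighbouring bytes are preserved.
 */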
int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
unsigned long flags;
int ret = 0;
u32 data, original_data, addr, extra_shift;
if (smc_start_address & 3)
return -EINVAL;
if ((smc_start_address + byte_count) > limit)
return -EINVAL;
addr = smc_start_address;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = si_set_smc_sram_address(adev, addr, limit);
if (ret)
goto done;
WREG32(SMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
addr += 4;
}
/* RMW for the final bytes */
if (byte_count > 0) {
data = 0;
ret = si_set_smc_sram_address(adev, addr, limit);
if (ret)
goto done;
original_data = RREG32(SMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
/* SMC address space is BE */
data = (data << 8) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (original_data & ~((~0UL) << extra_shift));
ret = si_set_smc_sram_address(adev, addr, limit);
if (ret)
goto done;
WREG32(SMC_IND_DATA_0, data);
}
done:
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
}
void amdgpu_si_start_smc(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
tmp &= ~RST_REG;
WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
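/*
 * Put the SMC back into reset.  The repeated CB_CGTT_SCLK_CTRL reads
 * before asserting RST_REG appear to serve as a register-posting delay.
 */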
void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
}
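/*
 * Patch the first dword of SMC SRAM so that the microcontroller jumps
 * to the loaded firmware entry point once it is released from reset.
 */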
int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
{
static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
if (enable)
tmp &= ~CK_DISABLE;
else
tmp |= CK_DISABLE;
WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
}
bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
{
u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
if (!(rst & RST_REG) && !(clk & CK_DISABLE))
return true;
return false;
}
PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
u32 tmp;
int i;
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
WREG32(SMC_MESSAGE_0, msg);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(SMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
return (PPSMC_Result)RREG32(SMC_RESP_0);
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
{
u32 tmp;
int i;
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_OK;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
if ((tmp & CKEN) == 0)
break;
udelay(1);
}
return PPSMC_Result_OK;
}
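/*
 * Upload the SMC firmware image pointed to by adev->pm.fw, using the
 * auto-increment mode of the indirect interface for the bulk copy.
 */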
int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
{
const struct smc_firmware_header_v1_0 *hdr;
unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
u32 data;
if (!adev->pm.fw)
return -EINVAL;
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
src = (const u8 *)
(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
if (ucode_size & 3)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
WREG32(SMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
}
int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 *value, u32 limit)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
*value = RREG32(SMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
}
int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 value, u32 limit)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
WREG32(SMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
}
| linux-master | drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c |
/*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d
#define SMC_RAM_END 0x20000
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define BIOS_SCRATCH_4 0x5cd
MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
static const struct amd_pm_funcs si_dpm_funcs;
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan;
struct _ATOM_PPLIB_FANTABLE2 fan2;
struct _ATOM_PPLIB_FANTABLE3 fan3;
};
union pplib_clock_info {
struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
enum si_dpm_auto_throttle_src {
SI_DPM_AUTO_THROTTLE_SRC_THERMAL,
SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};
enum si_dpm_event_src {
SI_DPM_EVENT_SRC_ANALOG = 0,
SI_DPM_EVENT_SRC_EXTERNAL = 1,
SI_DPM_EVENT_SRC_DIGITAL = 2,
SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
R600_UTC_DFLT_00,
R600_UTC_DFLT_01,
R600_UTC_DFLT_02,
R600_UTC_DFLT_03,
R600_UTC_DFLT_04,
R600_UTC_DFLT_05,
R600_UTC_DFLT_06,
R600_UTC_DFLT_07,
R600_UTC_DFLT_08,
R600_UTC_DFLT_09,
R600_UTC_DFLT_10,
R600_UTC_DFLT_11,
R600_UTC_DFLT_12,
R600_UTC_DFLT_13,
R600_UTC_DFLT_14,
};
static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
R600_DTC_DFLT_00,
R600_DTC_DFLT_01,
R600_DTC_DFLT_02,
R600_DTC_DFLT_03,
R600_DTC_DFLT_04,
R600_DTC_DFLT_05,
R600_DTC_DFLT_06,
R600_DTC_DFLT_07,
R600_DTC_DFLT_08,
R600_DTC_DFLT_09,
R600_DTC_DFLT_10,
R600_DTC_DFLT_11,
R600_DTC_DFLT_12,
R600_DTC_DFLT_13,
R600_DTC_DFLT_14,
};
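/*
 * Per-ASIC CAC configuration tables.  Each si_cac_config_reg entry is
 * { register offset, field mask, shift, value, register set }; the
 * lists are terminated by an entry with offset 0xFFFFFFFF.
 */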
static const struct si_cac_config_reg cac_weights_tahiti[] =
{
{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_tahiti[] =
{
{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_override_tahiti[] =
{
{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_tahiti =
{
((1 << 16) | 27027),
6,
0,
4,
95,
{
0UL,
0UL,
4521550UL,
309631529UL,
-1270850L,
4513710L,
40
},
595000000UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
static const struct si_dte_data dte_data_tahiti =
{
{ 1159409, 0, 0, 0, 0 },
{ 777, 0, 0, 0, 0 },
2,
54000,
127000,
25,
2,
10,
13,
{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
85,
false
};
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_new_zealand =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
0x5,
0xAFC8,
0x69,
0x32,
1,
0,
0x10,
{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
85,
true
};
static const struct si_dte_data dte_data_aruba_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_malta =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_cac_config_reg cac_weights_pitcairn[] =
{
{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_pitcairn[] =
{
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_override_pitcairn[] =
{
{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_pitcairn =
{
((1 << 16) | 27027),
5,
0,
6,
100,
{
51600000UL,
1800000UL,
7194395UL,
309631529UL,
-1270850L,
4513710L,
100
},
117830498UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
static const struct si_dte_data dte_data_pitcairn =
{
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
0,
0,
0,
0,
0,
0,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
0,
false
};
static const struct si_dte_data dte_data_curacao_xt =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_curacao_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_neptune_xt =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
45000,
100,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_heathrow[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_cape_verde[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_cape_verde[] =
{
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_override_cape_verde[] =
{
{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_cape_verde =
{
((1 << 16) | 0x6993),
5,
0,
7,
105,
{
0UL,
0UL,
7194395UL,
309631529UL,
-1270850L,
4513710L,
100
},
117830498UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
static const struct si_dte_data dte_data_cape_verde =
{
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
0,
0,
0,
0,
0,
0,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
0,
false
};
static const struct si_dte_data dte_data_venus_xtx =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
5,
55000,
0x69,
0xA,
1,
0,
0x3,
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_venus_xt =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
5,
55000,
0x69,
0xA,
1,
0,
0x3,
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_venus_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
5,
55000,
0x69,
0xA,
1,
0,
0x3,
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_cac_config_reg cac_weights_oland[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_mars_pro[] =
{
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_mars_xt[] =
{
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_oland_pro[] =
{
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_oland_xt[] =
{
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_oland[] =
{
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_mars_pro[] =
{
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_override_oland[] =
{
{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_oland =
{
((1 << 16) | 0x6993),
5,
0,
7,
105,
{
0UL,
0UL,
7194395UL,
309631529UL,
-1270850L,
4513710L,
100
},
117830498UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
static const struct si_powertune_data powertune_data_mars_pro =
{
((1 << 16) | 0x6993),
5,
0,
7,
105,
{
0UL,
0UL,
7194395UL,
309631529UL,
-1270850L,
4513710L,
100
},
117830498UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
static const struct si_dte_data dte_data_oland =
{
{ 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0 },
0,
0,
0,
0,
0,
0,
0,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
0,
false
};
static const struct si_dte_data dte_data_mars_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
55000,
105,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_dte_data dte_data_sun_xt =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
{ 0x0, 0x0, 0x0, 0x0, 0x0 },
5,
55000,
105,
0xA,
1,
0,
0x10,
{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
90,
true
};
static const struct si_cac_config_reg cac_weights_hainan[] =
{
{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_hainan =
{
((1 << 16) | 0x6993),
5,
0,
9,
105,
{
0UL,
0UL,
7194395UL,
309631529UL,
-1270850L,
4513710L,
100
},
117830498UL,
12,
{
0,
0,
0,
0,
0,
0,
0,
0
},
true
};
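
/* Forward declarations for helpers defined later in this file. */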
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
static int si_populate_voltage_value(struct amdgpu_device *adev,
const struct atom_voltage_table *table,
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
static int si_get_std_voltage_value(struct amdgpu_device *adev,
SISLANDS_SMC_VOLTAGE_VALUE *voltage,
u16 *std_voltage);
static int si_write_smc_soft_register(struct amdgpu_device *adev,
u16 reg_offset, u32 value);
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
struct rv7xx_pl *pl,
SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
{
struct si_power_info *pi = adev->pm.dpm.priv;
return pi;
}
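
/*
 * Leakage model implemented by si_calculate_leakage_for_v_and_t_formula()
 * below, written out in plain math for reference:
 *
 *   tmp = t_slope * vddc + t_intercept
 *   kt  = exp(tmp * T) / exp(tmp * t_ref)
 *   kv  = av * exp(bv * vddc)
 *   leakage = i_leakage * kt * kv * vddc
 *
 * Inputs are rescaled first (ileakage / 100, v in mV / 1000, t / 1000,
 * and t_slope, t_intercept, av, bv / 1e8), all arithmetic is done with
 * the drm 32.32 fixed-point helpers, and the result is returned scaled
 * up by 1000.
 */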
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
u16 v, s32 t, u32 ileakage, u32 *leakage)
{
s64 kt, kv, leakage_w, i_leakage, vddc;
s64 temperature, t_slope, t_intercept, av, bv, t_ref;
s64 tmp;
i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
vddc = div64_s64(drm_int2fixp(v), 1000);
temperature = div64_s64(drm_int2fixp(t), 1000);
t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
av = div64_s64(drm_int2fixp(coeff->av), 100000000);
bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
t_ref = drm_int2fixp(coeff->t_ref);
tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
*leakage = drm_fixp2int(leakage_w * 1000);
}
static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
const struct ni_leakage_coeffients *coeff,
u16 v,
s32 t,
u32 i_leakage,
u32 *leakage)
{
si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
const u32 fixed_kt, u16 v,
u32 ileakage, u32 *leakage)
{
s64 kt, kv, leakage_w, i_leakage, vddc;
i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
vddc = div64_s64(drm_int2fixp(v), 1000);
kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
*leakage = drm_fixp2int(leakage_w * 1000);
}
static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
const struct ni_leakage_coeffients *coeff,
const u32 fixed_kt,
u16 v,
u32 i_leakage,
u32 *leakage)
{
si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
}
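/*
 * Rebuild the DTE thermal RC constants from the PL2 (near-TDP) limit:
 * r[i] = t_split[i] * (t_max - t0 / 1000) * 2^14 / (100 * p_limit2).
 * The loop assumes k matches the five t_split entries; r[4] then seeds
 * the temperature-dependent resistances (tdep_r), with tdep_r[1]
 * doubled relative to the rest.
 */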
static void si_update_dte_from_pl2(struct amdgpu_device *adev,
struct si_dte_data *dte_data)
{
u32 p_limit1 = adev->pm.dpm.tdp_limit;
u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
u32 k = dte_data->k;
u32 t_max = dte_data->max_t;
u32 t_split[5] = { 10, 15, 20, 25, 30 };
u32 t_0 = dte_data->t0;
u32 i;
if (p_limit2 != 0 && p_limit2 <= p_limit1) {
dte_data->tdep_count = 3;
for (i = 0; i < k; i++) {
dte_data->r[i] =
(t_split[i] * (t_max - t_0 / (u32)1000) * (1 << 14)) /
(p_limit2 * (u32)100);
}
dte_data->tdep_r[1] = dte_data->r[4] * 2;
for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
dte_data->tdep_r[i] = dte_data->r[4];
}
} else {
DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
}
}
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = adev->pm.dpm.priv;
return pi;
}
static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
{
struct ni_power_info *pi = adev->pm.dpm.priv;
return pi;
}
static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
{
struct si_ps *ps = aps->ps_priv;
return ps;
}
static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
bool update_dte_from_pl2 = false;
if (adev->asic_type == CHIP_TAHITI) {
si_pi->cac_weights = cac_weights_tahiti;
si_pi->lcac_config = lcac_tahiti;
si_pi->cac_override = cac_override_tahiti;
si_pi->powertune_data = &powertune_data_tahiti;
si_pi->dte_data = dte_data_tahiti;
switch (adev->pdev->device) {
case 0x6798:
si_pi->dte_data.enable_dte_by_default = true;
break;
case 0x6799:
si_pi->dte_data = dte_data_new_zealand;
break;
case 0x6790:
case 0x6791:
case 0x6792:
case 0x679E:
si_pi->dte_data = dte_data_aruba_pro;
update_dte_from_pl2 = true;
break;
case 0x679B:
si_pi->dte_data = dte_data_malta;
update_dte_from_pl2 = true;
break;
case 0x679A:
si_pi->dte_data = dte_data_tahiti_pro;
update_dte_from_pl2 = true;
break;
default:
if (si_pi->dte_data.enable_dte_by_default == true)
DRM_ERROR("DTE is not enabled!\n");
break;
}
} else if (adev->asic_type == CHIP_PITCAIRN) {
si_pi->cac_weights = cac_weights_pitcairn;
si_pi->lcac_config = lcac_pitcairn;
si_pi->cac_override = cac_override_pitcairn;
si_pi->powertune_data = &powertune_data_pitcairn;
switch (adev->pdev->device) {
case 0x6810:
case 0x6818:
si_pi->dte_data = dte_data_curacao_xt;
update_dte_from_pl2 = true;
break;
case 0x6819:
case 0x6811:
si_pi->dte_data = dte_data_curacao_pro;
update_dte_from_pl2 = true;
break;
case 0x6800:
case 0x6806:
si_pi->dte_data = dte_data_neptune_xt;
update_dte_from_pl2 = true;
break;
default:
si_pi->dte_data = dte_data_pitcairn;
break;
}
} else if (adev->asic_type == CHIP_VERDE) {
si_pi->lcac_config = lcac_cape_verde;
si_pi->cac_override = cac_override_cape_verde;
si_pi->powertune_data = &powertune_data_cape_verde;
switch (adev->pdev->device) {
case 0x683B:
case 0x683F:
case 0x6829:
case 0x6835:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
si_pi->cac_weights = cac_weights_heathrow;
si_pi->dte_data = dte_data_cape_verde;
break;
case 0x6824:
case 0x682D:
si_pi->cac_weights = cac_weights_chelsea_xt;
si_pi->dte_data = dte_data_cape_verde;
break;
case 0x682F:
si_pi->cac_weights = cac_weights_chelsea_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
case 0x6820:
si_pi->cac_weights = cac_weights_heathrow;
si_pi->dte_data = dte_data_venus_xtx;
break;
case 0x6821:
si_pi->cac_weights = cac_weights_heathrow;
si_pi->dte_data = dte_data_venus_xt;
break;
case 0x6823:
case 0x682B:
case 0x6822:
case 0x682A:
si_pi->cac_weights = cac_weights_chelsea_pro;
si_pi->dte_data = dte_data_venus_pro;
break;
default:
si_pi->cac_weights = cac_weights_cape_verde;
si_pi->dte_data = dte_data_cape_verde;
break;
}
} else if (adev->asic_type == CHIP_OLAND) {
si_pi->lcac_config = lcac_mars_pro;
si_pi->cac_override = cac_override_oland;
si_pi->powertune_data = &powertune_data_mars_pro;
si_pi->dte_data = dte_data_mars_pro;
switch (adev->pdev->device) {
case 0x6601:
case 0x6621:
case 0x6603:
case 0x6605:
si_pi->cac_weights = cac_weights_mars_pro;
update_dte_from_pl2 = true;
break;
case 0x6600:
case 0x6606:
case 0x6620:
case 0x6604:
si_pi->cac_weights = cac_weights_mars_xt;
update_dte_from_pl2 = true;
break;
case 0x6611:
case 0x6613:
case 0x6608:
si_pi->cac_weights = cac_weights_oland_pro;
update_dte_from_pl2 = true;
break;
case 0x6610:
si_pi->cac_weights = cac_weights_oland_xt;
update_dte_from_pl2 = true;
break;
default:
si_pi->cac_weights = cac_weights_oland;
si_pi->lcac_config = lcac_oland;
si_pi->cac_override = cac_override_oland;
si_pi->powertune_data = &powertune_data_oland;
si_pi->dte_data = dte_data_oland;
break;
}
} else if (adev->asic_type == CHIP_HAINAN) {
si_pi->cac_weights = cac_weights_hainan;
si_pi->lcac_config = lcac_oland;
si_pi->cac_override = cac_override_oland;
si_pi->powertune_data = &powertune_data_hainan;
si_pi->dte_data = dte_data_sun_xt;
update_dte_from_pl2 = true;
} else {
DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
return;
}
ni_pi->enable_power_containment = false;
ni_pi->enable_cac = false;
ni_pi->enable_sq_ramping = false;
si_pi->enable_dte = false;
if (si_pi->powertune_data->enable_powertune_by_default) {
ni_pi->enable_power_containment = true;
ni_pi->enable_cac = true;
if (si_pi->dte_data.enable_dte_by_default) {
si_pi->enable_dte = true;
if (update_dte_from_pl2)
si_update_dte_from_pl2(adev, &si_pi->dte_data);
}
ni_pi->enable_sq_ramping = true;
}
ni_pi->driver_calculate_cac_leakage = true;
ni_pi->cac_configuration_required = true;
if (ni_pi->cac_configuration_required) {
ni_pi->support_cac_long_term_average = true;
si_pi->dyn_powertune_data.l2_lta_window_size =
si_pi->powertune_data->l2_lta_window_size_default;
si_pi->dyn_powertune_data.lts_truncate =
si_pi->powertune_data->lts_truncate_default;
} else {
ni_pi->support_cac_long_term_average = false;
si_pi->dyn_powertune_data.l2_lta_window_size = 0;
si_pi->dyn_powertune_data.lts_truncate = 0;
}
si_pi->dyn_powertune_data.disable_uvd_powertune = false;
}
static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
{
return 1;
}
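/*
 * CG_CAC_CTRL packs the CAC sampling window as two 16-bit halves whose
 * product is the window length in xclk cycles.  amdgpu_asic_get_xclk()
 * returns the reference clock in 10 kHz units, so cycles * 100 / xclk
 * is the window time in microseconds: a 27000-cycle window at
 * xclk = 2700 (27 MHz) gives 1000 us.
 */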
static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
{
u32 xclk;
u32 wintime;
u32 cac_window;
u32 cac_window_size;
xclk = amdgpu_asic_get_xclk(adev);
if (xclk == 0)
return 0;
cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
return wintime;
}
static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
return power_in_watts;
}
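/*
 * tdp_adjustment is an overdrive percentage of the board TDP limit.
 * With adjust_polarity set, a 10% adjustment turns a limit of 100 into
 * tdp_limit = 110 and raises near_tdp_limit by the same absolute
 * delta; with it clear, both limits shrink instead.  Results outside
 * (0, 2 * tdp_limit] for tdp_limit, or (0, tdp_limit] for
 * near_tdp_limit, are rejected with -EINVAL.
 */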
static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
bool adjust_polarity,
u32 tdp_adjustment,
u32 *tdp_limit,
u32 *near_tdp_limit)
{
u32 adjustment_delta, max_tdp_limit;
if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
return -EINVAL;
max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
if (adjust_polarity) {
*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
} else {
*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
else
*near_tdp_limit = 0;
}
if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
return -EINVAL;
if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
return -EINVAL;
return 0;
}
static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
if (ni_pi->enable_power_containment) {
SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
PP_SIslands_PAPMParameters *papm_parm;
struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
u32 tdp_limit;
u32 near_tdp_limit;
int ret;
if (scaling_factor == 0)
return -EINVAL;
memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
ret = si_calculate_adjusted_tdp_limits(adev,
false, /* ??? */
adev->pm.dpm.tdp_adjustment,
&tdp_limit,
&near_tdp_limit);
if (ret)
return ret;
smc_table->dpm2Params.TDPLimit =
cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
smc_table->dpm2Params.NearTDPLimit =
cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
smc_table->dpm2Params.SafePowerLimit =
cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
ret = amdgpu_si_copy_bytes_to_smc(adev,
(si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
(u8 *)(&(smc_table->dpm2Params.TDPLimit)),
sizeof(u32) * 3,
si_pi->sram_end);
if (ret)
return ret;
if (si_pi->enable_ppm) {
papm_parm = &si_pi->papm_parm;
memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
papm_parm->dGPU_T_Warning = cpu_to_be32(95);
papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
papm_parm->PlatformPowerLimit = 0xffffffff;
papm_parm->NearTDPLimitPAPM = 0xffffffff;
ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
(u8 *)papm_parm,
sizeof(PP_SIslands_PAPMParameters),
si_pi->sram_end);
if (ret)
return ret;
}
}
return 0;
}
static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
if (ni_pi->enable_power_containment) {
SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
int ret;
memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
smc_table->dpm2Params.NearTDPLimit =
cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
smc_table->dpm2Params.SafePowerLimit =
cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
ret = amdgpu_si_copy_bytes_to_smc(adev,
(si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
(u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
sizeof(u32) * 2,
si_pi->sram_end);
if (ret)
return ret;
}
return 0;
}
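/*
 * Dynamic power scales roughly with V^2, so the DPM2 efficiency ratio
 * between two performance levels is computed as
 *
 *   ratio = 1024 * curr_vddc^2 * (1000 + margin) / (1000 * prev_vddc^2)
 *
 * i.e. (curr/prev)^2 in 1/1024 units with a per-mille margin added.
 * A return of 0 flags a zero input or a ratio that overflows 16 bits.
 */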
static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
const u16 prev_std_vddc,
const u16 curr_std_vddc)
{
u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
u64 prev_vddc = (u64)prev_std_vddc;
u64 curr_vddc = (u64)curr_std_vddc;
u64 pwr_efficiency_ratio, n, d;
if ((prev_vddc == 0) || (curr_vddc == 0))
return 0;
n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
d = prev_vddc * prev_vddc;
pwr_efficiency_ratio = div64_u64(n, d);
if (pwr_efficiency_ratio > (u64)0xFFFF)
return 0;
return (u16)pwr_efficiency_ratio;
}
static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
amdgpu_state->vclk && amdgpu_state->dclk)
return true;
return false;
}
static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
{
struct evergreen_power_info *pi = adev->pm.dpm.priv;
return pi;
}
static int si_populate_power_containment_values(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SISLANDS_SMC_SWSTATE *smc_state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_ps *state = si_get_ps(amdgpu_state);
SISLANDS_SMC_VOLTAGE_VALUE vddc;
u32 prev_sclk;
u32 max_sclk;
u32 min_sclk;
u16 prev_std_vddc;
u16 curr_std_vddc;
int i;
u16 pwr_efficiency_ratio;
u8 max_ps_percent;
bool disable_uvd_power_tune;
int ret;
if (ni_pi->enable_power_containment == false)
return 0;
if (state->performance_level_count == 0)
return -EINVAL;
if (smc_state->levelCount != state->performance_level_count)
return -EINVAL;
disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
smc_state->levels[0].dpm2.MaxPS = 0;
smc_state->levels[0].dpm2.NearTDPDec = 0;
smc_state->levels[0].dpm2.AboveSafeInc = 0;
smc_state->levels[0].dpm2.BelowSafeInc = 0;
smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
for (i = 1; i < state->performance_level_count; i++) {
prev_sclk = state->performance_levels[i-1].sclk;
max_sclk = state->performance_levels[i].sclk;
if (i == 1)
max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
else
max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
if (prev_sclk > max_sclk)
return -EINVAL;
if ((max_ps_percent == 0) ||
(prev_sclk == max_sclk) ||
disable_uvd_power_tune)
min_sclk = max_sclk;
else if (i == 1)
min_sclk = prev_sclk;
else
min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
if (min_sclk < state->performance_levels[0].sclk)
min_sclk = state->performance_levels[0].sclk;
if (min_sclk == 0)
return -EINVAL;
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
state->performance_levels[i-1].vddc, &vddc);
if (ret)
return ret;
ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
if (ret)
return ret;
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
state->performance_levels[i].vddc, &vddc);
if (ret)
return ret;
ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
if (ret)
return ret;
pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
prev_std_vddc, curr_std_vddc);
smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
}
return 0;
}
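/*
 * Per-level SQ power ramping: the compile-time ramp parameters are
 * checked against the SQ power throttle register field widths, and if
 * any would overflow, or a level's sclk is below
 * adev->pm.dpm.sq_ramping_threshold, that level gets all-ones field
 * masks instead, which effectively disables throttling for it.
 */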
static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SISLANDS_SMC_SWSTATE *smc_state)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_ps *state = si_get_ps(amdgpu_state);
u32 sq_power_throttle, sq_power_throttle2;
bool enable_sq_ramping = ni_pi->enable_sq_ramping;
int i;
if (state->performance_level_count == 0)
return -EINVAL;
if (smc_state->levelCount != state->performance_level_count)
return -EINVAL;
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
enable_sq_ramping = false;
if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
enable_sq_ramping = false;
if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
enable_sq_ramping = false;
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
sq_power_throttle = 0;
sq_power_throttle2 = 0;
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
} else {
sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
}
return 0;
}
static int si_enable_power_containment(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
bool enable)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
PPSMC_Result smc_result;
int ret = 0;
if (ni_pi->enable_power_containment) {
if (enable) {
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
if (smc_result != PPSMC_Result_OK) {
ret = -EINVAL;
ni_pi->pc_enabled = false;
} else {
ni_pi->pc_enabled = true;
}
}
} else {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
ni_pi->pc_enabled = false;
}
}
return ret;
}
static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
int ret = 0;
struct si_dte_data *dte_data = &si_pi->dte_data;
Smc_SIslands_DTE_Configuration *dte_tables = NULL;
u32 table_size;
u8 tdep_count;
u32 i;
if (dte_data == NULL)
si_pi->enable_dte = false;
if (si_pi->enable_dte == false)
return 0;
if (dte_data->k == 0)
return -EINVAL;
dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
if (dte_tables == NULL) {
si_pi->enable_dte = false;
return -ENOMEM;
}
table_size = dte_data->k;
if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
tdep_count = dte_data->tdep_count;
if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
dte_tables->K = cpu_to_be32(table_size);
dte_tables->T0 = cpu_to_be32(dte_data->t0);
dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
dte_tables->WindowSize = dte_data->window_size;
dte_tables->temp_select = dte_data->temp_select;
dte_tables->DTE_mode = dte_data->dte_mode;
dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
if (tdep_count > 0)
table_size--;
for (i = 0; i < table_size; i++) {
dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
}
dte_tables->Tdep_count = tdep_count;
for (i = 0; i < (u32)tdep_count; i++) {
dte_tables->T_limits[i] = dte_data->t_limits[i];
dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
}
ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
(u8 *)dte_tables,
sizeof(Smc_SIslands_DTE_Configuration),
si_pi->sram_end);
kfree(dte_tables);
return ret;
}
static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
u16 *max, u16 *min)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct amdgpu_cac_leakage_table *table =
&adev->pm.dpm.dyn_state.cac_leakage_table;
u32 i;
u32 v0_loadline;
if (table == NULL)
return -EINVAL;
*max = 0;
*min = 0xFFFF;
for (i = 0; i < table->count; i++) {
if (table->entries[i].vddc > *max)
*max = table->entries[i].vddc;
if (table->entries[i].vddc < *min)
*min = table->entries[i].vddc;
}
if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
return -EINVAL;
v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
if (v0_loadline > 0xFFFFUL)
return -EINVAL;
*min = (u16)v0_loadline;
if ((*min > *max) || (*max == 0) || (*min == 0))
return -EINVAL;
return 0;
}
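/*
 * Ceiling division: the smallest voltage step such that
 * SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES equally spaced points span
 * [min, max].  The caller afterwards recomputes vddc_min as
 * vddc_max - step * (entries - 1) so the LUT endpoints line up exactly.
 */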
static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
{
return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
}
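/*
 * Fill the temperature/voltage leakage LUT.  Rows step through
 * temperature (t0 + i * t_step, converted to millidegrees for the
 * formula), columns are computed from vddc_max downwards but stored
 * mirrored so the table the SMC reads ascends in voltage.  Each entry
 * is scaled for the SMC, divided by 4 and clamped to 16 bits.
 */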
static int si_init_dte_leakage_table(struct amdgpu_device *adev,
PP_SIslands_CacConfig *cac_tables,
u16 vddc_max, u16 vddc_min, u16 vddc_step,
u16 t0, u16 t_step)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 leakage;
unsigned int i, j;
s32 t;
u32 smc_leakage;
u32 scaling_factor;
u16 voltage;
scaling_factor = si_get_smc_power_scaling_factor(adev);
for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
t = (1000 * (i * t_step + t0));
for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
voltage = vddc_max - (vddc_step * j);
si_calculate_leakage_for_v_and_t(adev,
&si_pi->powertune_data->leakage_coefficients,
voltage,
t,
si_pi->dyn_powertune_data.cac_leakage,
&leakage);
smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
if (smc_leakage > 0xFFFF)
smc_leakage = 0xFFFF;
cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
cpu_to_be16((u16)smc_leakage);
}
}
return 0;
}
static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
PP_SIslands_CacConfig *cac_tables,
u16 vddc_max, u16 vddc_min, u16 vddc_step)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 leakage;
unsigned int i, j;
u32 smc_leakage;
u32 scaling_factor;
u16 voltage;
scaling_factor = si_get_smc_power_scaling_factor(adev);
for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
voltage = vddc_max - (vddc_step * j);
si_calculate_leakage_for_v(adev,
&si_pi->powertune_data->leakage_coefficients,
si_pi->powertune_data->fixed_kt,
voltage,
si_pi->dyn_powertune_data.cac_leakage,
&leakage);
smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
if (smc_leakage > 0xFFFF)
smc_leakage = 0xFFFF;
for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
cpu_to_be16((u16)smc_leakage);
}
return 0;
}
static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
PP_SIslands_CacConfig *cac_tables = NULL;
u16 vddc_max, vddc_min, vddc_step;
u16 t0, t_step;
u32 load_line_slope, reg;
int ret = 0;
u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
if (ni_pi->enable_cac == false)
return 0;
cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
if (!cac_tables)
return -ENOMEM;
reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
WREG32(CG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
if (ret)
goto done_free;
vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
t_step = 4;
t0 = 60;
if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
ret = si_init_dte_leakage_table(adev, cac_tables,
vddc_max, vddc_min, vddc_step,
t0, t_step);
else
ret = si_init_simplified_leakage_table(adev, cac_tables,
vddc_max, vddc_min, vddc_step);
if (ret)
goto done_free;
load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
cac_tables->R_LL = cpu_to_be32(load_line_slope);
cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
cac_tables->calculation_repeats = cpu_to_be32(2);
cac_tables->dc_cac = cpu_to_be32(0);
cac_tables->log2_PG_LKG_SCALE = 12;
cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
(u8 *)cac_tables,
sizeof(PP_SIslands_CacConfig),
si_pi->sram_end);
if (ret)
goto done_free;
ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
done_free:
if (ret) {
ni_pi->enable_cac = false;
ni_pi->enable_power_containment = false;
}
kfree(cac_tables);
return ret;
}
static int si_program_cac_config_registers(struct amdgpu_device *adev,
const struct si_cac_config_reg *cac_config_regs)
{
const struct si_cac_config_reg *config_regs = cac_config_regs;
u32 data = 0, offset;
if (!config_regs)
return -EINVAL;
while (config_regs->offset != 0xFFFFFFFF) {
switch (config_regs->type) {
case SISLANDS_CACCONFIG_CGIND:
offset = SMC_CG_IND_START + config_regs->offset;
if (offset < SMC_CG_IND_END)
data = RREG32_SMC(offset);
break;
default:
data = RREG32(config_regs->offset);
break;
}
data &= ~config_regs->mask;
data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
switch (config_regs->type) {
case SISLANDS_CACCONFIG_CGIND:
offset = SMC_CG_IND_START + config_regs->offset;
if (offset < SMC_CG_IND_END)
WREG32_SMC(offset, data);
break;
default:
WREG32(config_regs->offset, data);
break;
}
config_regs++;
}
return 0;
}
static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
int ret;
if ((ni_pi->enable_cac == false) ||
(ni_pi->cac_configuration_required == false))
return 0;
ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
if (ret)
return ret;
ret = si_program_cac_config_registers(adev, si_pi->cac_override);
if (ret)
return ret;
ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
if (ret)
return ret;
return 0;
}
static int si_enable_smc_cac(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
bool enable)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
PPSMC_Result smc_result;
int ret = 0;
if (ni_pi->enable_cac) {
if (enable) {
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
if (ni_pi->support_cac_long_term_average) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
if (smc_result != PPSMC_Result_OK)
ni_pi->support_cac_long_term_average = false;
}
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
if (smc_result != PPSMC_Result_OK) {
ret = -EINVAL;
ni_pi->cac_enabled = false;
} else {
ni_pi->cac_enabled = true;
}
if (si_pi->enable_dte) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
}
}
} else if (ni_pi->cac_enabled) {
if (si_pi->enable_dte)
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
ni_pi->cac_enabled = false;
if (ni_pi->support_cac_long_term_average)
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
}
}
return ret;
}
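/*
 * Precompute the SPLL divider table used for engine clock switches:
 * 256 entries, one every 512 units of sclk (10 kHz units, so 5.12 MHz
 * apart).  For each target clock the post and feedback dividers and
 * the spread-spectrum clk_s/clk_v values are extracted from the
 * computed register values, range-checked against the packed table
 * field widths and stored big-endian for the SMC.
 */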
static int si_init_smc_spll_table(struct amdgpu_device *adev)
{
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
SISLANDS_SMC_SCLK_VALUE sclk_params;
u32 fb_div, p_div;
u32 clk_s, clk_v;
u32 sclk = 0;
int ret = 0;
u32 tmp;
int i;
if (si_pi->spll_table_start == 0)
return -EINVAL;
spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
if (spll_table == NULL)
return -ENOMEM;
for (i = 0; i < 256; i++) {
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
clk_v >>= 6;
if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
ret = -EINVAL;
if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;
if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
ret = -EINVAL;
if (ret)
break;
tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
spll_table->freq[i] = cpu_to_be32(tmp);
tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
spll_table->ss[i] = cpu_to_be32(tmp);
sclk += 512;
}
if (!ret)
ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
(u8 *)spll_table,
sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
si_pi->sram_end);
if (ret)
ni_pi->enable_power_containment = false;
kfree(spll_table);
return ret;
}
static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
u16 vce_voltage)
{
u16 highest_leakage = 0;
struct si_power_info *si_pi = si_get_pi(adev);
int i;
for (i = 0; i < si_pi->leakage_voltage.count; i++) {
if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
}
if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
return highest_leakage;
return vce_voltage;
}
static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
u32 evclk, u32 ecclk, u16 *voltage)
{
u32 i;
int ret = -EINVAL;
struct amdgpu_vce_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
if (((evclk == 0) && (ecclk == 0)) ||
(table && (table->count == 0))) {
*voltage = 0;
return 0;
}
for (i = 0; i < table->count; i++) {
if ((evclk <= table->entries[i].evclk) &&
(ecclk <= table->entries[i].ecclk)) {
*voltage = table->entries[i].v;
ret = 0;
break;
}
}
/* if no match return the highest voltage */
if (ret)
*voltage = table->entries[table->count - 1].v;
*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
return ret;
}
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
else
return false;
}
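/*
 * The memory controller keeps four arbitration register sets (F0-F3).
 * Copy the DRAM timing and burst-time values from the source set to
 * the destination set, then request the switch through MC_ARB_CG so
 * that mclk changes run with timings matching the new frequency.
 */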
static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
u32 arb_freq_src, u32 arb_freq_dest)
{
u32 mc_arb_dram_timing;
u32 mc_arb_dram_timing2;
u32 burst_time;
u32 mc_cg_config;
switch (arb_freq_src) {
case MC_CG_ARB_FREQ_F0:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
break;
case MC_CG_ARB_FREQ_F1:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
break;
case MC_CG_ARB_FREQ_F2:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
break;
case MC_CG_ARB_FREQ_F3:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
break;
default:
return -EINVAL;
}
switch (arb_freq_dest) {
case MC_CG_ARB_FREQ_F0:
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
break;
case MC_CG_ARB_FREQ_F1:
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
break;
case MC_CG_ARB_FREQ_F2:
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
break;
case MC_CG_ARB_FREQ_F3:
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
break;
default:
return -EINVAL;
}
mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
WREG32(MC_CG_CONFIG, mc_cg_config);
WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
return 0;
}
static void ni_update_current_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
struct si_ps *new_ps = si_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct ni_power_info *ni_pi = ni_get_pi(adev);
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
adev->pm.dpm.current_ps = &eg_pi->current_rps;
}
static void ni_update_requested_ps(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
struct si_ps *new_ps = si_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct ni_power_info *ni_pi = ni_get_pi(adev);
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
}
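/*
 * UVD clocks are reprogrammed around an engine clock switch so that
 * vclk/dclk are always set while the lower of the two top sclk values
 * is active: before the switch when sclk is going down, after it when
 * sclk is going up or unchanged.
 */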
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
struct amdgpu_ps *new_ps,
struct amdgpu_ps *old_ps)
{
struct si_ps *new_state = si_get_ps(new_ps);
struct si_ps *current_state = si_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
return;
if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
return;
amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
}
static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
struct amdgpu_ps *new_ps,
struct amdgpu_ps *old_ps)
{
struct si_ps *new_state = si_get_ps(new_ps);
struct si_ps *current_state = si_get_ps(old_ps);
if ((new_ps->vclk == old_ps->vclk) &&
(new_ps->dclk == old_ps->dclk))
return;
if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
return;
amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
}
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
{
unsigned int i;
for (i = 0; i < table->count; i++)
if (voltage <= table->entries[i].value)
return table->entries[i].value;
return table->entries[table->count - 1].value;
}
static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
u32 max_clock, u32 requested_clock)
{
unsigned int i;
if ((clocks == NULL) || (clocks->count == 0))
return (requested_clock < max_clock) ? requested_clock : max_clock;
for (i = 0; i < clocks->count; i++) {
if (clocks->values[i] >= requested_clock)
return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
}
return (clocks->values[clocks->count - 1] < max_clock) ?
clocks->values[clocks->count - 1] : max_clock;
}
static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
u32 max_mclk, u32 requested_mclk)
{
return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
max_mclk, requested_mclk);
}
static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
u32 max_sclk, u32 requested_sclk)
{
return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
max_sclk, requested_sclk);
}
static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
u32 *max_clock)
{
u32 i, clock = 0;
if ((table == NULL) || (table->count == 0)) {
*max_clock = clock;
return;
}
for (i = 0; i < table->count; i++) {
if (clock < table->entries[i].clk)
clock = table->entries[i].clk;
}
*max_clock = clock;
}
static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
u32 clock, u16 max_voltage, u16 *voltage)
{
u32 i;
if ((table == NULL) || (table->count == 0))
return;
for (i = 0; i < table->count; i++) {
if (clock <= table->entries[i].clk) {
if (*voltage < table->entries[i].v)
*voltage = (u16)((table->entries[i].v < max_voltage) ?
table->entries[i].v : max_voltage);
return;
}
}
*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
}
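/*
 * Keep mclk and sclk within the platform's allowed relationship: if
 * mclk exceeds sclk by more than mclk_sclk_ratio, raise sclk to
 * ceil(mclk / ratio); if sclk exceeds mclk by more than
 * sclk_mclk_delta, raise mclk to sclk - delta.  E.g. with a ratio of 4
 * and mclk = 100000, sclk is lifted to at least 25000.
 */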
static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
const struct amdgpu_clock_and_voltage_limits *max_limits,
struct rv7xx_pl *pl)
{
if ((pl->mclk == 0) || (pl->sclk == 0))
return;
if (pl->mclk == pl->sclk)
return;
if (pl->mclk > pl->sclk) {
if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
pl->sclk = btc_get_valid_sclk(adev,
max_limits->sclk,
(pl->mclk +
(adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
adev->pm.dpm.dyn_state.mclk_sclk_ratio);
} else {
if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
pl->mclk = btc_get_valid_mclk(adev,
max_limits->mclk,
pl->sclk -
adev->pm.dpm.dyn_state.sclk_mclk_delta);
}
}
static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
u16 max_vddc, u16 max_vddci,
u16 *vddc, u16 *vddci)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
u16 new_voltage;
if ((0 == *vddc) || (0 == *vddci))
return;
if (*vddc > *vddci) {
if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
(*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
}
} else {
if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
(*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
}
}
}
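/*
 * Derive the p and u throttle fields from an interval percentage i and
 * reference count r_c: i_c = i * r_c / 100, b_c is the number of bits
 * of i_c above bit position p_b, u = (b_c + 1) / 2, and p = i_c / 4^u,
 * i.e. i_c re-expressed with a 2u-bit right shift.
 */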
static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
u32 b_c = 0;
u32 i_c;
u32 tmp;
i_c = (i * r_c) / 100;
tmp = i_c >> p_b;
while (tmp) {
b_c++;
tmp >>= 1;
}
*u = (b_c + 1) / 2;
*p = i_c / (1 << (2 * (*u)));
}
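/*
 * Split a hysteresis budget around the target t using the high/low
 * frequency ratio k = 100 * fh / fl: the total budget a is divided
 * into ah (proportional to t, taken below it: *th = t - ah) and
 * al = a - ah (added above it: *tl = t + al).
 */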
static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
u32 k, a, ah, al;
u32 t1;
if ((fl == 0) || (fh == 0) || (fl > fh))
return -EINVAL;
k = (100 * fh) / fl;
t1 = (t * (k - 100));
a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
a = (a + 5) / 10;
ah = ((a * t) + 5000) / 10000;
al = a - ah;
*th = t - ah;
*tl = t + al;
return 0;
}
static bool r600_is_uvd_state(u32 class, u32 class2)
{
if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
return true;
if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
return true;
if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
return true;
if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return true;
if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
return true;
return false;
}
static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
{
return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
}
static void rv770_get_max_vddc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
u16 vddc;
if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
pi->max_vddc = 0;
else
pi->max_vddc = vddc;
}
static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct amdgpu_atom_ss ss;
pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
ASIC_INTERNAL_ENGINE_SS, 0);
pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
pi->dynamic_ss = false;
}
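/*
 * Clamp and massage a requested power state before programming it:
 * apply per-SKU sclk/mclk caps, VCE voltage minimums, DC power limits
 * and the voltage dependency tables, force clocks and voltages to be
 * monotonic across levels, and pin mclk (and sclk while UVD/VCE are
 * active) to a single value whenever switching would be unsafe, e.g.
 * with multiple active CRTCs or too-short vblanks.
 */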
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching = false;
bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
if ((adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6665)) {
max_sclk = 60000;
max_mclk = 80000;
}
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
}
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
&min_vce_voltage);
} else {
rps->evclk = 0;
rps->ecclk = 0;
}
if ((adev->pm.dpm.new_active_crtc_count > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
if (rps->vclk || rps->dclk) {
disable_mclk_switching = true;
disable_sclk_switching = true;
}
if (adev->pm.ac_power)
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
else
max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
for (i = ps->performance_level_count - 2; i >= 0; i--) {
if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
}
if (adev->pm.ac_power == false) {
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk > max_limits->mclk)
ps->performance_levels[i].mclk = max_limits->mclk;
if (ps->performance_levels[i].sclk > max_limits->sclk)
ps->performance_levels[i].sclk = max_limits->sclk;
if (ps->performance_levels[i].vddc > max_limits->vddc)
ps->performance_levels[i].vddc = max_limits->vddc;
if (ps->performance_levels[i].vddci > max_limits->vddci)
ps->performance_levels[i].vddci = max_limits->vddci;
}
}
/* limit clocks to max supported clocks based on voltage dependency tables */
btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
&max_sclk_vddc);
btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
&max_mclk_vddci);
btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
&max_mclk_vddc);
for (i = 0; i < ps->performance_level_count; i++) {
if (max_sclk_vddc) {
if (ps->performance_levels[i].sclk > max_sclk_vddc)
ps->performance_levels[i].sclk = max_sclk_vddc;
}
if (max_mclk_vddci) {
if (ps->performance_levels[i].mclk > max_mclk_vddci)
ps->performance_levels[i].mclk = max_mclk_vddci;
}
if (max_mclk_vddc) {
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
if (max_mclk) {
if (ps->performance_levels[i].mclk > max_mclk)
ps->performance_levels[i].mclk = max_mclk;
}
if (max_sclk) {
if (ps->performance_levels[i].sclk > max_sclk)
ps->performance_levels[i].sclk = max_sclk;
}
}
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
mclk = ps->performance_levels[0].mclk;
vddci = ps->performance_levels[0].vddci;
}
if (disable_sclk_switching) {
sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
} else {
sclk = ps->performance_levels[0].sclk;
vddc = ps->performance_levels[0].vddc;
}
if (rps->vce_active) {
if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
}
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
if (disable_sclk_switching) {
sclk = ps->performance_levels[0].sclk;
for (i = 1; i < ps->performance_level_count; i++) {
if (sclk < ps->performance_levels[i].sclk)
sclk = ps->performance_levels[i].sclk;
}
for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].sclk = sclk;
ps->performance_levels[i].vddc = vddc;
}
} else {
for (i = 1; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
}
}
if (disable_mclk_switching) {
mclk = ps->performance_levels[0].mclk;
for (i = 1; i < ps->performance_level_count; i++) {
if (mclk < ps->performance_levels[i].mclk)
mclk = ps->performance_levels[i].mclk;
}
for (i = 0; i < ps->performance_level_count; i++) {
ps->performance_levels[i].mclk = mclk;
ps->performance_levels[i].vddci = vddci;
}
} else {
for (i = 1; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
}
}
for (i = 0; i < ps->performance_level_count; i++)
btc_adjust_clock_combinations(adev, max_limits,
&ps->performance_levels[i]);
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc < min_vce_voltage)
ps->performance_levels[i].vddc = min_vce_voltage;
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
ps->performance_levels[i].sclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
ps->performance_levels[i].mclk,
max_limits->vddci, &ps->performance_levels[i].vddci);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
adev->clock.current_dispclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
for (i = 0; i < ps->performance_level_count; i++) {
btc_apply_voltage_delta_rules(adev,
max_limits->vddc, max_limits->vddci,
&ps->performance_levels[i].vddc,
&ps->performance_levels[i].vddci);
}
ps->dc_compatible = true;
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
ps->dc_compatible = false;
}
}
#if 0
static int si_read_smc_soft_register(struct amdgpu_device *adev,
u16 reg_offset, u32 *value)
{
struct si_power_info *si_pi = si_get_pi(adev);
return amdgpu_si_read_smc_sram_dword(adev,
si_pi->soft_regs_start + reg_offset, value,
si_pi->sram_end);
}
#endif
static int si_write_smc_soft_register(struct amdgpu_device *adev,
u16 reg_offset, u32 value)
{
struct si_power_info *si_pi = si_get_pi(adev);
return amdgpu_si_write_smc_sram_dword(adev,
si_pi->soft_regs_start + reg_offset,
value, si_pi->sram_end);
}
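/*
 * Detect one specific 1 GB Pitcairn board (device 0x6819) by decoding
 * the memory configuration: GDDR5 with particular vendor/revision IDs,
 * and density = 2^(rows + cols + banks) * width / 2^20 equal to 0x400,
 * i.e. 1 Gbit parts, assuming width is the per-device I/O width in
 * bits.
 */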
static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
{
bool ret = false;
u32 tmp, width, row, column, bank, density;
bool is_memory_gddr5, is_special;
tmp = RREG32(MC_SEQ_MISC0);
is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
&& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
tmp = RREG32(MC_ARB_RAMCFG);
row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
if ((adev->pdev->device == 0x6819) &&
is_memory_gddr5 && is_special && (density == 0x400))
ret = true;
return ret;
}
static void si_get_leakage_vddc(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
u16 vddc, count = 0;
int i, ret;
for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
si_pi->leakage_voltage.entries[count].voltage = vddc;
si_pi->leakage_voltage.entries[count].leakage_index =
SISLANDS_LEAKAGE_INDEX0 + i;
count++;
}
}
si_pi->leakage_voltage.count = count;
}
static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
u32 index, u16 *leakage_voltage)
{
struct si_power_info *si_pi = si_get_pi(adev);
int i;
if (leakage_voltage == NULL)
return -EINVAL;
if ((index & 0xff00) != 0xff00)
return -EINVAL;
if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
return -EINVAL;
if (index < SISLANDS_LEAKAGE_INDEX0)
return -EINVAL;
for (i = 0; i < si_pi->leakage_voltage.count; i++) {
if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
return 0;
}
}
return -EAGAIN;
}
static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
bool want_thermal_protection;
enum si_dpm_event_src dpm_event_src;
switch (sources) {
case 0:
default:
want_thermal_protection = false;
break;
case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL;
break;
case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL;
break;
case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
(1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)):
want_thermal_protection = true;
dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
break;
}
if (want_thermal_protection) {
WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
} else {
WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}
}
static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
enum si_dpm_auto_throttle_src source,
bool enable)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
if (enable) {
if (!(pi->active_auto_throttle_sources & (1 << source))) {
pi->active_auto_throttle_sources |= 1 << source;
si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
}
} else {
if (pi->active_auto_throttle_sources & (1 << source)) {
pi->active_auto_throttle_sources &= ~(1 << source);
si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
}
}
}
static void si_start_dpm(struct amdgpu_device *adev)
{
WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
else
WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}
#if 0
static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
u32 thermal_level)
{
PPSMC_Result ret;
if (thermal_level == 0) {
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (ret == PPSMC_Result_OK)
return 0;
else
return -EINVAL;
}
return 0;
}
static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
{
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
}
#endif
#if 0
static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
{
if (ac_power)
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
0 : -EINVAL;
return 0;
}
#endif
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
WREG32(SMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
{
if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
return -EINVAL;
return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
static int si_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
struct si_ps *ps = si_get_ps(rps);
u32 levels = ps->performance_level_count;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
}
adev->pm.dpm.forced_level = level;
return 0;
}
#if 0
static int si_set_boot_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
#endif
static int si_set_sw_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
static int si_halt_smc(struct amdgpu_device *adev)
{
if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
return -EINVAL;
return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
static int si_resume_smc(struct amdgpu_device *adev)
{
if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
return -EINVAL;
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
static void si_dpm_start_smc(struct amdgpu_device *adev)
{
amdgpu_si_program_jump_on_start(adev);
amdgpu_si_start_smc(adev);
amdgpu_si_smc_clock(adev, true);
}
static void si_dpm_stop_smc(struct amdgpu_device *adev)
{
amdgpu_si_reset_smc(adev);
amdgpu_si_smc_clock(adev, false);
}
static int si_process_firmware_header(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
int ret;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->state_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->soft_regs_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->mc_reg_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->fan_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->arb_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->cac_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->dte_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->spll_table_start = tmp;
ret = amdgpu_si_read_smc_sram_dword(adev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
&tmp, si_pi->sram_end);
if (ret)
return ret;
si_pi->papm_cfg_table_start = tmp;
return ret;
}
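/*
 * Snapshot the boot-up SPLL/MPLL and memory clock registers; these values
 * seed the per-state register sets built for the SMC later on.
 */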
static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
else
WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}
#if 0
static int si_enter_ulp_state(struct amdgpu_device *adev)
{
WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
udelay(25000);
return 0;
}
static int si_exit_ulp_state(struct amdgpu_device *adev)
{
int i;
WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
udelay(7000);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32(SMC_RESP_0) == 1)
break;
udelay(1000);
}
return 0;
}
#endif
static int si_notify_smc_display_change(struct amdgpu_device *adev,
bool has_display)
{
PPSMC_Msg msg = has_display ?
PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
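/*
 * Program the SMC response-time soft registers.  The microsecond delays
 * (voltage ramp, ACPI entry, VBI timeout) are converted into reference-clock
 * ticks; the /100 scaling assumes xclk is reported in 10 kHz units.
 */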
static void si_program_response_times(struct amdgpu_device *adev)
{
u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
acpi_delay_time = 15000;
vbi_time_out = 100000;
reference_clock = amdgpu_asic_get_xclk(adev);
vddc_dly = (voltage_response_time * reference_clock) / 100;
acpi_dly = (acpi_delay_time * reference_clock) / 100;
vbi_dly = (vbi_time_out * reference_clock) / 100;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
}
static void si_program_ds_registers(struct amdgpu_device *adev)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
u32 tmp;
/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
tmp = 0x10;
else
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
~AUTOSCALE_ON_SS_CLEAR);
}
}
static void si_program_display_gap(struct amdgpu_device *adev)
{
u32 tmp, pipe;
int i;
tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
if (adev->pm.dpm.new_active_crtc_count > 0)
tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
else
tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
if (adev->pm.dpm.new_active_crtc_count > 1)
tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
else
tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
WREG32(CG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
if ((adev->pm.dpm.new_active_crtc_count > 0) &&
(!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
/* find the first active crtc */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->pm.dpm.new_active_crtcs & (1 << i))
break;
}
if (i == adev->mode_info.num_crtc)
pipe = 0;
else
pipe = i;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
}
/* Passing false here forces the performance state to low whenever the crtcs
 * are disabled.  That is a problem on PowerXpress systems, or when the card
 * is used for offscreen rendering or compute with no crtcs enabled.
 */
si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
if (enable) {
if (pi->sclk_ss)
WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
} else {
WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
}
}
static void si_setup_bsp(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
u32 xclk = amdgpu_asic_get_xclk(adev);
r600_calculate_u_and_p(pi->asi,
xclk,
16,
&pi->bsp,
&pi->bsu);
r600_calculate_u_and_p(pi->pasi,
xclk,
16,
&pi->pbsp,
&pi->pbsu);
pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
WREG32(CG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
{
int i;
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
if (td == R600_TD_AUTO)
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
else
WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
if (td == R600_TD_UP)
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
if (td == R600_TD_DOWN)
WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
WREG32(CG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
WREG32(CG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
WREG32(CG_FTV, 0);
}
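/*
 * Map a memory clock to the 4-bit MC parameter index used for DDR3 timings:
 * index 0 below 100 MHz, 0xf at or above 800 MHz, and one step per 50 MHz in
 * between (clock values in 10 kHz units).
 */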
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
u8 mc_para_index;
if (memory_clock < 10000)
mc_para_index = 0;
else if (memory_clock >= 80000)
mc_para_index = 0x0f;
else
mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
return mc_para_index;
}
static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
u8 mc_para_index;
if (strobe_mode) {
if (memory_clock < 12500)
mc_para_index = 0x00;
else if (memory_clock > 47500)
mc_para_index = 0x0f;
else
mc_para_index = (u8)((memory_clock - 10000) / 2500);
} else {
if (memory_clock < 65000)
mc_para_index = 0x00;
else if (memory_clock > 135000)
mc_para_index = 0x0f;
else
mc_para_index = (u8)((memory_clock - 60000) / 5000);
}
return mc_para_index;
}
static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
bool strobe_mode = false;
u8 result = 0;
if (mclk <= pi->mclk_strobe_mode_threshold)
strobe_mode = true;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
else
result = si_get_ddr3_mclk_frequency_ratio(mclk);
if (strobe_mode)
result |= SISLANDS_SMC_STROBE_ENABLE;
return result;
}
static int si_upload_firmware(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
amdgpu_si_reset_smc(adev);
amdgpu_si_smc_clock(adev, false);
return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
}
static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
const struct atom_voltage_table *table,
const struct amdgpu_phase_shedding_limits_table *limits)
{
u32 data, num_bits, num_levels;
if ((table == NULL) || (limits == NULL))
return false;
data = table->mask_low;
num_bits = hweight32(data);
if (num_bits == 0)
return false;
num_levels = (1 << num_bits);
if (table->count != num_levels)
return false;
if (limits->count != (num_levels - 1))
return false;
return true;
}
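/*
 * If a voltage table has more entries than the SMC state table can hold,
 * drop the lowest entries so the max_voltage_steps highest voltages remain.
 */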
static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
u32 max_voltage_steps,
struct atom_voltage_table *voltage_table)
{
unsigned int i, diff;
if (voltage_table->count <= max_voltage_steps)
return;
diff = voltage_table->count - max_voltage_steps;
for (i = 0; i < max_voltage_steps; i++)
voltage_table->entries[i] = voltage_table->entries[i + diff];
voltage_table->count = max_voltage_steps;
}
static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
struct atom_voltage_table *voltage_table)
{
u32 i;
if (voltage_dependency_table == NULL)
return -EINVAL;
voltage_table->mask_low = 0;
voltage_table->phase_delay = 0;
voltage_table->count = voltage_dependency_table->count;
for (i = 0; i < voltage_table->count; i++) {
voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
voltage_table->entries[i].smio_low = 0;
}
return 0;
}
static int si_construct_voltage_tables(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
int ret;
if (pi->voltage_control) {
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
if (ret)
return ret;
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
si_trim_voltage_table_to_fit_state_table(adev,
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddc_voltage_table);
} else if (si_pi->voltage_control_svi2) {
ret = si_get_svi2_voltage_table(adev,
&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
&eg_pi->vddc_voltage_table);
if (ret)
return ret;
} else {
return -EINVAL;
}
if (eg_pi->vddci_control) {
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
if (ret)
return ret;
if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
si_trim_voltage_table_to_fit_state_table(adev,
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddci_voltage_table);
}
if (si_pi->vddci_control_svi2) {
ret = si_get_svi2_voltage_table(adev,
&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
&eg_pi->vddci_voltage_table);
if (ret)
return ret;
}
if (pi->mvdd_control) {
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
if (ret) {
pi->mvdd_control = false;
return ret;
}
if (si_pi->mvdd_voltage_table.count == 0) {
pi->mvdd_control = false;
return -EINVAL;
}
if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
si_trim_voltage_table_to_fit_state_table(adev,
SISLANDS_MAX_NO_VREG_STEPS,
&si_pi->mvdd_voltage_table);
}
if (si_pi->vddc_phase_shed_control) {
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
if (ret)
si_pi->vddc_phase_shed_control = false;
if ((si_pi->vddc_phase_shed_table.count == 0) ||
(si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
si_pi->vddc_phase_shed_control = false;
}
return 0;
}
static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
const struct atom_voltage_table *voltage_table,
SISLANDS_SMC_STATETABLE *table)
{
unsigned int i;
for (i = 0; i < voltage_table->count; i++)
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
}
static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
SISLANDS_SMC_STATETABLE *table)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
u8 i;
if (si_pi->voltage_control_svi2) {
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
si_pi->svc_gpio_id);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
si_pi->svd_gpio_id);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
2);
} else {
if (eg_pi->vddc_voltage_table.count) {
si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
table->maxVDDCIndexInPPTable = i;
break;
}
}
}
if (eg_pi->vddci_voltage_table.count) {
si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
if (si_pi->mvdd_voltage_table.count) {
si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
}
if (si_pi->vddc_phase_shed_control) {
if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
&adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
(u32)si_pi->vddc_phase_shed_table.phase_delay);
} else {
si_pi->vddc_phase_shed_control = false;
}
}
}
return 0;
}
static int si_populate_voltage_value(struct amdgpu_device *adev,
const struct atom_voltage_table *table,
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
unsigned int i;
for (i = 0; i < table->count; i++) {
if (value <= table->entries[i].value) {
voltage->index = (u8)i;
voltage->value = cpu_to_be16(table->entries[i].value);
break;
}
}
if (i >= table->count)
return -EINVAL;
return 0;
}
static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
if (pi->mvdd_control) {
if (mclk <= pi->mvdd_split_frequency)
voltage->index = 0;
else
voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
}
return 0;
}
static int si_get_std_voltage_value(struct amdgpu_device *adev,
SISLANDS_SMC_VOLTAGE_VALUE *voltage,
u16 *std_voltage)
{
u16 v_index;
bool voltage_found = false;
*std_voltage = be16_to_cpu(voltage->value);
if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
return -EINVAL;
for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
if (be16_to_cpu(voltage->value) ==
(u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
voltage_found = true;
if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
*std_voltage =
adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
else
*std_voltage =
adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
break;
}
}
if (!voltage_found) {
for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
if (be16_to_cpu(voltage->value) <=
(u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
voltage_found = true;
if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
*std_voltage =
adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
else
*std_voltage =
adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
break;
}
}
}
} else {
if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
}
}
return 0;
}
static int si_populate_std_voltage_value(struct amdgpu_device *adev,
u16 value, u8 index,
SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
voltage->index = index;
voltage->value = cpu_to_be16(value);
return 0;
}
static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
const struct amdgpu_phase_shedding_limits_table *limits,
u16 voltage, u32 sclk, u32 mclk,
SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
{
unsigned int i;
for (i = 0; i < limits->count; i++) {
if ((voltage <= limits->entries[i].voltage) &&
(sclk <= limits->entries[i].sclk) &&
(mclk <= limits->entries[i].mclk))
break;
}
smc_voltage->phase_settings = (u8)i;
return 0;
}
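/*
 * The top byte of the first dword in the MC arbitration table holds the
 * index of the active arbitration set; initialize it to MC_CG_ARB_FREQ_F1.
 */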
static int si_init_arb_table_index(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
int ret;
ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
&tmp, si_pi->sram_end);
if (ret)
return ret;
tmp &= 0x00FFFFFF;
tmp |= MC_CG_ARB_FREQ_F1 << 24;
return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
tmp, si_pi->sram_end);
}
static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
static int si_reset_to_default(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
int ret;
ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
&tmp, si_pi->sram_end);
if (ret)
return ret;
tmp = (tmp >> 24) & 0xff;
if (tmp == MC_CG_ARB_FREQ_F0)
return 0;
return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}
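/*
 * Derive the MC arbiter refresh rate for a given engine clock from the DRAM
 * row count (MC_ARB_RAMCFG) and the refresh interval encoded in MC_SEQ_MISC0.
 * The exact scaling constants are hardware-defined.
 */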
static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 engine_clock)
{
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp >= 4)
dram_rows = 16384;
else
dram_rows = 1 << (tmp + 10);
dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
return mc_arb_rfsh_rate;
}
static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
struct rv7xx_pl *pl,
SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
{
u32 dram_timing;
u32 dram_timing2;
u32 burst_time;
arb_regs->mc_arb_rfsh_rate =
(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
amdgpu_atombios_set_engine_dram_timings(adev,
pl->sclk,
pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
arb_regs->mc_arb_burst_time = (u8)burst_time;
return 0;
}
static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
unsigned int first_arb_set)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ps *state = si_get_ps(amdgpu_state);
SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
int i, ret = 0;
for (i = 0; i < state->performance_level_count; i++) {
ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
if (ret)
break;
ret = amdgpu_si_copy_bytes_to_smc(adev,
si_pi->arb_table_start +
offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
(u8 *)&arb_regs,
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
si_pi->sram_end);
if (ret)
break;
}
return ret;
}
static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state)
{
return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
SISLANDS_DRIVER_STATE_ARB_INDEX);
}
static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
if (pi->mvdd_control)
return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
si_pi->mvdd_bootup_value, voltage);
return 0;
}
static int si_populate_smc_initial_state(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_initial_state,
SISLANDS_SMC_STATETABLE *table)
{
struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
u32 reg;
int ret;
table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(si_pi->clock_registers.dll_cntl);
table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
table->initialState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
table->initialState.level.arbRefreshState =
SISLANDS_INITIAL_STATE_ARB_INDEX;
table->initialState.level.ACIndex = 0;
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
&table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
&table->initialState.level.vddc,
&std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
table->initialState.level.vddc.index,
&table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
si_populate_voltage_value(adev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
&table->initialState.level.vddci);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
&adev->pm.dpm.dyn_state.phase_shedding_limits_table,
initial_state->performance_levels[0].vddc,
initial_state->performance_levels[0].sclk,
initial_state->performance_levels[0].mclk,
&table->initialState.level.vddc);
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
table->initialState.level.strobeMode =
si_get_strobe_mode_settings(adev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
else
table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
table->initialState.level.dpm2.MaxPS = 0;
table->initialState.level.dpm2.NearTDPDec = 0;
table->initialState.level.dpm2.AboveSafeInc = 0;
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
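/*
 * Resolve the PCIe gen to report to the SMC: an explicit asic_gen wins,
 * otherwise fall back to the fastest gen allowed by both the system
 * link-speed mask and the requested default.
 */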
static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev,
u32 sys_mask,
enum si_pcie_gen asic_gen,
enum si_pcie_gen default_gen)
{
switch (asic_gen) {
case SI_PCIE_GEN1:
return SI_PCIE_GEN1;
case SI_PCIE_GEN2:
return SI_PCIE_GEN2;
case SI_PCIE_GEN3:
return SI_PCIE_GEN3;
default:
if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
(default_gen == SI_PCIE_GEN3))
return SI_PCIE_GEN3;
else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
(default_gen == SI_PCIE_GEN2))
return SI_PCIE_GEN2;
else
return SI_PCIE_GEN1;
}
}
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
SISLANDS_SMC_STATETABLE *table)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
u32 dll_cntl = si_pi->clock_registers.dll_cntl;
u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
u32 reg;
int ret;
table->ACPIState = table->initialState;
table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
if (pi->acpi_vddc) {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
&table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
table->ACPIState.level.vddc.index,
&table->ACPIState.level.std_vddc);
}
table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
if (si_pi->vddc_phase_shed_control) {
si_populate_phase_shedding_value(adev,
&adev->pm.dpm.dyn_state.phase_shedding_limits_table,
pi->acpi_vddc,
0,
0,
&table->ACPIState.level.vddc);
}
} else {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
pi->min_vddc_in_table, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
&table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
table->ACPIState.level.vddc.index,
&table->ACPIState.level.std_vddc);
}
table->ACPIState.level.gen2PCIE =
(u8)si_gen_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
SI_PCIE_GEN1);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
&adev->pm.dpm.dyn_state.phase_shedding_limits_table,
pi->min_vddc_in_table,
0,
0,
&table->ACPIState.level.vddc);
}
if (pi->acpi_vddc) {
if (eg_pi->acpi_vddci)
si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
&table->ACPIState.level.vddci);
}
mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(mclk_pwrmgt_cntl);
table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(mpll_ad_func_cntl);
table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(mpll_dq_func_cntl);
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(mpll_func_cntl);
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(mpll_func_cntl_1);
table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(mpll_func_cntl_2);
table->ACPIState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
table->ACPIState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(spll_func_cntl);
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(spll_func_cntl_2);
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(spll_func_cntl_3);
table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(spll_func_cntl_4);
table->ACPIState.level.mclk.mclk_value = 0;
table->ACPIState.level.sclk.sclk_value = 0;
si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
table->ACPIState.level.ACIndex = 0;
table->ACPIState.level.dpm2.MaxPS = 0;
table->ACPIState.level.dpm2.NearTDPDec = 0;
table->ACPIState.level.dpm2.AboveSafeInc = 0;
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
static int si_populate_ulv_state(struct amdgpu_device *adev,
struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ulv_param *ulv = &si_pi->ulv;
u32 sclk_in_sr = 1350; /* ??? */
int ret;
ret = si_convert_power_level_to_smc(adev, &ulv->pl,
&state->level);
if (!ret) {
if (eg_pi->sclk_deep_sleep) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
if (ulv->one_pcie_lane_in_ulv)
state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
state->level.ACIndex = 1;
state->level.std_vddc = state->level.vddc;
state->levelCount = 1;
state->flags |= PPSMC_SWSTATE_FLAG_DC;
}
return ret;
}
static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ulv_param *ulv = &si_pi->ulv;
SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
int ret;
ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
&arb_regs);
if (ret)
return ret;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
ulv->volt_change_delay);
ret = amdgpu_si_copy_bytes_to_smc(adev,
si_pi->arb_table_start +
offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
(u8 *)&arb_regs,
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
si_pi->sram_end);
return ret;
}
static void si_get_mvdd_configuration(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
pi->mvdd_split_frequency = 30000;
}
static int si_init_smc_table(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
const struct si_ulv_param *ulv = &si_pi->ulv;
SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
int ret;
u32 lane_width;
u32 vr_hot_gpio;
si_populate_smc_voltage_tables(adev, table);
switch (adev->pm.int_thermal_type) {
case THERMAL_TYPE_SI:
case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
}
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
vr_hot_gpio = adev->pm.dpm.backbias_response_time;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
vr_hot_gpio);
}
ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
if (ret)
return ret;
ret = si_populate_smc_acpi_state(adev, table);
if (ret)
return ret;
table->driverState.flags = table->initialState.flags;
table->driverState.levelCount = table->initialState.levelCount;
table->driverState.levels[0] = table->initialState.level;
ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
SISLANDS_INITIAL_STATE_ARB_INDEX);
if (ret)
return ret;
if (ulv->supported && ulv->pl.vddc) {
ret = si_populate_ulv_state(adev, &table->ULVState);
if (ret)
return ret;
ret = si_program_ulv_memory_timing_parameters(adev);
if (ret)
return ret;
WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
} else {
table->ULVState = table->initialState;
}
return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
(u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
si_pi->sram_end);
}
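/*
 * Compute the SPLL register values for an engine clock.  The feedback
 * divider is derived in fixed point (scaled by 16384) from the target clock,
 * reference divider and post divider; spread-spectrum CLK_S/CLK_V values are
 * folded in when engine clock SS is enabled.
 */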
static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
struct atom_clock_dividers dividers;
u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
u64 tmp;
u32 reference_clock = adev->clock.spll.reference_freq;
u32 reference_divider;
u32 fbdiv;
int ret;
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
engine_clock, false, &dividers);
if (ret)
return ret;
reference_divider = 1 + dividers.ref_div;
tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(2);
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
u32 vco_freq = engine_clock * dividers.post_div;
if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
cg_spll_spread_spectrum &= ~CLK_S_MASK;
cg_spll_spread_spectrum |= CLK_S(clk_s);
cg_spll_spread_spectrum |= SSEN;
cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
}
}
sclk->sclk_value = engine_clock;
sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
return 0;
}
static int si_populate_sclk_value(struct amdgpu_device *adev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk)
{
SISLANDS_SMC_SCLK_VALUE sclk_tmp;
int ret;
ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
if (!ret) {
sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
}
return ret;
}
static int si_populate_mclk_value(struct amdgpu_device *adev,
u32 engine_clock,
u32 memory_clock,
SISLANDS_SMC_MCLK_VALUE *mclk,
bool strobe_mode,
bool dll_state_on)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
u32 dll_cntl = si_pi->clock_registers.dll_cntl;
u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
struct atom_mpll_param mpll_param;
int ret;
ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
if (ret)
return ret;
mpll_func_cntl &= ~BWCTRL_MASK;
mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
YCLK_POST_DIV(mpll_param.post_div);
}
if (pi->mclk_ss) {
struct amdgpu_atom_ss ss;
u32 freq_nom;
u32 tmp;
u32 reference_clock = adev->clock.mpll.reference_freq;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
freq_nom = memory_clock * 4;
else
freq_nom = memory_clock * 2;
tmp = freq_nom / reference_clock;
tmp = tmp * tmp;
if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
u32 clks = reference_clock * 5 / ss.rate;
u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
mpll_ss1 &= ~CLKV_MASK;
mpll_ss1 |= CLKV(clkv);
mpll_ss2 &= ~CLKS_MASK;
mpll_ss2 |= CLKS(clks);
}
}
mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
if (dll_state_on)
mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
else
mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
mclk->mclk_value = cpu_to_be32(memory_clock);
mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
return 0;
}
static void si_populate_smc_sp(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SISLANDS_SMC_SWSTATE *smc_state)
{
struct si_ps *ps = si_get_ps(amdgpu_state);
struct rv7xx_power_info *pi = rv770_get_pi(adev);
int i;
for (i = 0; i < ps->performance_level_count - 1; i++)
smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
smc_state->levels[ps->performance_level_count - 1].bSP =
cpu_to_be32(pi->psp);
}
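/*
 * Convert one driver performance level into its SMC form: PCIe gen, sclk and
 * mclk register sets, voltages (including the CAC-leakage "standard" vddc),
 * optional phase shedding, and the memory strobe/DLL flags.
 */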
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
struct rv7xx_pl *pl,
SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
int ret;
bool dll_state_on;
u16 std_vddc;
bool gmc_pg = false;
if (eg_pi->pcie_performance_request &&
(si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID))
level->gen2PCIE = (u8)si_pi->force_pcie_gen;
else
level->gen2PCIE = (u8)pl->pcie_gen;
ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
if (ret)
return ret;
level->mcFlags = 0;
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
(adev->pm.dpm.new_active_crtc_count <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
if (gmc_pg)
level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
}
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
if (pl->mclk > pi->mclk_edc_enable_threshold)
level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
else
dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
} else {
dll_state_on = false;
}
} else {
level->strobeMode = si_get_strobe_mode_settings(adev,
pl->mclk);
dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
}
ret = si_populate_mclk_value(adev,
pl->sclk,
pl->mclk,
&level->mclk,
(level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
if (ret)
return ret;
ret = si_populate_voltage_value(adev,
&eg_pi->vddc_voltage_table,
pl->vddc, &level->vddc);
if (ret)
return ret;
ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
if (ret)
return ret;
ret = si_populate_std_voltage_value(adev, std_vddc,
level->vddc.index, &level->std_vddc);
if (ret)
return ret;
if (eg_pi->vddci_control) {
ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
pl->vddci, &level->vddci);
if (ret)
return ret;
}
if (si_pi->vddc_phase_shed_control) {
ret = si_populate_phase_shedding_value(adev,
&adev->pm.dpm.dyn_state.phase_shedding_limits_table,
pl->vddc,
pl->sclk,
pl->mclk,
&level->vddc);
if (ret)
return ret;
}
level->MaxPoweredUpCU = si_pi->max_cu;
ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
return ret;
}
static int si_populate_smc_t(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SISLANDS_SMC_SWSTATE *smc_state)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct si_ps *state = si_get_ps(amdgpu_state);
u32 a_t;
u32 t_l, t_h;
u32 high_bsp;
int i, ret;
if (state->performance_level_count >= 9)
return -EINVAL;
if (state->performance_level_count < 2) {
a_t = CG_R(0xffff) | CG_L(0);
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
smc_state->levels[0].aT = cpu_to_be32(0);
for (i = 0; i <= state->performance_level_count - 2; i++) {
ret = r600_calculate_at(
(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
100 * R600_AH_DFLT,
state->performance_levels[i + 1].sclk,
state->performance_levels[i].sclk,
&t_l,
&t_h);
if (ret) {
t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
a_t |= CG_R(t_l * pi->bsp / 20000);
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
return 0;
}
static int si_disable_ulv(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ulv_param *ulv = &si_pi->ulv;
if (ulv->supported)
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
0 : -EINVAL;
return 0;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
const struct si_power_info *si_pi = si_get_pi(adev);
const struct si_ulv_param *ulv = &si_pi->ulv;
const struct si_ps *state = si_get_ps(amdgpu_state);
int i;
if (state->performance_levels[0].mclk != ulv->pl.mclk)
return false;
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
if (adev->clock.current_dispclk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
return false;
}
}
if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
return false;
return true;
}
static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state)
{
const struct si_power_info *si_pi = si_get_pi(adev);
const struct si_ulv_param *ulv = &si_pi->ulv;
if (ulv->supported) {
if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
0 : -EINVAL;
}
return 0;
}
static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SISLANDS_SMC_SWSTATE *smc_state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct ni_power_info *ni_pi = ni_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ps *state = si_get_ps(amdgpu_state);
int i, ret;
u32 threshold;
u32 sclk_in_sr = 1350; /* ??? */
if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
return -EINVAL;
threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
if (amdgpu_state->vclk && amdgpu_state->dclk) {
eg_pi->uvd_enabled = true;
if (eg_pi->smu_uvd_hs)
smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
} else {
eg_pi->uvd_enabled = false;
}
if (state->dc_compatible)
smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
smc_state->levelCount = 0;
for (i = 0; i < state->performance_level_count; i++) {
if (eg_pi->sclk_deep_sleep) {
if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
}
ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
&smc_state->levels[i]);
smc_state->levels[i].arbRefreshState =
(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
if (ret)
return ret;
if (ni_pi->enable_power_containment)
smc_state->levels[i].displayWatermark =
(state->performance_levels[i].sclk < threshold) ?
PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
else
smc_state->levels[i].displayWatermark = (i < 2) ?
PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
if (eg_pi->dynamic_ac_timing)
smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
else
smc_state->levels[i].ACIndex = 0;
smc_state->levelCount++;
}
si_write_smc_soft_register(adev,
SI_SMC_SOFT_REGISTER_watermark_threshold,
threshold / 512);
si_populate_smc_sp(adev, amdgpu_state, smc_state);
ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
if (ret)
ni_pi->enable_power_containment = false;
ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
if (ret)
ni_pi->enable_sq_ramping = false;
return si_populate_smc_t(adev, amdgpu_state, smc_state);
}
static int si_upload_sw_state(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ps *new_state = si_get_ps(amdgpu_new_state);
int ret;
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, driverState);
SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
size_t state_size = struct_size(smc_state, levels,
new_state->performance_level_count);
memset(smc_state, 0, state_size);
ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
if (ret)
return ret;
return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
state_size, si_pi->sram_end);
}
static int si_upload_ulv_state(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ulv_param *ulv = &si_pi->ulv;
int ret = 0;
if (ulv->supported && ulv->pl.vddc) {
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, ULVState);
struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
memset(smc_state, 0, state_size);
ret = si_populate_ulv_state(adev, smc_state);
if (!ret)
ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
state_size, si_pi->sram_end);
}
return ret;
}
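/*
 * Tell the SMC which CRTC to synchronize mclk changes against, along with a
 * window derived from the display watermarks divided by the line time in
 * which a memory reclock may begin.
 */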
static int si_upload_smc_data(struct amdgpu_device *adev)
{
struct amdgpu_crtc *amdgpu_crtc = NULL;
int i;
if (adev->pm.dpm.new_active_crtc_count == 0)
return 0;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
amdgpu_crtc = adev->mode_info.crtcs[i];
break;
}
}
if (amdgpu_crtc == NULL)
return 0;
if (amdgpu_crtc->line_time <= 0)
return 0;
if (si_write_smc_soft_register(adev,
SI_SMC_SOFT_REGISTER_crtc_index,
amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
return 0;
if (si_write_smc_soft_register(adev,
SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
return 0;
if (si_write_smc_soft_register(adev,
SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
return 0;
return 0;
}
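/*
 * Append the derived "special" MC registers: shadow EMRS/MRS entries rebuilt
 * from the MC_SEQ_MISC1 column, an MRS1 entry rebuilt from MC_SEQ_RESERVE_M,
 * and MC_PMG_AUTO_CMD for non-GDDR5 memory.
 */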
static int si_set_mc_special_registers(struct amdgpu_device *adev,
struct si_mc_reg_table *table)
{
u8 i, j, k;
u32 temp_reg;
for (i = 0, j = table->last; i < table->last; i++) {
if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
switch (table->mc_reg_address[i].s1) {
case MC_SEQ_MISC1:
temp_reg = RREG32(MC_PMG_CMD_EMRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
((temp_reg & 0xffff0000)) |
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
j++;
if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
temp_reg = RREG32(MC_PMG_CMD_MRS);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
for (k = 0; k < table->num_entries; k++) {
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
j++;
}
break;
case MC_SEQ_RESERVE_M:
temp_reg = RREG32(MC_PMG_CMD_MRS1);
table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
for (k = 0; k < table->num_entries; k++)
table->mc_reg_table_entry[k].mc_data[j] =
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
j++;
break;
default:
break;
}
}
table->last = j;
return 0;
}
static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
bool result = true;
switch (in_reg) {
case MC_SEQ_RAS_TIMING:
*out_reg = MC_SEQ_RAS_TIMING_LP;
break;
case MC_SEQ_CAS_TIMING:
*out_reg = MC_SEQ_CAS_TIMING_LP;
break;
case MC_SEQ_MISC_TIMING:
*out_reg = MC_SEQ_MISC_TIMING_LP;
break;
case MC_SEQ_MISC_TIMING2:
*out_reg = MC_SEQ_MISC_TIMING2_LP;
break;
case MC_SEQ_RD_CTL_D0:
*out_reg = MC_SEQ_RD_CTL_D0_LP;
break;
case MC_SEQ_RD_CTL_D1:
*out_reg = MC_SEQ_RD_CTL_D1_LP;
break;
case MC_SEQ_WR_CTL_D0:
*out_reg = MC_SEQ_WR_CTL_D0_LP;
break;
case MC_SEQ_WR_CTL_D1:
*out_reg = MC_SEQ_WR_CTL_D1_LP;
break;
case MC_PMG_CMD_EMRS:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
break;
case MC_PMG_CMD_MRS:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
break;
case MC_PMG_CMD_MRS1:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
break;
case MC_SEQ_PMG_TIMING:
*out_reg = MC_SEQ_PMG_TIMING_LP;
break;
case MC_PMG_CMD_MRS2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
break;
case MC_SEQ_WR_CTL_2:
*out_reg = MC_SEQ_WR_CTL_2_LP;
break;
default:
result = false;
break;
}
return result;
}
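/*
 * Mark the register columns whose values differ between table entries; only
 * those columns are uploaded to the SMC.
 */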
static void si_set_valid_flag(struct si_mc_reg_table *table)
{
u8 i, j;
for (i = 0; i < table->last; i++) {
for (j = 1; j < table->num_entries; j++) {
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
table->valid_flag |= 1 << i;
break;
}
}
}
}
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
{
u32 i;
u16 address;
for (i = 0; i < table->last; i++)
table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
address : table->mc_reg_address[i].s1;
}
static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
struct si_mc_reg_table *si_table)
{
u8 i, j;
if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
return -EINVAL;
for (i = 0; i < table->last; i++)
si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
si_table->last = table->last;
for (i = 0; i < table->num_entries; i++) {
si_table->mc_reg_table_entry[i].mclk_max =
table->mc_reg_table_entry[i].mclk_max;
for (j = 0; j < table->last; j++) {
si_table->mc_reg_table_entry[i].mc_data[j] =
table->mc_reg_table_entry[i].mc_data[j];
}
}
si_table->num_entries = table->num_entries;
return 0;
}
static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
struct atom_mc_reg_table *table;
struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
u8 module_index = rv770_get_memory_module_index(adev);
int ret;
table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
if (ret)
goto init_mc_done;
ret = si_copy_vbios_mc_reg_table(table, si_table);
if (ret)
goto init_mc_done;
si_set_s0_mc_reg_index(si_table);
ret = si_set_mc_special_registers(adev, si_table);
if (ret)
goto init_mc_done;
si_set_valid_flag(si_table);
init_mc_done:
kfree(table);
return ret;
}
static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
SMC_SIslands_MCRegisters *mc_reg_table)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 i, j;
for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
break;
mc_reg_table->address[i].s0 =
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
mc_reg_table->address[i].s1 =
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
i++;
}
}
mc_reg_table->last = (u8)i;
}
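/*
 * Pack a single MC register table entry into SMC format: only columns
 * flagged in valid_flag are emitted, and each value is converted to
 * big-endian, the byte order the SMC expects for its tables.
 */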
static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
SMC_SIslands_MCRegisterSet *data,
u32 num_entries, u32 valid_flag)
{
u32 i, j;
for (i = 0, j = 0; j < num_entries; j++) {
if (valid_flag & (1 << j)) {
data->value[i] = cpu_to_be32(entry->mc_data[j]);
i++;
}
}
}
static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
struct rv7xx_pl *pl,
SMC_SIslands_MCRegisterSet *mc_reg_table_data)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 i = 0;
for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
break;
}
if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
--i;
si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
mc_reg_table_data, si_pi->mc_reg_table.last,
si_pi->mc_reg_table.valid_flag);
}
static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state,
SMC_SIslands_MCRegisters *mc_reg_table)
{
struct si_ps *state = si_get_ps(amdgpu_state);
int i;
for (i = 0; i < state->performance_level_count; i++) {
si_convert_mc_reg_table_entry_to_smc(adev,
&state->performance_levels[i],
&mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
}
}
static int si_populate_mc_reg_table(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_boot_state)
{
struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ulv_param *ulv = &si_pi->ulv;
SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
si_pi->mc_reg_table.last,
si_pi->mc_reg_table.valid_flag);
if (ulv->supported && ulv->pl.vddc != 0)
si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
else
si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
si_pi->mc_reg_table.last,
si_pi->mc_reg_table.valid_flag);
si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
(u8 *)smc_mc_reg_table,
sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
}
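/*
 * Re-upload only the per-performance-level driver slots of the MC
 * register table; the initial/ACPI/ULV slots written once by
 * si_populate_mc_reg_table() at enable time are left untouched.
 */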
static int si_upload_mc_reg_table(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state)
{
struct si_ps *new_state = si_get_ps(amdgpu_new_state);
struct si_power_info *si_pi = si_get_pi(adev);
u32 address = si_pi->mc_reg_table_start +
offsetof(SMC_SIslands_MCRegisters,
data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
return amdgpu_si_copy_bytes_to_smc(adev, address,
(u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
si_pi->sram_end);
}
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
else
WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_state)
{
struct si_ps *state = si_get_ps(amdgpu_state);
int i;
u16 pcie_speed, max_speed = 0;
for (i = 0; i < state->performance_level_count; i++) {
pcie_speed = state->performance_levels[i].pcie_gen;
if (max_speed < pcie_speed)
max_speed = pcie_speed;
}
return max_speed;
}
static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
return (u16)speed_cntl;
}
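/*
 * Negotiate a PCIe link-speed change with the platform (via what appears
 * to be the ACPI PSPP performance-request mechanism) ahead of a power
 * state switch. Speed increases are requested here, falling back one
 * generation at a time if the platform refuses; decreases are merely
 * recorded in pspp_notify_required and requested after the switch by
 * si_notify_link_speed_change_after_state_change().
 */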
static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
enum si_pcie_gen current_link_speed;
if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID)
current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
else
current_link_speed = si_pi->force_pcie_gen;
si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->pspp_notify_required = false;
if (target_link_speed > current_link_speed) {
switch (target_link_speed) {
#if defined(CONFIG_ACPI)
case SI_PCIE_GEN3:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
break;
si_pi->force_pcie_gen = SI_PCIE_GEN2;
if (current_link_speed == SI_PCIE_GEN2)
break;
fallthrough;
case SI_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
fallthrough;
#endif
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
break;
}
} else {
if (target_link_speed < current_link_speed)
si_pi->pspp_notify_required = true;
}
}
static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
u8 request;
if (si_pi->pspp_notify_required) {
if (target_link_speed == SI_PCIE_GEN3)
request = PCIE_PERF_REQ_PECI_GEN3;
else if (target_link_speed == SI_PCIE_GEN2)
request = PCIE_PERF_REQ_PECI_GEN2;
else
request = PCIE_PERF_REQ_PECI_GEN1;
if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
(si_get_current_pcie_speed(adev) > 0))
return;
#if defined(CONFIG_ACPI)
amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
}
}
#if 0
static int si_ds_request(struct amdgpu_device *adev,
bool ds_status_on, u32 count_write)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
if (eg_pi->sclk_deep_sleep) {
if (ds_status_on)
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
PPSMC_Result_OK) ?
0 : -EINVAL;
else
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
PPSMC_Result_OK) ? 0 : -EINVAL;
}
return 0;
}
#endif
static void si_set_max_cu_value(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
if (adev->asic_type == CHIP_VERDE) {
switch (adev->pdev->device) {
case 0x6820:
case 0x6825:
case 0x6821:
case 0x6823:
case 0x6827:
si_pi->max_cu = 10;
break;
case 0x682D:
case 0x6824:
case 0x682F:
case 0x6826:
si_pi->max_cu = 8;
break;
case 0x6828:
case 0x6830:
case 0x6831:
case 0x6838:
case 0x6839:
case 0x683D:
si_pi->max_cu = 10;
break;
case 0x683B:
case 0x683F:
case 0x6829:
si_pi->max_cu = 8;
break;
default:
si_pi->max_cu = 0;
break;
}
} else {
si_pi->max_cu = 0;
}
}
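/*
 * Replace leakage-index placeholders in a clock/voltage dependency table
 * with real voltages (entries that are not leakage indices are left
 * untouched), then walk the table from the top down so that the required
 * voltage never decreases as the clock increases.
 */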
static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
struct amdgpu_clock_voltage_dependency_table *table)
{
u32 i;
int j;
u16 leakage_voltage;
if (table) {
for (i = 0; i < table->count; i++) {
switch (si_get_leakage_voltage_from_leakage_index(adev,
table->entries[i].v,
&leakage_voltage)) {
case 0:
table->entries[i].v = leakage_voltage;
break;
case -EAGAIN:
return -EINVAL;
case -EINVAL:
default:
break;
}
}
for (j = (table->count - 2); j >= 0; j--) {
table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
table->entries[j].v : table->entries[j + 1].v;
}
}
return 0;
}
static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
{
int ret = 0;
ret = si_patch_single_dependency_table_based_on_leakage(adev,
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
if (ret)
DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
ret = si_patch_single_dependency_table_based_on_leakage(adev,
&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
if (ret)
DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
ret = si_patch_single_dependency_table_based_on_leakage(adev,
&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
if (ret)
DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
return ret;
}
static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
struct amdgpu_ps *amdgpu_new_state,
struct amdgpu_ps *amdgpu_current_state)
{
u32 lane_width;
u32 new_lane_width =
((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
}
}
static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
si_read_clock_registers(adev);
si_enable_acpi_power_management(adev);
}
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
u32 thermal_int = RREG32(CG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
WREG32(CG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
WREG32(CG_THERMAL_INT, thermal_int);
}
return 0;
}
static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
if (low_temp < min_temp)
low_temp = min_temp;
if (high_temp > max_temp)
high_temp = max_temp;
if (high_temp < low_temp) {
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
return -EINVAL;
}
WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
return 0;
}
static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(0);
WREG32(CG_FDO_CTRL2, tmp);
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(mode);
WREG32(CG_FDO_CTRL2, tmp);
}
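/*
 * Build the SMC fan table from the pplib fan profile. The two slopes are
 * computed in what appears to be a 1/16th-duty-per-degree fixed-point
 * format (hence the *16 and the +50/100 rounding), trip points are
 * rounded from 0.01 degree units to whole degrees, and all fields are
 * stored big-endian. On any failure, SMC fan control is disabled rather
 * than running with a bogus curve.
 */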
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
u32 duty100;
u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
u16 fdo_min, slope1, slope2;
u32 reference_clock, tmp;
int ret;
u64 tmp64;
if (!si_pi->fan_table_start) {
adev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
do_div(tmp64, 10000);
fdo_min = (u16)tmp64;
t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
fan_table.slope1 = cpu_to_be16(slope1);
fan_table.slope2 = cpu_to_be16(slope2);
fan_table.fdo_min = cpu_to_be16(fdo_min);
fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
fan_table.hys_up = cpu_to_be16(1);
fan_table.hys_slope = cpu_to_be16(1);
fan_table.temp_resp_lim = cpu_to_be16(5);
reference_clock = amdgpu_asic_get_xclk(adev);
fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
si_pi->fan_table_start,
(u8 *)(&fan_table),
sizeof(fan_table),
si_pi->sram_end);
if (ret) {
DRM_ERROR("Failed to load fan table to the SMC.");
adev->pm.dpm.fan.ucode_fan_control = false;
}
return ret;
}
static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
PPSMC_Result ret;
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
if (ret == PPSMC_Result_OK) {
si_pi->fan_is_controlled_by_smc = true;
return 0;
} else {
return -EINVAL;
}
}
static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
PPSMC_Result ret;
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
if (ret == PPSMC_Result_OK) {
si_pi->fan_is_controlled_by_smc = false;
return 0;
} else {
return -EINVAL;
}
}
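/*
 * Report the current fan speed as a 0-255 PWM value: read the
 * instantaneous duty cycle and the 100% reference duty from the FDO
 * block and rescale, clamping at 255.
 */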
static int si_dpm_get_fan_speed_pwm(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!speed)
return -EINVAL;
if (adev->pm.no_fan)
return -ENOENT;
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
if (duty100 == 0)
return -EINVAL;
tmp64 = (u64)duty * 255;
do_div(tmp64, duty100);
*speed = MIN((u32)tmp64, 255);
return 0;
}
static int si_dpm_set_fan_speed_pwm(void *handle,
u32 speed)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
u32 duty, duty100;
u64 tmp64;
if (adev->pm.no_fan)
return -ENOENT;
if (si_pi->fan_is_controlled_by_smc)
return -EINVAL;
if (speed > 255)
return -EINVAL;
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
if (duty100 == 0)
return -EINVAL;
tmp64 = (u64)speed * duty100;
do_div(tmp64, 255);
duty = (u32)tmp64;
tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
tmp |= FDO_STATIC_DUTY(duty);
WREG32(CG_FDO_CTRL0, tmp);
return 0;
}
static int si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (mode == U32_MAX)
return -EINVAL;
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
si_fan_ctrl_stop_smc_fan_control(adev);
si_fan_ctrl_set_static_mode(adev, mode);
} else {
/* restart auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
si_thermal_start_smc_fan_control(adev);
else
si_fan_ctrl_set_default_mode(adev);
}
return 0;
}
static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
if (!fan_mode)
return -EINVAL;
if (si_pi->fan_is_controlled_by_smc)
return 0;
tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
*fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
return 0;
}
#if 0
static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
u32 *speed)
{
u32 tach_period;
u32 xclk = amdgpu_asic_get_xclk(adev);
if (adev->pm.no_fan)
return -ENOENT;
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
if (tach_period == 0)
return -ENOENT;
*speed = 60 * xclk * 10000 / tach_period;
return 0;
}
static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
u32 speed)
{
u32 tach_period, tmp;
u32 xclk = amdgpu_asic_get_xclk(adev);
if (adev->pm.no_fan)
return -ENOENT;
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
if ((speed < adev->pm.fan_min_rpm) ||
(speed > adev->pm.fan_max_rpm))
return -EINVAL;
if (adev->pm.dpm.fan.ucode_fan_control)
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
tmp |= TARGET_PERIOD(tach_period);
WREG32(CG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
return 0;
}
#endif
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
WREG32(CG_FDO_CTRL2, tmp);
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(si_pi->t_min);
WREG32(CG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
if (adev->pm.dpm.fan.ucode_fan_control) {
si_fan_ctrl_start_smc_fan_control(adev);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
}
}
static void si_thermal_initialize(struct amdgpu_device *adev)
{
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
WREG32(CG_TACH_CTRL, tmp);
}
tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
tmp |= TACH_PWM_RESP_RATE(0x28);
WREG32(CG_FDO_CTRL2, tmp);
}
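/*
 * Thermal bring-up: program the tachometer edge count and PWM response
 * rate, set the alert window, enable the thermal interrupt, and then,
 * with the SMC halted so it cannot read a half-written table, upload the
 * fan table and hand fan control over to the SMC firmware.
 */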
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
int ret;
si_thermal_initialize(adev);
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(adev, true);
if (ret)
return ret;
if (adev->pm.dpm.fan.ucode_fan_control) {
ret = si_halt_smc(adev);
if (ret)
return ret;
ret = si_thermal_setup_fan_table(adev);
if (ret)
return ret;
ret = si_resume_smc(adev);
if (ret)
return ret;
si_thermal_start_smc_fan_control(adev);
}
return 0;
}
static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
if (!adev->pm.no_fan) {
si_fan_ctrl_set_default_mode(adev);
si_fan_ctrl_stop_smc_fan_control(adev);
}
}
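/*
 * Full DPM enable sequence, run against the boot state. Voltage control
 * and the voltage/MC tables are set up first, then the SMC firmware and
 * its state/SPLL/ARB tables are uploaded, then the CAC, DTE and TDP
 * power-containment tables, and only after all of that is the SMC
 * started and dynamic power management switched on.
 */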
static int si_dpm_enable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
int ret;
if (amdgpu_si_is_smc_running(adev))
return -EINVAL;
if (pi->voltage_control || si_pi->voltage_control_svi2)
si_enable_voltage_control(adev, true);
if (pi->mvdd_control)
si_get_mvdd_configuration(adev);
if (pi->voltage_control || si_pi->voltage_control_svi2) {
ret = si_construct_voltage_tables(adev);
if (ret) {
DRM_ERROR("si_construct_voltage_tables failed\n");
return ret;
}
}
if (eg_pi->dynamic_ac_timing) {
ret = si_initialize_mc_reg_table(adev);
if (ret)
eg_pi->dynamic_ac_timing = false;
}
if (pi->dynamic_ss)
si_enable_spread_spectrum(adev, true);
if (pi->thermal_protection)
si_enable_thermal_protection(adev, true);
si_setup_bsp(adev);
si_program_git(adev);
si_program_tp(adev);
si_program_tpp(adev);
si_program_sstp(adev);
si_enable_display_gap(adev);
si_program_vc(adev);
ret = si_upload_firmware(adev);
if (ret) {
DRM_ERROR("si_upload_firmware failed\n");
return ret;
}
ret = si_process_firmware_header(adev);
if (ret) {
DRM_ERROR("si_process_firmware_header failed\n");
return ret;
}
ret = si_initial_switch_from_arb_f0_to_f1(adev);
if (ret) {
DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
return ret;
}
ret = si_init_smc_table(adev);
if (ret) {
DRM_ERROR("si_init_smc_table failed\n");
return ret;
}
ret = si_init_smc_spll_table(adev);
if (ret) {
DRM_ERROR("si_init_smc_spll_table failed\n");
return ret;
}
ret = si_init_arb_table_index(adev);
if (ret) {
DRM_ERROR("si_init_arb_table_index failed\n");
return ret;
}
if (eg_pi->dynamic_ac_timing) {
ret = si_populate_mc_reg_table(adev, boot_ps);
if (ret) {
DRM_ERROR("si_populate_mc_reg_table failed\n");
return ret;
}
}
ret = si_initialize_smc_cac_tables(adev);
if (ret) {
DRM_ERROR("si_initialize_smc_cac_tables failed\n");
return ret;
}
ret = si_initialize_hardware_cac_manager(adev);
if (ret) {
DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
return ret;
}
ret = si_initialize_smc_dte_tables(adev);
if (ret) {
DRM_ERROR("si_initialize_smc_dte_tables failed\n");
return ret;
}
ret = si_populate_smc_tdp_limits(adev, boot_ps);
if (ret) {
DRM_ERROR("si_populate_smc_tdp_limits failed\n");
return ret;
}
ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
if (ret) {
DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
return ret;
}
si_program_response_times(adev);
si_program_ds_registers(adev);
si_dpm_start_smc(adev);
ret = si_notify_smc_display_change(adev, false);
if (ret) {
DRM_ERROR("si_notify_smc_display_change failed\n");
return ret;
}
si_enable_sclk_control(adev, true);
si_start_dpm(adev);
si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
ni_update_current_ps(adev, boot_ps);
return 0;
}
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
if (!amdgpu_si_is_smc_running(adev))
return;
si_thermal_stop_thermal_controller(adev);
si_disable_ulv(adev);
si_clear_vc(adev);
if (pi->thermal_protection)
si_enable_thermal_protection(adev, false);
si_enable_power_containment(adev, boot_ps, false);
si_enable_smc_cac(adev, boot_ps, false);
si_enable_spread_spectrum(adev, false);
si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
si_stop_dpm(adev);
si_reset_to_default(adev);
si_dpm_stop_smc(adev);
si_force_switch_to_arb_f0(adev);
ni_update_current_ps(adev, boot_ps);
}
static int si_dpm_pre_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
ni_update_requested_ps(adev, new_ps);
si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
return 0;
}
static int si_power_control_set_level(struct amdgpu_device *adev)
{
struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
int ret;
ret = si_restrict_performance_levels_before_switch(adev);
if (ret)
return ret;
ret = si_halt_smc(adev);
if (ret)
return ret;
ret = si_populate_smc_tdp_limits(adev, new_ps);
if (ret)
return ret;
ret = si_populate_smc_tdp_limits_2(adev, new_ps);
if (ret)
return ret;
ret = si_resume_smc(adev);
if (ret)
return ret;
return si_set_sw_state(adev);
}
static void si_set_vce_clock(struct amdgpu_device *adev,
struct amdgpu_ps *new_rps,
struct amdgpu_ps *old_rps)
{
if ((old_rps->evclk != new_rps->evclk) ||
(old_rps->ecclk != new_rps->ecclk)) {
/* Turn the clocks on when encoding, off otherwise */
if (new_rps->evclk || new_rps->ecclk) {
/* Place holder for future VCE1.0 porting to amdgpu
vce_v1_0_enable_mgcg(adev, false, false);*/
} else {
/* Place holder for future VCE1.0 porting to amdgpu
vce_v1_0_enable_mgcg(adev, true, false);
amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/
}
}
}
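/*
 * Perform a power-state switch: leave ULV, clamp to safe performance
 * levels, drop CAC and power containment around the transition, halt the
 * SMC while the new state, SMC data, ULV and MC register tables are
 * uploaded, then resume the SMC, request the switch, and re-enable
 * ULV/CAC/containment for the new state.
 */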
static int si_dpm_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
struct amdgpu_ps *old_ps = &eg_pi->current_rps;
int ret;
ret = si_disable_ulv(adev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
return ret;
}
ret = si_restrict_performance_levels_before_switch(adev);
if (ret) {
DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
return ret;
}
if (eg_pi->pcie_performance_request)
si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
ret = si_enable_power_containment(adev, new_ps, false);
if (ret) {
DRM_ERROR("si_enable_power_containment failed\n");
return ret;
}
ret = si_enable_smc_cac(adev, new_ps, false);
if (ret) {
DRM_ERROR("si_enable_smc_cac failed\n");
return ret;
}
ret = si_halt_smc(adev);
if (ret) {
DRM_ERROR("si_halt_smc failed\n");
return ret;
}
ret = si_upload_sw_state(adev, new_ps);
if (ret) {
DRM_ERROR("si_upload_sw_state failed\n");
return ret;
}
ret = si_upload_smc_data(adev);
if (ret) {
DRM_ERROR("si_upload_smc_data failed\n");
return ret;
}
ret = si_upload_ulv_state(adev);
if (ret) {
DRM_ERROR("si_upload_ulv_state failed\n");
return ret;
}
if (eg_pi->dynamic_ac_timing) {
ret = si_upload_mc_reg_table(adev, new_ps);
if (ret) {
DRM_ERROR("si_upload_mc_reg_table failed\n");
return ret;
}
}
ret = si_program_memory_timing_parameters(adev, new_ps);
if (ret) {
DRM_ERROR("si_program_memory_timing_parameters failed\n");
return ret;
}
si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
ret = si_resume_smc(adev);
if (ret) {
DRM_ERROR("si_resume_smc failed\n");
return ret;
}
ret = si_set_sw_state(adev);
if (ret) {
DRM_ERROR("si_set_sw_state failed\n");
return ret;
}
ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
si_set_vce_clock(adev, new_ps, old_ps);
if (eg_pi->pcie_performance_request)
si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
if (ret) {
DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
return ret;
}
ret = si_enable_smc_cac(adev, new_ps, true);
if (ret) {
DRM_ERROR("si_enable_smc_cac failed\n");
return ret;
}
ret = si_enable_power_containment(adev, new_ps, true);
if (ret) {
DRM_ERROR("si_enable_power_containment failed\n");
return ret;
}
ret = si_power_control_set_level(adev);
if (ret) {
DRM_ERROR("si_power_control_set_level failed\n");
return ret;
}
return 0;
}
static void si_dpm_post_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
ni_update_current_ps(adev, new_ps);
}
#if 0
void si_dpm_reset_asic(struct amdgpu_device *adev)
{
si_restrict_performance_levels_before_switch(adev);
si_disable_ulv(adev);
si_set_boot_state(adev);
}
#endif
static void si_dpm_display_configuration_changed(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
si_program_display_gap(adev);
}
static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
struct amdgpu_ps *rps,
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
u8 table_rev)
{
rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
rps->class = le16_to_cpu(non_clock_info->usClassification);
rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
} else if (r600_is_uvd_state(rps->class, rps->class2)) {
rps->vclk = RV770_DEFAULT_VCLK_FREQ;
rps->dclk = RV770_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
adev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
adev->pm.dpm.uvd_ps = rps;
}
static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
struct amdgpu_ps *rps, int index,
union pplib_clock_info *clock_info)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
struct si_ps *ps = si_get_ps(rps);
u16 leakage_voltage;
struct rv7xx_pl *pl = &ps->performance_levels[index];
int ret;
ps->performance_level_count = index + 1;
pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
pl->pcie_gen = si_gen_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
clock_info->si.ucPCIEGen);
/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
&leakage_voltage);
if (ret == 0)
pl->vddc = leakage_voltage;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
pi->acpi_vddc = pl->vddc;
eg_pi->acpi_vddci = pl->vddci;
si_pi->acpi_pcie_gen = pl->pcie_gen;
}
if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
index == 0) {
/* XXX disable for A0 tahiti */
si_pi->ulv.supported = false;
si_pi->ulv.pl = *pl;
si_pi->ulv.one_pcie_lane_in_ulv = false;
si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
}
if (pi->min_vddc_in_table > pl->vddc)
pi->min_vddc_in_table = pl->vddc;
if (pi->max_vddc_in_table < pl->vddc)
pi->max_vddc_in_table = pl->vddc;
/* patch up boot state */
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
u16 vddc, vddci, mvdd;
amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
pl->mclk = adev->clock.default_mclk;
pl->sclk = adev->clock.default_sclk;
pl->vddc = vddc;
pl->vddci = vddci;
si_pi->mvdd_bootup_value = mvdd;
}
if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
}
}
union pplib_power_state {
struct _ATOM_PPLIB_STATE v1;
struct _ATOM_PPLIB_STATE_V2 v2;
};
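/*
 * Walk the ATOM PowerPlay state array and build the driver's power-state
 * list. Each pplib state carries a non-clock-info index plus up to
 * SISLANDS_MAX_HARDWARE_POWERLEVELS clock-info indices; out-of-range
 * clock indices are skipped rather than treated as fatal. The VCE state
 * clocks are filled in from the same clock-info array afterwards.
 */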
static int si_parse_power_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
union pplib_power_state *power_state;
int i, j, k, non_clock_array_index, clock_array_index;
union pplib_clock_info *clock_info;
struct _StateArray *state_array;
struct _ClockInfoArray *clock_info_array;
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct si_ps *ps;
if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
return -EINVAL;
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
amdgpu_add_thermal_controller(adev);
state_array = (struct _StateArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset));
clock_info_array = (struct _ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
sizeof(struct amdgpu_ps),
GFP_KERNEL);
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
if (ps == NULL)
return -ENOMEM;
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
non_clock_info_array->ucEntrySize);
k = 0;
idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
((u8 *)&clock_info_array->clockInfo[0] +
(clock_array_index * clock_info_array->ucEntrySize));
si_parse_pplib_clock_info(adev,
&adev->pm.dpm.ps[i], k,
clock_info);
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
adev->pm.dpm.num_ps++;
}
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
u32 sclk, mclk;
clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
mclk |= clock_info->si.ucMemoryClockHigh << 16;
adev->pm.dpm.vce_states[i].sclk = sclk;
adev->pm.dpm.vce_states[i].mclk = mclk;
}
return 0;
}
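/*
 * One-time DPM initialization: allocate the stacked rv7xx/evergreen/ni/si
 * power-info structures, snapshot the PCIe capabilities, patch the
 * voltage tables for leakage, parse the VBIOS power table, and probe
 * whether each rail (VDDC, MVDD, VDDCI) is GPIO- or SVI2-controlled
 * before filling in the SI-specific defaults.
 */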
static int si_dpm_init(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
int ret;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
return -ENOMEM;
adev->pm.dpm.priv = si_pi;
ni_pi = &si_pi->ni;
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
si_pi->sys_pcie_mask =
adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
si_set_max_cu_value(adev);
rv770_get_max_vddc(adev);
si_get_leakage_vddc(adev);
si_patch_dependency_tables_based_on_leakage(adev);
pi->acpi_vddc = 0;
eg_pi->acpi_vddci = 0;
pi->min_vddc_in_table = 0;
pi->max_vddc_in_table = 0;
ret = amdgpu_get_platform_caps(adev);
if (ret)
return ret;
ret = amdgpu_parse_extended_power_table(adev);
if (ret)
return ret;
ret = si_parse_power_table(adev);
if (ret)
return ret;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
kcalloc(4,
sizeof(struct amdgpu_clock_voltage_dependency_entry),
GFP_KERNEL);
if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
amdgpu_free_extended_power_table(adev);
return -ENOMEM;
}
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
if (adev->pm.dpm.voltage_response_time == 0)
adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
if (adev->pm.dpm.backbias_response_time == 0)
adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
0, false, &dividers);
if (ret)
pi->ref_div = dividers.ref_div + 1;
else
pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
eg_pi->smu_uvd_hs = false;
pi->mclk_strobe_mode_threshold = 40000;
if (si_is_special_1gb_platform(adev))
pi->mclk_stutter_mode_threshold = 0;
else
pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
pi->mclk_edc_enable_threshold = 40000;
eg_pi->mclk_edc_wr_enable_threshold = 40000;
ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
pi->voltage_control =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
VOLTAGE_OBJ_GPIO_LUT);
if (!pi->voltage_control) {
si_pi->voltage_control_svi2 =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
VOLTAGE_OBJ_SVID2);
if (si_pi->voltage_control_svi2)
amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
&si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
}
pi->mvdd_control =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
VOLTAGE_OBJ_GPIO_LUT);
eg_pi->vddci_control =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
VOLTAGE_OBJ_GPIO_LUT);
if (!eg_pi->vddci_control)
si_pi->vddci_control_svi2 =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
VOLTAGE_OBJ_SVID2);
si_pi->vddc_phase_shed_control =
amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
VOLTAGE_OBJ_PHASE_LUT);
rv770_get_engine_memory_ss(adev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
pi->vrc = SISLANDS_VRC_DFLT;
pi->gfx_clock_gating = true;
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
eg_pi->dynamic_ac_timing = true;
eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
eg_pi->pcie_performance_request =
amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
eg_pi->pcie_performance_request = false;
#endif
si_pi->sram_end = SMC_RAM_END;
adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
si_initialize_powertune_defaults(adev);
/* make sure dc limits are valid */
if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
(adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
si_pi->fan_ctrl_is_in_default_mode = true;
return 0;
}
static void si_dpm_fini(struct amdgpu_device *adev)
{
int i;
if (adev->pm.dpm.ps)
for (i = 0; i < adev->pm.dpm.num_ps; i++)
kfree(adev->pm.dpm.ps[i].ps_priv);
kfree(adev->pm.dpm.ps);
kfree(adev->pm.dpm.priv);
kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
amdgpu_free_extended_power_table(adev);
}
static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
CURRENT_STATE_INDEX_SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
} else {
pl = &ps->performance_levels[current_index];
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
}
static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 cg_thermal_int;
switch (type) {
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
cg_thermal_int |= THERM_INT_MASK_HIGH;
WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
cg_thermal_int &= ~THERM_INT_MASK_HIGH;
WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
}
break;
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
cg_thermal_int |= THERM_INT_MASK_LOW;
WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
cg_thermal_int &= ~THERM_INT_MASK_LOW;
WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int si_dpm_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
bool queue_thermal = false;
if (entry == NULL)
return -EINVAL;
switch (entry->src_id) {
case 230: /* thermal low to high */
DRM_DEBUG("IH: thermal low to high\n");
adev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
DRM_DEBUG("IH: thermal high to low\n");
adev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
default:
break;
}
if (queue_thermal)
schedule_work(&adev->pm.dpm.thermal.work);
return 0;
}
static int si_dpm_late_init(void *handle)
{
return 0;
}
/**
* si_dpm_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int si_dpm_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TAHITI:
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
if ((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6810) ||
(adev->pdev->device == 0x6811)))
chip_name = "pitcairn_k";
else
chip_name = "pitcairn";
break;
case CHIP_VERDE:
if (((adev->pdev->device == 0x6820) &&
((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83))) ||
((adev->pdev->device == 0x6821) &&
((adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87))) ||
((adev->pdev->revision == 0x87) &&
((adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682b))))
chip_name = "verde_k";
else
chip_name = "verde";
break;
case CHIP_OLAND:
if (((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6600) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605) ||
(adev->pdev->device == 0x6610))) ||
((adev->pdev->revision == 0x83) &&
(adev->pdev->device == 0x6610)))
chip_name = "oland_k";
else
chip_name = "oland";
break;
case CHIP_HAINAN:
if (((adev->pdev->revision == 0x81) &&
(adev->pdev->device == 0x6660)) ||
((adev->pdev->revision == 0x83) &&
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665))
chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
default: BUG();
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
err, fw_name);
amdgpu_ucode_release(&adev->pm.fw);
}
return err;
}
static int si_dpm_sw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
if (ret)
return ret;
/* default to balanced state */
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
adev->pm.default_sclk = adev->clock.default_sclk;
adev->pm.default_mclk = adev->clock.default_mclk;
adev->pm.current_sclk = adev->clock.default_sclk;
adev->pm.current_mclk = adev->clock.default_mclk;
adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
if (amdgpu_dpm == 0)
return 0;
ret = si_dpm_init_microcode(adev);
if (ret)
return ret;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
ret = si_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
si_dpm_fini(adev);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
static int si_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
flush_work(&adev->pm.dpm.thermal.work);
si_dpm_fini(adev);
return 0;
}
static int si_dpm_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!amdgpu_dpm)
return 0;
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
static int si_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
return 0;
}
static int si_dpm_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
}
return 0;
}
static int si_dpm_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
static bool si_dpm_is_idle(void *handle)
{
/* XXX */
return true;
}
static int si_dpm_wait_for_idle(void *handle)
{
/* XXX */
return 0;
}
static int si_dpm_soft_reset(void *handle)
{
return 0;
}
static int si_dpm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int si_dpm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
/* get temperature in millidegrees */
static int si_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
CTF_TEMP_SHIFT;
if (temp & 0x200)
actual_temp = 255;
else
actual_temp = temp & 0x1ff;
actual_temp = (actual_temp * 1000);
return actual_temp;
}
static u32 si_dpm_get_sclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
if (low)
return requested_state->performance_levels[0].sclk;
else
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
static u32 si_dpm_get_mclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
if (low)
return requested_state->performance_levels[0].mclk;
else
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
static void si_dpm_print_power_state(void *handle,
void *current_ps)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
int i;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
if (adev->asic_type >= CHIP_TAHITI)
DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
else
DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
}
amdgpu_dpm_print_ps_status(adev, rps);
}
static int si_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->powerplay.pp_funcs = &si_dpm_funcs;
adev->powerplay.pp_handle = adev;
si_dpm_set_irq_funcs(adev);
return 0;
}
static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
const struct rv7xx_pl *si_cpl2)
{
return ((si_cpl1->mclk == si_cpl2->mclk) &&
(si_cpl1->sclk == si_cpl2->sclk) &&
(si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
(si_cpl1->vddc == si_cpl2->vddc) &&
(si_cpl1->vddci == si_cpl2->vddci));
}
static int si_check_state_equal(void *handle,
void *current_ps,
void *request_ps,
bool *equal)
{
struct si_ps *si_cps;
struct si_ps *si_rps;
int i;
struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
si_cps = si_get_ps((struct amdgpu_ps *)cps);
si_rps = si_get_ps((struct amdgpu_ps *)rps);
if (si_cps == NULL) {
printk("si_cps is NULL\n");
*equal = false;
return 0;
}
if (si_cps->performance_level_count != si_rps->performance_level_count) {
*equal = false;
return 0;
}
for (i = 0; i < si_cps->performance_level_count; i++) {
if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
&(si_rps->performance_levels[i]))) {
*equal = false;
return 0;
}
}
/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
return 0;
}
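/*
 * Sensor readback for the generic read_sensor interface: sclk and mclk
 * are taken from whichever performance level the hardware currently
 * reports via TARGET_AND_CURRENT_PROFILE_INDEX, the temperature from the
 * on-die sensor. All supported sensors return a 32-bit value.
 */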
static int si_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
CURRENT_STATE_INDEX_SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
return -EINVAL;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
if (pl_index < ps->performance_level_count) {
sclk = ps->performance_levels[pl_index].sclk;
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
}
return -EINVAL;
case AMDGPU_PP_SENSOR_GFX_MCLK:
if (pl_index < ps->performance_level_count) {
mclk = ps->performance_levels[pl_index].mclk;
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
}
return -EINVAL;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = si_dpm_get_temp(adev);
*size = 4;
return 0;
default:
return -EOPNOTSUPP;
}
}
static const struct amd_ip_funcs si_dpm_ip_funcs = {
.name = "si_dpm",
.early_init = si_dpm_early_init,
.late_init = si_dpm_late_init,
.sw_init = si_dpm_sw_init,
.sw_fini = si_dpm_sw_fini,
.hw_init = si_dpm_hw_init,
.hw_fini = si_dpm_hw_fini,
.suspend = si_dpm_suspend,
.resume = si_dpm_resume,
.is_idle = si_dpm_is_idle,
.wait_for_idle = si_dpm_wait_for_idle,
.soft_reset = si_dpm_soft_reset,
.set_clockgating_state = si_dpm_set_clockgating_state,
.set_powergating_state = si_dpm_set_powergating_state,
};
const struct amdgpu_ip_block_version si_smu_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &si_dpm_ip_funcs,
};
static const struct amd_pm_funcs si_dpm_funcs = {
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
.post_set_power_state = &si_dpm_post_set_power_state,
.display_configuration_changed = &si_dpm_display_configuration_changed,
.get_sclk = &si_dpm_get_sclk,
.get_mclk = &si_dpm_get_mclk,
.print_power_state = &si_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
.set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm,
.get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm,
.check_state_equal = &si_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.read_sensor = &si_dpm_read_sensor,
.pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
.set = si_dpm_set_interrupt_state,
.process = si_dpm_process_interrupt,
};
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}
| linux-master | drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c |
/*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include "amdgpu.h"
#include "cikd.h"
#include "kv_dpm.h"
#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"
int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
{
u32 i;
u32 tmp = 0;
WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);
for (i = 0; i < adev->usec_timeout; i++) {
if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
break;
udelay(1);
}
tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
if (tmp != 1) {
if (tmp == 0xFF)
return -EINVAL;
else if (tmp == 0xFE)
return -EINVAL;
}
return 0;
}
int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
{
int ret;
ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
if (ret == 0)
*enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);
return ret;
}
int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
WREG32(mmSMC_MSG_ARG_0, parameter);
return amdgpu_kv_notify_message_to_smu(adev, msg);
}
static int kv_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
if (smc_address & 3)
return -EINVAL;
if ((smc_address + 3) > limit)
return -EINVAL;
WREG32(mmSMC_IND_INDEX_0, smc_address);
WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 *value, u32 limit)
{
int ret;
ret = kv_set_smc_sram_address(adev, smc_address, limit);
if (ret)
return ret;
*value = RREG32(mmSMC_IND_DATA_0);
return 0;
}
int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
else
return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
}
int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
else
return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
}
int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
int ret;
u32 data, original_data, addr, extra_shift, t_byte, count, mask;
if ((smc_start_address + byte_count) > limit)
return -EINVAL;
addr = smc_start_address;
t_byte = addr & 3;
/* RMW for the initial bytes */
if (t_byte != 0) {
addr -= t_byte;
ret = kv_set_smc_sram_address(adev, addr, limit);
if (ret)
return ret;
original_data = RREG32(mmSMC_IND_DATA_0);
data = 0;
mask = 0;
count = 4;
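/*
 * Assemble the merged dword MSB-first: the t_byte leading bytes and
 * any trailing bytes beyond the payload keep their original value
 * (their mask bits are set), while payload bytes come from src.
 * E.g. start address 0x102 with two payload bytes 0xAA 0xBB yields
 * (original_data & 0xffff0000) | 0x0000aabb at dword 0x100.
 */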
while (count > 0) {
if (t_byte > 0) {
mask = (mask << 8) | 0xff;
t_byte--;
} else if (byte_count > 0) {
data = (data << 8) + *src++;
byte_count--;
mask <<= 8;
} else {
data <<= 8;
mask = (mask << 8) | 0xff;
}
count--;
}
data |= original_data & mask;
ret = kv_set_smc_sram_address(adev, addr, limit);
if (ret)
return ret;
WREG32(mmSMC_IND_DATA_0, data);
addr += 4;
}
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
ret = kv_set_smc_sram_address(adev, addr, limit);
if (ret)
return ret;
WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
addr += 4;
}
/* RMW for the final bytes */
if (byte_count > 0) {
data = 0;
ret = kv_set_smc_sram_address(adev, addr, limit);
if (ret)
return ret;
original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
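/*
 * Left-align the partial payload within the big-endian dword; the
 * low extra_shift bits keep their original contents.
 */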
while (byte_count > 0) {
/* SMC address space is BE */
data = (data << 8) + *src++;
byte_count--;
}
data <<= extra_shift;
data |= (original_data & ~((~0UL) << extra_shift));
ret = kv_set_smc_sram_address(adev, addr, limit);
if (ret)
return ret;
WREG32(mmSMC_IND_DATA_0, data);
}
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
static int amd_powerplay_create(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr;
if (adev == NULL)
return -EINVAL;
hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
if (hwmgr == NULL)
return -ENOMEM;
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
hwmgr->feature_mask = adev->pm.pp_feature;
hwmgr->display_config = &adev->pm.pm_display_cfg;
adev->powerplay.pp_handle = hwmgr;
adev->powerplay.pp_funcs = &pp_dpm_funcs;
return 0;
}
static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
mutex_destroy(&hwmgr->msg_lock);
kfree(hwmgr->hardcode_pp_table);
hwmgr->hardcode_pp_table = NULL;
kfree(hwmgr);
hwmgr = NULL;
}
static int pp_early_init(void *handle)
{
int ret;
struct amdgpu_device *adev = handle;
ret = amd_powerplay_create(adev);
if (ret != 0)
return ret;
ret = hwmgr_early_init(adev->powerplay.pp_handle);
if (ret)
return -EINVAL;
return 0;
}
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
struct pp_hwmgr *hwmgr =
container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
struct amdgpu_device *adev = hwmgr->adev;
struct amdgpu_dpm_thermal *range =
&adev->pm.dpm.thermal;
uint32_t gpu_temperature, size;
int ret;
/*
 * If, after the enforced delay, the hotspot/edge temperature is
 * confirmed to still be below the SW CTF setting point, nothing
 * needs to be done. Otherwise, a graceful shutdown is performed
 * to prevent further damage.
 */
if (range->sw_ctf_threshold &&
hwmgr->hwmgr_func->read_sensor) {
ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
&gpu_temperature,
&size);
/*
 * For some legacy ASICs, retrieving the hotspot temperature might
 * not be supported. In that case, check the edge temperature instead.
 */
if (ret == -EOPNOTSUPP)
ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
AMDGPU_PP_SENSOR_EDGE_TEMP,
&gpu_temperature,
&size);
if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
return;
}
dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
orderly_poweroff(true);
}
static int pp_sw_init(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
ret = hwmgr_sw_init(hwmgr);
pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
if (!ret)
INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
pp_swctf_delayed_work_handler);
return ret;
}
static int pp_sw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
hwmgr_sw_fini(hwmgr);
amdgpu_ucode_release(&adev->pm.fw);
return 0;
}
static int pp_hw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
ret = hwmgr_hw_init(hwmgr);
if (ret)
pr_err("powerplay hw init failed\n");
return ret;
}
static int pp_hw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
hwmgr_hw_fini(hwmgr);
return 0;
}
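/*
 * Carve out a kernel GTT buffer for the SMU's private use and hand its
 * CPU and GPU addresses to the SMU; on failure the buffer is released
 * again so nothing dangles.
 */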
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
int r = -EINVAL;
void *cpu_ptr = NULL;
uint64_t gpu_addr;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->pm.smu_prv_buffer,
&gpu_addr,
&cpu_ptr)) {
DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
return;
}
if (hwmgr->hwmgr_func->notify_cac_buffer_info)
r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
lower_32_bits((unsigned long)cpu_ptr),
upper_32_bits((unsigned long)cpu_ptr),
lower_32_bits(gpu_addr),
upper_32_bits(gpu_addr),
adev->pm.smu_prv_buffer_size);
if (r) {
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
adev->pm.smu_prv_buffer = NULL;
DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
}
}
static int pp_late_init(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (hwmgr && hwmgr->pm_en)
hwmgr_handle_task(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
if (adev->pm.smu_prv_buffer_size != 0)
pp_reserve_vram_for_smu(adev);
return 0;
}
static void pp_late_fini(void *handle)
{
struct amdgpu_device *adev = handle;
if (adev->pm.smu_prv_buffer)
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
amd_powerplay_destroy(adev);
}
static bool pp_is_idle(void *handle)
{
return false;
}
static int pp_wait_for_idle(void *handle)
{
return 0;
}
static int pp_sw_reset(void *handle)
{
return 0;
}
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static int pp_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
return hwmgr_suspend(hwmgr);
}
static int pp_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
return hwmgr_resume(hwmgr);
}
static int pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
.late_init = pp_late_init,
.sw_init = pp_sw_init,
.sw_fini = pp_sw_fini,
.hw_init = pp_hw_init,
.hw_fini = pp_hw_fini,
.late_fini = pp_late_fini,
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &pp_ip_funcs,
};
/* This interface is only supported on VI parts, because only smu7/8
 * can help to load the gfx/sdma firmware. The SMU needs to be enabled
 * before any other IP's firmware is loaded, so start_smu is called
 * here to load the smu7 firmware and then the other IPs' firmware.
 */
static int pp_dpm_load_fw(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
return -EINVAL;
if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
pr_err("fw load failed\n");
return -EINVAL;
}
return 0;
}
static int pp_dpm_fw_loading_complete(void *handle)
{
return 0;
}
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
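/*
 * Book-keeping for the UMD stable-pstate profile levels: remember the
 * level that was active on entry so PROFILE_EXIT can restore it.
 */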
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level *level)
{
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
if (!(hwmgr->dpm_level & profile_mode_mask)) {
/* enter umd pstate, save current level, disable gfx cg*/
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
if (!(*level & profile_mode_mask)) {
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = hwmgr->saved_dpm_level;
hwmgr->en_umd_pstate = false;
}
}
}
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (level == hwmgr->dpm_level)
return 0;
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
return 0;
}
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
return hwmgr->dpm_level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
return hwmgr_handle_task(hwmgr, task_id, user_state);
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
struct pp_power_state *state;
enum amd_pm_state_type pm_type;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
return -EINVAL;
state = hwmgr->current_ps;
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
pm_type = POWER_STATE_TYPE_BATTERY;
break;
case PP_StateUILabel_Balanced:
pm_type = POWER_STATE_TYPE_BALANCED;
break;
case PP_StateUILabel_Performance:
pm_type = POWER_STATE_TYPE_PERFORMANCE;
break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
else
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
return pm_type;
}
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
return -EOPNOTSUPP;
if (mode == U32_MAX)
return -EINVAL;
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
return 0;
}
static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
return -EOPNOTSUPP;
if (!fan_mode)
return -EINVAL;
*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
return 0;
}
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
return -EOPNOTSUPP;
if (speed == U32_MAX)
return -EINVAL;
return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
return -EOPNOTSUPP;
if (!speed)
return -EINVAL;
return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EOPNOTSUPP;
if (!rpm)
return -EINVAL;
return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
return -EOPNOTSUPP;
if (rpm == U32_MAX)
return -EINVAL;
return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_get_pp_num_states(void *handle,
struct pp_states_info *data)
{
struct pp_hwmgr *hwmgr = handle;
int i;
memset(data, 0, sizeof(*data));
if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
return -EINVAL;
data->nums = hwmgr->num_ps;
for (i = 0; i < hwmgr->num_ps; i++) {
struct pp_power_state *state = (struct pp_power_state *)
((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
data->states[i] = POWER_STATE_TYPE_BATTERY;
break;
case PP_StateUILabel_Balanced:
data->states[i] = POWER_STATE_TYPE_BALANCED;
break;
case PP_StateUILabel_Performance:
data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
else
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
return -EINVAL;
*table = (char *)hwmgr->soft_pp_table;
return hwmgr->soft_pp_table_size;
}
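/*
 * Full hardware reinitialisation: tear the hwmgr down and bring it
 * back up, then replay the post-init task so a newly installed pp
 * table takes effect.
 */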
static int amd_powerplay_reset(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
int ret;
ret = hwmgr_hw_fini(hwmgr);
if (ret)
return ret;
ret = hwmgr_hw_init(hwmgr);
if (ret)
return ret;
return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
struct pp_hwmgr *hwmgr = handle;
int ret = -ENOMEM;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
return ret;
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
ret = amd_powerplay_reset(handle);
if (ret)
return ret;
if (hwmgr->hwmgr_func->avfs_control)
ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
return ret;
}
static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
}
return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_emit_clock_levels(void *handle,
enum pp_clock_type type,
char *buf,
int *offset)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EOPNOTSUPP;
if (!hwmgr->hwmgr_func->emit_clock_levels)
return -ENOENT;
return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !value)
return -EINVAL;
switch (idx) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
return 0;
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
return 0;
case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
return 0;
case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
return 0;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
return 0;
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
return 0;
default:
return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
}
}
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return NULL;
if (idx < hwmgr->num_vce_state_tables)
return &hwmgr->vce_states[idx];
return NULL;
}
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
return -EOPNOTSUPP;
if (!buf)
return -EINVAL;
return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
return -EOPNOTSUPP;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("power profile setting is for manual dpm mode only.\n");
return -EINVAL;
}
return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
return 0;
return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}
static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->set_mp1_state)
return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
return 0;
}
static int pp_dpm_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, bool en)
{
struct pp_hwmgr *hwmgr = handle;
long workload;
uint32_t index;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
return -EINVAL;
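/*
 * Each enabled profile sets one bit in workload_mask, indexed by its
 * priority; fls() then selects the highest-priority profile that is
 * still active and maps it back to a workload setting.
 */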
if (!en) {
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
workload = hwmgr->workload_setting[index];
} else {
hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
index = index <= Workload_Policy_Max ? index - 1 : 0;
workload = hwmgr->workload_setting[index];
}
if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
return -EINVAL;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
return 0;
}
static int pp_set_power_limit(void *handle, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t max_power_limit;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
if (limit == 0)
limit = hwmgr->default_power_limit;
max_power_limit = hwmgr->default_power_limit;
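/*
 * With overdrive enabled the ceiling is raised by TDPODLimit percent,
 * e.g. a TDPODLimit of 20 would allow up to 120% of the default limit.
 */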
if (hwmgr->od_enabled) {
max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
max_power_limit /= 100;
}
if (limit > max_power_limit)
return -EINVAL;
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
return 0;
}
static int pp_get_power_limit(void *handle, uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !limit)
return -EINVAL;
if (power_type != PP_PWR_TYPE_SUSTAINED)
return -EOPNOTSUPP;
switch (pp_limit_level) {
case PP_PWR_LIMIT_CURRENT:
*limit = hwmgr->power_limit;
break;
case PP_PWR_LIMIT_DEFAULT:
*limit = hwmgr->default_power_limit;
break;
case PP_PWR_LIMIT_MAX:
*limit = hwmgr->default_power_limit;
if (hwmgr->od_enabled) {
*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
*limit /= 100;
}
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static int pp_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
phm_store_dal_configuration_data(hwmgr, display_config);
return 0;
}
static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !output)
return -EINVAL;
return phm_get_dal_power_level(hwmgr, output);
}
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment))
ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
else
ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
pr_debug("Error in phm_get_clock_info \n");
return -EINVAL;
}
clocks->min_engine_clock = hw_clocks.min_eng_clk;
clocks->max_engine_clock = hw_clocks.max_eng_clk;
clocks->min_memory_clock = hw_clocks.min_mem_clk;
clocks->max_memory_clock = hw_clocks.max_mem_clk;
clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
if (simple_clocks.level == 0)
clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
else
clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
return 0;
}
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (clocks == NULL)
return -EINVAL;
return phm_get_clock_by_type(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_latency(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_voltage(void *handle,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
return phm_set_watermarks_for_clocks_ranges(hwmgr,
clock_ranges);
}
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !clock)
return -EINVAL;
return phm_display_clock_voltage_request(hwmgr, clock);
}
static int pp_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *clocks)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
clocks->level = PP_DAL_POWERLEVEL_7;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
return ret;
}
static int pp_dpm_powergate_mmhub(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return 0;
if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_acp == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return;
if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
static int pp_set_powergating_by_smu(void *handle,
uint32_t block_type, bool gate)
{
int ret = 0;
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
pp_dpm_powergate_uvd(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_VCE:
pp_dpm_powergate_vce(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_GMC:
/*
 * For now this is only used on PICASSO, and only
 * the "gate" operation is supported.
 */
if (gate)
pp_dpm_powergate_mmhub(handle);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = pp_dpm_powergate_gfx(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_ACP:
pp_dpm_powergate_acp(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_SDMA:
pp_dpm_powergate_sdma(handle, gate);
break;
default:
break;
}
return ret;
}
static int pp_notify_smu_enable_pwe(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
return 0;
}
static int pp_enable_mgpu_fan_boost(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en ||
hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
return 0;
hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
return 0;
}
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
pr_debug("%s was not implemented.\n", __func__);
return -EINVAL;
}
hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
return 0;
}
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
return -EINVAL;
}
hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
return 0;
}
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
return -EINVAL;
}
hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
return 0;
}
static int pp_set_active_display_count(void *handle, uint32_t count)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
return phm_set_active_display_count(hwmgr, count);
}
static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
*cap = false;
if (!hwmgr)
return -EINVAL;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
return 0;
}
static int pp_get_asic_baco_state(void *handle, int *state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
return 0;
}
static int pp_set_asic_baco_state(void *handle, int state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
return 0;
}
static int pp_get_ppfeature_status(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->asic_reset == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
return 0;
hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
return 0;
}
static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
return 0;
hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
return 0;
}
static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr)
return -EINVAL;
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
return -EOPNOTSUPP;
return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}
static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
return 0;
}
static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
int err;
if (!addr || !size)
return -EINVAL;
*addr = NULL;
*size = 0;
if (adev->pm.smu_prv_buffer) {
err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
if (err)
return err;
*size = adev->pm.smu_prv_buffer_size;
}
return 0;
}
static void pp_pm_compute_clocks(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
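/*
 * On the legacy (non-DC) display path the active display
 * configuration has to be collected here before the
 * config-change task is dispatched.
 */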
if (!adev->dc_enabled) {
amdgpu_dpm_get_active_displays(adev);
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching at
 * refresh rates over 120 Hz on the non-DC code.
 */
if (adev->pm.pm_display_cfg.vrefresh > 120)
adev->pm.pm_display_cfg.min_vblank_time = 0;
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
pp_dpm_dispatch_tasks(handle,
AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
NULL);
}
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
.get_pp_num_states = pp_dpm_get_pp_num_states,
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
.emit_clock_levels = pp_dpm_emit_clock_levels,
.print_clock_levels = pp_dpm_print_clock_levels,
.get_sclk_od = pp_dpm_get_sclk_od,
.set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
.read_sensor = pp_dpm_read_sensor,
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
.set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
.set_mp1_state = pp_dpm_set_mp1_state,
.set_power_limit = pp_set_power_limit,
.get_power_limit = pp_get_power_limit,
/* export to DC */
.get_sclk = pp_dpm_get_sclk,
.get_mclk = pp_dpm_get_mclk,
.display_configuration_change = pp_display_configuration_change,
.get_display_power_level = pp_get_display_power_level,
.get_current_clocks = pp_get_current_clocks,
.get_clock_by_type = pp_get_clock_by_type,
.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
.set_active_display_count = pp_set_active_display_count,
.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
.get_asic_baco_capability = pp_get_asic_baco_capability,
.get_asic_baco_state = pp_get_asic_baco_state,
.set_asic_baco_state = pp_set_asic_baco_state,
.get_ppfeature_status = pp_get_ppfeature_status,
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
.set_df_cstate = pp_set_df_cstate,
.set_xgmi_pstate = pp_set_xgmi_pstate,
.get_gpu_metrics = pp_get_gpu_metrics,
.gfx_state_change_set = pp_gfx_state_change_set,
.get_smu_prv_buf_details = pp_get_prv_buffer_details,
.pm_compute_clocks = pp_pm_compute_clocks,
};
| linux-master | drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "atom.h"
#include "pp_debug.h"
static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table,
uint8_t voltage_type, uint8_t voltage_mode)
{
unsigned int size = le16_to_cpu(
voltage_object_info_table->table_header.structuresize);
unsigned int offset =
offsetof(struct atom_voltage_objects_info_v4_1, voltage_object[0]);
unsigned long start = (unsigned long)voltage_object_info_table;
while (offset < size) {
const union atom_voltage_object_v4 *voltage_object =
(const union atom_voltage_object_v4 *)(start + offset);
if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
return voltage_object;
offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
}
return NULL;
}
static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table(
struct pp_hwmgr *hwmgr)
{
const void *table_address;
uint16_t idx;
idx = GetIndexIntoMasterDataTable(voltageobject_info);
table_address = smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
PP_ASSERT_WITH_CODE(table_address,
"Error retrieving BIOS Table Address!",
return NULL);
return (struct atom_voltage_objects_info_v4_1 *)table_address;
}
/*
* Returns TRUE if the given voltage type is controlled by GPIO pins.
* voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC, SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
* voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
*/
bool pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(struct pp_hwmgr *hwmgr,
uint8_t voltage_type, uint8_t voltage_mode)
{
struct atom_voltage_objects_info_v4_1 *voltage_info =
(struct atom_voltage_objects_info_v4_1 *)
pp_atomfwctrl_get_voltage_info_table(hwmgr);
bool ret;
/* If we cannot find the table do NOT try to control this voltage. */
PP_ASSERT_WITH_CODE(voltage_info,
"Could not find Voltage Table in BIOS.",
return false);
ret = (pp_atomfwctrl_lookup_voltage_type_v4(voltage_info,
voltage_type, voltage_mode)) ? true : false;
return ret;
}
int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
uint8_t voltage_type, uint8_t voltage_mode,
struct pp_atomfwctrl_voltage_table *voltage_table)
{
struct atom_voltage_objects_info_v4_1 *voltage_info =
(struct atom_voltage_objects_info_v4_1 *)
pp_atomfwctrl_get_voltage_info_table(hwmgr);
const union atom_voltage_object_v4 *voltage_object;
unsigned int i;
int result = 0;
PP_ASSERT_WITH_CODE(voltage_info,
"Could not find Voltage Table in BIOS.",
return -1);
voltage_object = pp_atomfwctrl_lookup_voltage_type_v4(voltage_info,
voltage_type, voltage_mode);
if (!voltage_object)
return -1;
voltage_table->count = 0;
if (voltage_mode == VOLTAGE_OBJ_GPIO_LUT) {
PP_ASSERT_WITH_CODE(
(voltage_object->gpio_voltage_obj.gpio_entry_num <=
PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES),
"Too many voltage entries!",
result = -1);
if (!result) {
for (i = 0; i < voltage_object->gpio_voltage_obj.
gpio_entry_num; i++) {
voltage_table->entries[i].value =
le16_to_cpu(voltage_object->gpio_voltage_obj.
voltage_gpio_lut[i].voltage_level_mv);
voltage_table->entries[i].smio_low =
le32_to_cpu(voltage_object->gpio_voltage_obj.
voltage_gpio_lut[i].voltage_gpio_reg_val);
}
voltage_table->count =
voltage_object->gpio_voltage_obj.gpio_entry_num;
voltage_table->mask_low =
le32_to_cpu(
voltage_object->gpio_voltage_obj.gpio_mask_val);
voltage_table->phase_delay =
voltage_object->gpio_voltage_obj.phase_delay_us;
}
} else if (voltage_mode == VOLTAGE_OBJ_SVID2) {
voltage_table->psi1_enable =
(voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5;
voltage_table->psi0_enable =
voltage_object->svid2_voltage_obj.psi0_enable & 0x1;
voltage_table->max_vid_step =
voltage_object->svid2_voltage_obj.maxvstep;
voltage_table->telemetry_offset =
voltage_object->svid2_voltage_obj.telemetry_offset;
voltage_table->telemetry_slope =
voltage_object->svid2_voltage_obj.telemetry_gain;
} else
PP_ASSERT_WITH_CODE(false,
"Unsupported Voltage Object Mode!",
result = -1);
return result;
}
static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
struct pp_hwmgr *hwmgr)
{
const void *table_address;
uint16_t idx;
idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
table_address = smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
PP_ASSERT_WITH_CODE(table_address,
"Error retrieving BIOS Table Address!",
return NULL);
return (struct atom_gpio_pin_lut_v2_1 *)table_address;
}
static bool pp_atomfwctrl_lookup_gpio_pin(
struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table,
const uint32_t pin_id,
struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
{
unsigned int size = le16_to_cpu(
gpio_lookup_table->table_header.structuresize);
unsigned int offset =
offsetof(struct atom_gpio_pin_lut_v2_1, gpio_pin[0]);
unsigned long start = (unsigned long)gpio_lookup_table;
while (offset < size) {
const struct atom_gpio_pin_assignment *pin_assignment =
(const struct atom_gpio_pin_assignment *)(start + offset);
if (pin_id == pin_assignment->gpio_id) {
gpio_pin_assignment->uc_gpio_pin_bit_shift =
pin_assignment->gpio_bitshift;
gpio_pin_assignment->us_gpio_pin_aindex =
le16_to_cpu(pin_assignment->data_a_reg_index);
return true;
}
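/*
 * Advance to the next entry; the stride assumes gpio_id is the
 * final byte of each packed atom_gpio_pin_assignment.
 */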
offset += offsetof(struct atom_gpio_pin_assignment, gpio_id) + 1;
}
return false;
}
/*
 * Returns TRUE if the given pin id is found in the lookup table.
 */
bool pp_atomfwctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr,
const uint32_t pin_id,
struct pp_atomfwctrl_gpio_pin_assignment *gpio_pin_assignment)
{
bool ret = false;
struct atom_gpio_pin_lut_v2_1 *gpio_lookup_table =
pp_atomfwctrl_get_gpio_lookup_table(hwmgr);
/* If we cannot find the table do NOT try to control this voltage. */
PP_ASSERT_WITH_CODE(gpio_lookup_table,
"Could not find GPIO lookup Table in BIOS.",
return false);
ret = pp_atomfwctrl_lookup_gpio_pin(gpio_lookup_table,
pin_id, gpio_pin_assignment);
return ret;
}
/*
 * Enter self-refresh mode.
 * @param hwmgr
 */
int pp_atomfwctrl_enter_self_refresh(struct pp_hwmgr *hwmgr)
{
/* 0 - no action
* 1 - leave power to video memory always on
*/
return 0;
}
/** pp_atomfwctrl_get_gpu_pll_dividers_vega10().
 *
 * @param hwmgr input parameter: pointer to HwMgr
 * @param clock_type input parameter: Clock type: 1 - GFXCLK, 2 - UCLK, 0 - All other clocks
 * @param clock_value input parameter: Clock value, in 10 kHz units
 * @param dividers output parameter: Clock dividers
 */
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers)
{
struct amdgpu_device *adev = hwmgr->adev;
struct compute_gpu_clock_input_parameter_v1_8 pll_parameters;
struct compute_gpu_clock_output_parameter_v1_8 *pll_output;
uint32_t idx;
pll_parameters.gpuclock_10khz = (uint32_t)clock_value;
pll_parameters.gpu_clock_type = clock_type;
idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
if (amdgpu_atom_execute_table(
adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
return -EINVAL;
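/* The atom table returns its output in place of the input parameter block. */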
pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
&pll_parameters;
dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
return 0;
}
int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_avfs_parameters *param)
{
uint16_t idx;
uint8_t format_revision, content_revision;
struct atom_asic_profiling_info_v4_1 *profile;
struct atom_asic_profiling_info_v4_2 *profile_v4_2;
idx = GetIndexIntoMasterDataTable(asic_profiling_info);
profile = (struct atom_asic_profiling_info_v4_1 *)
smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
if (!profile)
return -1;
format_revision = ((struct atom_common_table_header *)profile)->format_revision;
content_revision = ((struct atom_common_table_header *)profile)->content_revision;
if (format_revision == 4 && content_revision == 1) {
param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
param->ulMinVddc = le32_to_cpu(profile->minvddc);
param->ulMeanNsigmaAcontant0 =
le32_to_cpu(profile->avfs_meannsigma_acontant0);
param->ulMeanNsigmaAcontant1 =
le32_to_cpu(profile->avfs_meannsigma_acontant1);
param->ulMeanNsigmaAcontant2 =
le32_to_cpu(profile->avfs_meannsigma_acontant2);
param->usMeanNsigmaDcTolSigma =
le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
param->usMeanNsigmaPlatformMean =
le16_to_cpu(profile->avfs_meannsigma_platform_mean);
param->usMeanNsigmaPlatformSigma =
le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
param->ulGbVdroopTableCksoffA0 =
le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
param->ulGbVdroopTableCksoffA1 =
le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
param->ulGbVdroopTableCksoffA2 =
le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
param->ulGbVdroopTableCksonA0 =
le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
param->ulGbVdroopTableCksonA1 =
le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
param->ulGbVdroopTableCksonA2 =
le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
param->ulGbFuseTableCksoffM1 =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
param->ulGbFuseTableCksoffM2 =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
param->ulGbFuseTableCksoffB =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
param->ulGbFuseTableCksonM1 =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
param->ulGbFuseTableCksonM2 =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
param->ulGbFuseTableCksonB =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
param->ucEnableGbVdroopTableCkson =
profile->enable_gb_vdroop_table_ckson;
param->ucEnableGbFuseTableCkson =
profile->enable_gb_fuse_table_ckson;
param->usPsmAgeComfactor =
le16_to_cpu(profile->psm_age_comfactor);
param->ulDispclk2GfxclkM1 =
le32_to_cpu(profile->dispclk2gfxclk_a);
param->ulDispclk2GfxclkM2 =
le32_to_cpu(profile->dispclk2gfxclk_b);
param->ulDispclk2GfxclkB =
le32_to_cpu(profile->dispclk2gfxclk_c);
param->ulDcefclk2GfxclkM1 =
le32_to_cpu(profile->dcefclk2gfxclk_a);
param->ulDcefclk2GfxclkM2 =
le32_to_cpu(profile->dcefclk2gfxclk_b);
param->ulDcefclk2GfxclkB =
le32_to_cpu(profile->dcefclk2gfxclk_c);
param->ulPixelclk2GfxclkM1 =
le32_to_cpu(profile->pixclk2gfxclk_a);
param->ulPixelclk2GfxclkM2 =
le32_to_cpu(profile->pixclk2gfxclk_b);
param->ulPixelclk2GfxclkB =
le32_to_cpu(profile->pixclk2gfxclk_c);
param->ulPhyclk2GfxclkM1 =
le32_to_cpu(profile->phyclk2gfxclk_a);
param->ulPhyclk2GfxclkM2 =
le32_to_cpu(profile->phyclk2gfxclk_b);
param->ulPhyclk2GfxclkB =
le32_to_cpu(profile->phyclk2gfxclk_c);
param->ulAcgGbVdroopTableA0 = 0;
param->ulAcgGbVdroopTableA1 = 0;
param->ulAcgGbVdroopTableA2 = 0;
param->ulAcgGbFuseTableM1 = 0;
param->ulAcgGbFuseTableM2 = 0;
param->ulAcgGbFuseTableB = 0;
param->ucAcgEnableGbVdroopTable = 0;
param->ucAcgEnableGbFuseTable = 0;
} else if (format_revision == 4 && content_revision == 2) {
profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile;
param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc);
param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc);
param->ulMeanNsigmaAcontant0 =
le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0);
param->ulMeanNsigmaAcontant1 =
le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1);
param->ulMeanNsigmaAcontant2 =
le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2);
param->usMeanNsigmaDcTolSigma =
le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma);
param->usMeanNsigmaPlatformMean =
le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean);
param->usMeanNsigmaPlatformSigma =
le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma);
param->ulGbVdroopTableCksoffA0 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0);
param->ulGbVdroopTableCksoffA1 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1);
param->ulGbVdroopTableCksoffA2 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2);
param->ulGbVdroopTableCksonA0 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0);
param->ulGbVdroopTableCksonA1 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1);
param->ulGbVdroopTableCksonA2 =
le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2);
param->ulGbFuseTableCksoffM1 =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1);
param->ulGbFuseTableCksoffM2 =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2);
param->ulGbFuseTableCksoffB =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b);
param->ulGbFuseTableCksonM1 =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1);
param->ulGbFuseTableCksonM2 =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2);
param->ulGbFuseTableCksonB =
le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b);
param->ucEnableGbVdroopTableCkson =
profile_v4_2->enable_gb_vdroop_table_ckson;
param->ucEnableGbFuseTableCkson =
profile_v4_2->enable_gb_fuse_table_ckson;
param->usPsmAgeComfactor =
le16_to_cpu(profile_v4_2->psm_age_comfactor);
param->ulDispclk2GfxclkM1 =
le32_to_cpu(profile_v4_2->dispclk2gfxclk_a);
param->ulDispclk2GfxclkM2 =
le32_to_cpu(profile_v4_2->dispclk2gfxclk_b);
param->ulDispclk2GfxclkB =
le32_to_cpu(profile_v4_2->dispclk2gfxclk_c);
param->ulDcefclk2GfxclkM1 =
le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a);
param->ulDcefclk2GfxclkM2 =
le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b);
param->ulDcefclk2GfxclkB =
le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c);
param->ulPixelclk2GfxclkM1 =
le32_to_cpu(profile_v4_2->pixclk2gfxclk_a);
param->ulPixelclk2GfxclkM2 =
le32_to_cpu(profile_v4_2->pixclk2gfxclk_b);
param->ulPixelclk2GfxclkB =
le32_to_cpu(profile_v4_2->pixclk2gfxclk_c);
param->ulPhyclk2GfxclkM1 =
le32_to_cpu(profile_v4_2->phyclk2gfxclk_a);
param->ulPhyclk2GfxclkM2 =
le32_to_cpu(profile_v4_2->phyclk2gfxclk_b);
param->ulPhyclk2GfxclkB =
le32_to_cpu(profile_v4_2->phyclk2gfxclk_c);
param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0);
param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1);
param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2);
param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1);
param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2);
param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b);
param->ucAcgEnableGbVdroopTable = le32_to_cpu(profile_v4_2->enable_acg_gb_vdroop_table);
param->ucAcgEnableGbFuseTable = le32_to_cpu(profile_v4_2->enable_acg_gb_fuse_table);
} else {
pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n");
return -EINVAL;
}
return 0;
}
int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_gpio_parameters *param)
{
struct atom_smu_info_v3_1 *info;
uint16_t idx;
idx = GetIndexIntoMasterDataTable(smu_info);
info = (struct atom_smu_info_v3_1 *)
smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS smu_info Table Address!");
return -1;
}
param->ucAcDcGpio = info->ac_dc_gpio_bit;
param->ucAcDcPolarity = info->ac_dc_polarity;
param->ucVR0HotGpio = info->vr0hot_gpio_bit;
param->ucVR0HotPolarity = info->vr0hot_polarity;
param->ucVR1HotGpio = info->vr1hot_gpio_bit;
param->ucVR1HotPolarity = info->vr1hot_polarity;
param->ucFwCtfGpio = info->fw_ctf_gpio_bit;
param->ucFwCtfPolarity = info->fw_ctf_polarity;
return 0;
}
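/*
 * Execute the getsmuclockinfo ATOM command table to query the current
 * frequency of the given clock on the given syspll. The firmware reports
 * the output clock in Hz; it is converted here to 10 kHz units.
 */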
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
uint8_t clk_id, uint8_t syspll_id,
uint32_t *frequency)
{
struct amdgpu_device *adev = hwmgr->adev;
struct atom_get_smu_clock_info_parameters_v3_1 parameters;
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
uint32_t ix;
parameters.clk_id = clk_id;
parameters.syspll_id = syspll_id;
parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
parameters.dfsdid = 0;
ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
if (amdgpu_atom_execute_table(
adev->mode_info.atom_context, ix, (uint32_t *)¶meters))
return -EINVAL;
output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters;
*frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
return 0;
}
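/*
 * Copy the bootup clock and voltage defaults out of a v3.2 firmware info
 * table, then query the SMU11 syspll outputs for the remaining bootup
 * clocks (soc/dcef/e/v/d/f). A v3.1 variant for older firmware info
 * tables follows below.
 */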
static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values,
struct atom_firmware_info_v3_2 *fw_info)
{
uint32_t frequency = 0;
boot_values->ulRevision = fw_info->firmware_revision;
boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
boot_values->usVddc = fw_info->bootup_vddc_mv;
boot_values->usVddci = fw_info->bootup_vddci_mv;
boot_values->usMvddc = fw_info->bootup_mvddc_mv;
boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
boot_values->ucCoolingID = fw_info->coolingsolution_id;
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulSocClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDCEFClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulEClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulVClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency))
boot_values->ulFClk = frequency;
}
static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values,
struct atom_firmware_info_v3_1 *fw_info)
{
uint32_t frequency = 0;
boot_values->ulRevision = fw_info->firmware_revision;
boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
boot_values->usVddc = fw_info->bootup_vddc_mv;
boot_values->usVddci = fw_info->bootup_vddci_mv;
boot_values->usMvddc = fw_info->bootup_mvddc_mv;
boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
boot_values->ucCoolingID = fw_info->coolingsolution_id;
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency))
boot_values->ulSocClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency))
boot_values->ulDCEFClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency))
boot_values->ulEClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency))
boot_values->ulVClk = frequency;
if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency))
boot_values->ulDClk = frequency;
}
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
struct atom_firmware_info_v3_2 *fwinfo_3_2;
struct atom_firmware_info_v3_1 *fwinfo_3_1;
struct atom_common_table_header *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
info = (struct atom_common_table_header *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS firmwareinfo!");
return -EINVAL;
}
if ((info->format_revision == 3) && (info->content_revision == 2)) {
fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
boot_values, fwinfo_3_2);
} else if ((info->format_revision == 3) && (info->content_revision == 1)) {
fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
boot_values, fwinfo_3_1);
} else {
pr_info("Fw info table revision does not match!");
return -EINVAL;
}
return 0;
}
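/*
 * Copy board-specific DPM parameters (I2C addresses and lines, VR
 * mappings, telemetry offsets, GPIO assignments and spread-spectrum
 * settings) out of the smc_dpm_info v4.1 data table.
 */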
int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_smc_dpm_parameters *param)
{
struct atom_smc_dpm_info_v4_1 *info;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(smc_dpm_info);
info = (struct atom_smc_dpm_info_v4_1 *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS Table Address!");
return -EINVAL;
}
param->liquid1_i2c_address = info->liquid1_i2c_address;
param->liquid2_i2c_address = info->liquid2_i2c_address;
param->vr_i2c_address = info->vr_i2c_address;
param->plx_i2c_address = info->plx_i2c_address;
param->liquid_i2c_linescl = info->liquid_i2c_linescl;
param->liquid_i2c_linesda = info->liquid_i2c_linesda;
param->vr_i2c_linescl = info->vr_i2c_linescl;
param->vr_i2c_linesda = info->vr_i2c_linesda;
param->plx_i2c_linescl = info->plx_i2c_linescl;
param->plx_i2c_linesda = info->plx_i2c_linesda;
param->vrsensorpresent = info->vrsensorpresent;
param->liquidsensorpresent = info->liquidsensorpresent;
param->maxvoltagestepgfx = info->maxvoltagestepgfx;
param->maxvoltagestepsoc = info->maxvoltagestepsoc;
param->vddgfxvrmapping = info->vddgfxvrmapping;
param->vddsocvrmapping = info->vddsocvrmapping;
param->vddmem0vrmapping = info->vddmem0vrmapping;
param->vddmem1vrmapping = info->vddmem1vrmapping;
param->gfxulvphasesheddingmask = info->gfxulvphasesheddingmask;
param->soculvphasesheddingmask = info->soculvphasesheddingmask;
param->gfxmaxcurrent = info->gfxmaxcurrent;
param->gfxoffset = info->gfxoffset;
param->padding_telemetrygfx = info->padding_telemetrygfx;
param->socmaxcurrent = info->socmaxcurrent;
param->socoffset = info->socoffset;
param->padding_telemetrysoc = info->padding_telemetrysoc;
param->mem0maxcurrent = info->mem0maxcurrent;
param->mem0offset = info->mem0offset;
param->padding_telemetrymem0 = info->padding_telemetrymem0;
param->mem1maxcurrent = info->mem1maxcurrent;
param->mem1offset = info->mem1offset;
param->padding_telemetrymem1 = info->padding_telemetrymem1;
param->acdcgpio = info->acdcgpio;
param->acdcpolarity = info->acdcpolarity;
param->vr0hotgpio = info->vr0hotgpio;
param->vr0hotpolarity = info->vr0hotpolarity;
param->vr1hotgpio = info->vr1hotgpio;
param->vr1hotpolarity = info->vr1hotpolarity;
param->padding1 = info->padding1;
param->padding2 = info->padding2;
param->ledpin0 = info->ledpin0;
param->ledpin1 = info->ledpin1;
param->ledpin2 = info->ledpin2;
param->pllgfxclkspreadenabled = info->pllgfxclkspreadenabled;
param->pllgfxclkspreadpercent = info->pllgfxclkspreadpercent;
param->pllgfxclkspreadfreq = info->pllgfxclkspreadfreq;
param->uclkspreadenabled = info->uclkspreadenabled;
param->uclkspreadpercent = info->uclkspreadpercent;
param->uclkspreadfreq = info->uclkspreadfreq;
param->socclkspreadenabled = info->socclkspreadenabled;
param->socclkspreadpercent = info->socclkspreadpercent;
param->socclkspreadfreq = info->socclkspreadfreq;
param->acggfxclkspreadenabled = info->acggfxclkspreadenabled;
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
param->Vr2_I2C_address = info->Vr2_I2C_address;
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "soc15_common.h"
#include "vega10_inc.h"
#include "smu9_baco.h"
int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg, data;
*cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
return 0;
WREG32(0x12074, 0xFFF0003B);
data = RREG32(0x12075);
if (data == 0x1) {
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
*cap = true;
}
return 0;
}
int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
if (reg & BACO_CNTL__BACO_MODE_MASK)
/* gfx has already entered BACO state */
*state = BACO_STATE_IN;
else
*state = BACO_STATE_OUT;
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
#include "smu8_hwmgr.h"
#include "power_state.h"
#include "pp_thermal.h"
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24
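/*
 * smu8 power states are tagged with PHM_Cz_Magic; the cast helpers below
 * return NULL when the magic does not match, so callers cannot
 * accidentally downcast a foreign pp_hw_power_state.
 */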
static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
{
if (smu8_magic != hw_ps->magic)
return NULL;
return (struct smu8_power_state *)hw_ps;
}
static const struct smu8_power_state *cast_const_smu8_power_state(
const struct pp_hw_power_state *hw_ps)
{
if (smu8_magic != hw_ps->magic)
return NULL;
return (struct smu8_power_state *)hw_ps;
}
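/*
 * Map a clock frequency to a DPM level index. For the *Min messages the
 * search walks up the table and stops at the first level at least as
 * fast as the requested clock; for the *Max messages it walks down and
 * stops at the first level no faster than the requested clock. The sclk
 * and uvd lookups below follow the same pattern.
 */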
static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
switch (msg) {
case PPSMC_MSG_SetEclkSoftMin:
case PPSMC_MSG_SetEclkHardMin:
for (i = 0; i < (int)ptable->count; i++) {
if (clock <= ptable->entries[i].ecclk)
break;
}
break;
case PPSMC_MSG_SetEclkSoftMax:
case PPSMC_MSG_SetEclkHardMax:
for (i = ptable->count - 1; i >= 0; i--) {
if (clock >= ptable->entries[i].ecclk)
break;
}
break;
default:
break;
}
return i;
}
static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
switch (msg) {
case PPSMC_MSG_SetSclkSoftMin:
case PPSMC_MSG_SetSclkHardMin:
for (i = 0; i < (int)table->count; i++) {
if (clock <= table->entries[i].clk)
break;
}
break;
case PPSMC_MSG_SetSclkSoftMax:
case PPSMC_MSG_SetSclkHardMax:
for (i = table->count - 1; i >= 0; i--) {
if (clock >= table->entries[i].clk)
break;
}
break;
default:
break;
}
return i;
}
static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
uint32_t clock, uint32_t msg)
{
int i = 0;
struct phm_uvd_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
switch (msg) {
case PPSMC_MSG_SetUvdSoftMin:
case PPSMC_MSG_SetUvdHardMin:
for (i = 0; i < (int)ptable->count; i++) {
if (clock <= ptable->entries[i].vclk)
break;
}
break;
case PPSMC_MSG_SetUvdSoftMax:
case PPSMC_MSG_SetUvdHardMax:
for (i = ptable->count - 1; i >= 0; i--) {
if (clock >= ptable->entries[i].vclk)
break;
}
break;
default:
break;
}
return i;
}
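/*
 * Query the SMU once for the highest SCLK DPM level and cache the
 * result; the +1 turns the zero-based level index into a level count.
 */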
static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
if (data->max_sclk_level == 0) {
smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetMaxSclkLevel,
&data->max_sclk_level);
data->max_sclk_level += 1;
}
return data->max_sclk_level;
}
static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct amdgpu_device *adev = hwmgr->adev;
data->gfx_ramp_step = 256*25/100;
data->gfx_ramp_delay = 1; /* by default, we delay 1us */
data->mgcg_cgtt_local0 = 0x00000000;
data->mgcg_cgtt_local1 = 0x00000000;
data->clock_slow_down_freq = 25000;
data->skip_clock_slow_down = 1;
data->enable_nb_ps_policy = 1; /* 1 = enabled; was kept disabled until UNB was ready */
data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
data->voting_rights_clients = 0x00C00033;
data->static_screen_threshold = 8;
data->ddi_power_gating_disabled = 0;
data->bapm_enabled = 1;
data->voltage_drop_threshold = 0;
data->gfx_power_gating_threshold = 500;
data->vce_slow_sclk_threshold = 20000;
data->dce_slow_sclk_threshold = 30000;
data->disable_driver_thermal_policy = 1;
data->disable_nb_ps3_in_battery = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ABM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_NonABMSupportInPPLib);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicM3Arbiter);
data->override_dynamic_mgpg = 1;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);
data->thermal_auto_throttling_treshold = 0;
data->tdr_clock = 0;
data->disable_gfx_power_gating_in_uvd = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM);
data->cc6_settings.cpu_cc6_disable = false;
data->cc6_settings.cpu_pstate_disable = false;
data->cc6_settings.nb_pstate_switch_disable = false;
data->cc6_settings.cpu_pstate_separation_time = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableVoltageIsland);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
return 0;
}
/* convert from 8-bit vid to real voltage in mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
return 6200 - (voltage * 25);
}
static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *table)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct smu8_sys_info *sys_info = &data->sys_info;
struct phm_clock_voltage_dependency_table *dep_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
if (dep_table->count > 0) {
table->sclk = dep_table->entries[dep_table->count-1].clk;
table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
(uint16_t)dep_table->entries[dep_table->count-1].v);
}
table->mclk = sys_info->nbp_memory_clock[0];
return 0;
}
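/*
 * Build the 8-entry table that maps DAL power levels 0..7 to voltage
 * indices and hang it off dyn_state.vddc_dep_on_dal_pwrl. Note that the
 * disp_voltage_table argument is currently unused.
 */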
static int smu8_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr,
ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
struct phm_clock_voltage_dependency_table *table_clk_vlt;
table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
GFP_KERNEL);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
return -ENOMEM;
}
table_clk_vlt->count = 8;
table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
table_clk_vlt->entries[0].v = 0;
table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
table_clk_vlt->entries[1].v = 1;
table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
table_clk_vlt->entries[2].v = 2;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
table_clk_vlt->entries[3].v = 3;
table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
table_clk_vlt->entries[4].v = 4;
table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
table_clk_vlt->entries[5].v = 5;
table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
table_clk_vlt->entries[6].v = 6;
table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
table_clk_vlt->entries[7].v = 7;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
return 0;
}
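/*
 * Parse the IntegratedSystemInfo v1.9 ATOM table: cache the bootup
 * clocks and voltages, the NB P-state clock/voltage levels (collapsed to
 * the P-state 0 values when NB DPM is disabled) and the display clock
 * levels, then derive the max power limits and DAL voltage tables.
 */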
static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
uint32_t i;
int result = 0;
uint8_t frev, crev;
uint16_t size;
info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
&size, &frev, &crev);
if (info == NULL) {
pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
if (crev != 9) {
pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
data->sys_info.bootup_uma_clock =
le32_to_cpu(info->ulBootUpUMAClock);
data->sys_info.bootup_engine_clock =
le32_to_cpu(info->ulBootUpEngineClock);
data->sys_info.dentist_vco_freq =
le32_to_cpu(info->ulDentistVCOFreq);
data->sys_info.system_config =
le32_to_cpu(info->ulSystemConfig);
data->sys_info.bootup_nb_voltage_index =
le16_to_cpu(info->usBootUpNBVoltage);
data->sys_info.htc_hyst_lmt =
(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
data->sys_info.htc_tmp_lmt =
(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
if (data->sys_info.htc_tmp_lmt <=
data->sys_info.htc_hyst_lmt) {
pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
return -EINVAL;
}
data->sys_info.nb_dpm_enable =
data->enable_nb_ps_policy &&
(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
if (i < SMU8_NUM_NBPMEMORYCLOCK) {
data->sys_info.nbp_memory_clock[i] =
le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
}
data->sys_info.nbp_n_clock[i] =
le32_to_cpu(info->ulNbpStateNClkFreq[i]);
}
for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
data->sys_info.display_clock[i] =
le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
}
/* use 4 levels here; make sure we do not exceed the table size */
for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
data->sys_info.nbp_voltage_index[i] =
le16_to_cpu(info->usNBPStateVoltage[i]);
}
if (!data->sys_info.nb_dpm_enable) {
for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
if (i < SMU8_NUM_NBPMEMORYCLOCK) {
data->sys_info.nbp_memory_clock[i] =
data->sys_info.nbp_memory_clock[0];
}
data->sys_info.nbp_n_clock[i] =
data->sys_info.nbp_n_clock[0];
data->sys_info.nbp_voltage_index[i] =
data->sys_info.nbp_voltage_index[0];
}
}
if (le32_to_cpu(info->ulGPUCapInfo) &
SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableDFSBypass);
}
data->sys_info.uma_channel_number = info->ucUMAChannelNumber;
smu8_construct_max_power_limits_table(hwmgr,
&hwmgr->dyn_state.max_clock_voltage_on_ac);
smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
&info->sDISPCLK_Voltage[0]);
return result;
}
static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->boot_power_level.engineClock =
data->sys_info.bootup_engine_clock;
data->boot_power_level.vddcIndex =
(uint8_t)data->sys_info.bootup_nb_voltage_index;
data->boot_power_level.dsDividerIndex = 0;
data->boot_power_level.ssDividerIndex = 0;
data->boot_power_level.allowGnbSlow = 1;
data->boot_power_level.forceNBPstate = 0;
data->boot_power_level.hysteresis_up = 0;
data->boot_power_level.numSIMDToPowerDown = 0;
data->boot_power_level.display_wm = 0;
data->boot_power_level.vce_wm = 0;
return 0;
}
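/*
 * Download the clock table from the SMU, patch each hardware power level
 * with the frequency, VID and PLL post-divider derived from the
 * vddc/vddgfx/acp/uvd/vce dependency tables, and upload the patched
 * table back. Only done when need_pp_table_upload is set.
 */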
static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
uint32_t i;
void *table = NULL;
pp_atomctrl_clock_dividers_kong dividers;
struct phm_clock_voltage_dependency_table *vddc_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
struct phm_clock_voltage_dependency_table *vdd_gfx_table =
hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
struct phm_acp_clock_voltage_dependency_table *acp_table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
struct phm_uvd_clock_voltage_dependency_table *uvd_table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
struct phm_vce_clock_voltage_dependency_table *vce_table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
if (!hwmgr->need_pp_table_upload)
return 0;
ret = smum_download_powerplay_table(hwmgr, &table);
PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
"Fail to get clock table from SMU!", return -EINVAL;);
clock_table = (struct SMU8_Fusion_ClkTable *)table;
/* patch clock table */
PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
"Dependency table entry exceeds max limit!", return -EINVAL;);
for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {
/* vddc_sclk */
clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
atomctrl_get_engine_pll_dividers_kong(hwmgr,
clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
÷rs);
clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
(uint8_t)dividers.pll_post_divider;
/* vddgfx_sclk */
clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
/* acp breakdown */
clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
atomctrl_get_engine_pll_dividers_kong(hwmgr,
clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
÷rs);
clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
(uint8_t)dividers.pll_post_divider;
/* uvd breakdown */
clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
atomctrl_get_engine_pll_dividers_kong(hwmgr,
clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
÷rs);
clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
(uint8_t)dividers.pll_post_divider;
clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
atomctrl_get_engine_pll_dividers_kong(hwmgr,
clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
÷rs);
clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
(uint8_t)dividers.pll_post_divider;
/* vce breakdown */
clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
atomctrl_get_engine_pll_dividers_kong(hwmgr,
clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
÷rs);
clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
(uint8_t)dividers.pll_post_divider;
}
ret = smum_upload_powerplay_table(hwmgr);
return ret;
}
static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
if (NULL == table || table->count <= 0)
return -EINVAL;
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
data->sclk_dpm.soft_max_clk = clock;
data->sclk_dpm.hard_max_clk = clock;
return 0;
}
static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
data->uvd_dpm.soft_min_clk = 0;
data->uvd_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
if (level < table->count)
clock = table->entries[level].vclk;
else
clock = table->entries[table->count - 1].vclk;
data->uvd_dpm.soft_max_clk = clock;
data->uvd_dpm.hard_max_clk = clock;
return 0;
}
static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
data->vce_dpm.soft_min_clk = 0;
data->vce_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
if (level < table->count)
clock = table->entries[level].ecclk;
else
clock = table->entries[table->count - 1].ecclk;
data->vce_dpm.soft_max_clk = clock;
data->vce_dpm.hard_max_clk = clock;
return 0;
}
static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
unsigned long clock = 0;
uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
data->acp_dpm.soft_min_clk = 0;
data->acp_dpm.hard_min_clk = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
if (level < table->count)
clock = table->entries[level].acpclk;
else
clock = table->entries[table->count - 1].acpclk;
data->acp_dpm.soft_max_clk = clock;
data->acp_dpm.hard_max_clk = clock;
return 0;
}
static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->uvd_power_gated = false;
data->vce_power_gated = false;
data->samu_power_gated = false;
#ifdef CONFIG_DRM_AMD_ACP
data->acp_power_gated = false;
#else
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
data->acp_power_gated = true;
#endif
}
static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->low_sclk_interrupt_threshold = 0;
}
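/*
 * Re-sync the SCLK limits with the current display configuration: push a
 * new hard minimum from min_core_set_clock, raise the soft minimum to a
 * 75% floor when StablePState is active (and pin the soft maximum to the
 * same value), and only message the SMC for limits that actually changed.
 */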
static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0;
unsigned long level;
unsigned long stable_pstate_sclk;
unsigned long percentage;
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
data->sclk_dpm.soft_max_clk = table->entries[level].clk;
else
data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config->min_core_set_clock;
if (clock == 0)
pr_debug("min_core_set_clock not set\n");
if (data->sclk_dpm.hard_min_clk != clock) {
data->sclk_dpm.hard_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.hard_min_clk,
PPSMC_MSG_SetSclkHardMin),
NULL);
}
clock = data->sclk_dpm.soft_min_clk;
/* update minimum clocks for Stable P-State feature */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState)) {
percentage = 75;
/* Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
percentage) / 100;
if (clock < stable_pstate_sclk)
clock = stable_pstate_sclk;
}
if (data->sclk_dpm.soft_min_clk != clock) {
data->sclk_dpm.soft_min_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin),
NULL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState) &&
data->sclk_dpm.soft_max_clk != clock) {
data->sclk_dpm.soft_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax),
NULL);
}
return 0;
}
static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
if (clks == 0)
clks = SMU8_MIN_DEEP_SLEEP_SCLK;
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
clks,
NULL);
}
return 0;
}
static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data =
hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
data->sclk_dpm.soft_max_clk,
NULL);
return 0;
}
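/*
 * Toggle the low-memory NB P-state. The enable flag selects between the
 * Enable/DisableLowMemoryPstate messages and the lock flag is passed
 * through as the message argument. A no-op while NB DPM is disabled.
 */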
static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
if (hw_data->is_nb_dpm_enabled) {
if (enable) {
PP_DBG_LOG("enable Low Memory PState.\n");
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableLowMemoryPstate,
(lock ? 1 : 0),
NULL);
} else {
PP_DBG_LOG("disable Low Memory PState.\n");
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableLowMemoryPstate,
(lock ? 1 : 0),
NULL);
}
}
return 0;
}
static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
if (data->is_nb_dpm_enabled) {
smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features,
NULL);
if (ret == 0)
data->is_nb_dpm_enabled = false;
}
return ret;
}
static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
if (!data->is_nb_dpm_enabled) {
PP_DBG_LOG("enabling ALL SMU features.\n");
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features,
NULL);
if (ret == 0)
data->is_nb_dpm_enabled = true;
}
return ret;
}
static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
struct smu8_hwmgr *hw_data = hwmgr->backend;
const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
if (hw_data->sys_info.nb_dpm_enable) {
disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
if (pnew_state->action == FORCE_HIGH)
smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
else if (pnew_state->action == CANCEL_FORCE_HIGH)
smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
else
smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
}
return 0;
}
static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int ret = 0;
smu8_update_sclk_limit(hwmgr);
smu8_set_deep_sleep_sclk_threshold(hwmgr);
smu8_set_watermark_threshold(hwmgr);
ret = smu8_enable_nb_dpm(hwmgr);
if (ret)
return ret;
smu8_update_low_mem_pstate(hwmgr, input);
return 0;
}
static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int ret;
ret = smu8_upload_pptable_to_smu(hwmgr);
if (ret)
return ret;
ret = smu8_init_sclk_limit(hwmgr);
if (ret)
return ret;
ret = smu8_init_uvd_limit(hwmgr);
if (ret)
return ret;
ret = smu8_init_vce_limit(hwmgr);
if (ret)
return ret;
ret = smu8_init_acp_limit(hwmgr);
if (ret)
return ret;
smu8_init_power_gate_state(hwmgr);
smu8_init_sclk_threshold(hwmgr);
return 0;
}
static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
}
static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->is_nb_dpm_enabled = false;
}
static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
hw_data->cc6_settings.cc6_setting_changed = false;
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
hw_data->cc6_settings.cpu_cc6_disable = false;
hw_data->cc6_settings.cpu_pstate_disable = false;
}
static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixCG_FREQ_TRAN_VOTING_0,
SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}
static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixCG_FREQ_TRAN_VOTING_0, 0);
}
static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->dpm_flags |= DPMFlags_SCLK_Enabled;
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
SCLK_DPM_MASK,
NULL);
}
static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct smu8_hwmgr *data = hwmgr->backend;
unsigned long dpm_features = 0;
if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
dpm_features |= SCLK_DPM_MASK;
data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features,
NULL);
}
return ret;
}
static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin),
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax),
NULL);
return 0;
}
static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
data->acp_boot_level = 0xff;
}
static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
hwmgr->pstate_sclk = table->entries[0].clk / 100;
hwmgr->pstate_mclk = 0;
hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
hwmgr->pstate_mclk_peak = 0;
}
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_program_voting_clients(hwmgr);
if (smu8_start_dpm(hwmgr))
return -EINVAL;
smu8_program_bootup_state(hwmgr);
smu8_reset_acp_boot_level(hwmgr);
smu8_populate_umdpstate_clocks(hwmgr);
return 0;
}
static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_disable_nb_dpm(hwmgr);
smu8_clear_voting_clients(hwmgr);
if (smu8_stop_dpm(hwmgr))
return -EINVAL;
return 0;
}
static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
smu8_disable_dpm_tasks(hwmgr);
smu8_power_up_display_clock_sys_pll(hwmgr);
smu8_clear_nb_dpm_flag(hwmgr);
smu8_reset_cc6_data(hwmgr);
return 0;
}
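/*
 * Decide the NB P-state action for the requested power state: force the
 * high memory P-state when the required memory clock exceeds the highest
 * NB P-state clock or three or more displays are active, cancel a
 * previous force once it no longer applies, otherwise do nothing.
 */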
static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
struct smu8_power_state *smu8_ps =
cast_smu8_power_state(&prequest_ps->hardware);
const struct smu8_power_state *smu8_current_ps =
cast_const_smu8_power_state(&pcurrent_ps->hardware);
struct smu8_hwmgr *data = hwmgr->backend;
struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
smu8_ps->need_dfs_bypass = true;
data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
hwmgr->display_config->min_mem_set_clock :
data->sys_info.nbp_memory_clock[1];
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
|| (hwmgr->display_config->num_display >= 3);
smu8_ps->action = smu8_current_ps->action;
if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
else if (!force_high && (smu8_ps->action == FORCE_HIGH))
smu8_ps->action = CANCEL_FORCE_HIGH;
else if (force_high && (smu8_ps->action != FORCE_HIGH))
smu8_ps->action = FORCE_HIGH;
else
smu8_ps->action = DO_NOTHING;
return 0;
}
static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct smu8_hwmgr *data;
data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
result = smu8_initialize_dpm_defaults(hwmgr);
if (result != 0) {
pr_err("smu8_initialize_dpm_defaults failed\n");
return result;
}
result = smu8_get_system_info_data(hwmgr);
if (result != 0) {
pr_err("smu8_get_system_info_data failed\n");
return result;
}
smu8_construct_boot_state(hwmgr);
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;
return result;
}
static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
}
return 0;
}
static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin),
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax),
NULL);
return 0;
}
static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long clock = 0, level;
if (NULL == table || table->count <= 0)
return -EINVAL;
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clock = table->entries[level].clk;
else
clock = table->entries[table->count - 1].clk;
data->sclk_dpm.soft_max_clk = clock;
data->sclk_dpm.hard_max_clk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin),
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMax),
NULL);
return 0;
}
static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax),
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin),
NULL);
return 0;
}
static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = smu8_phm_force_dpm_highest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = smu8_phm_force_dpm_lowest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu8_phm_unforce_dpm_levels(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
return ret;
}
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
return 0;
}
static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
NULL);
}
return 0;
}
static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (PP_CAP(PHM_PlatformCaps_StablePState) ||
hwmgr->en_umd_pstate) {
data->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
smu8_get_eclk_level(hwmgr,
data->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin),
NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
0,
NULL);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin,
1,
NULL);
}
return 0;
}
static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF,
NULL);
return 0;
}
static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON,
NULL);
return 0;
}
static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct smu8_hwmgr *data = hwmgr->backend;
return data->sys_info.bootup_uma_clock;
}
static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu8_power_state *smu8_ps;
if (hwmgr == NULL)
return -EINVAL;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
smu8_ps = cast_smu8_power_state(&ps->hardware);
if (low)
return smu8_ps->levels[0].engineClock;
else
return smu8_ps->levels[smu8_ps->level-1].engineClock;
}
static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
smu8_ps->level = 1;
smu8_ps->nbps_flags = 0;
smu8_ps->bapm_flags = 0;
smu8_ps->levels[0] = data->boot_power_level;
return 0;
}
static int smu8_dpm_get_pp_table_entry_callback(
struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps,
unsigned int index,
const void *clock_info)
{
struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
uint8_t clock_info_index = smu8_clock_info->index;
if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
smu8_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
smu8_ps->levels[index].dsDividerIndex = 5;
smu8_ps->levels[index].ssDividerIndex = 5;
}
return 0;
}
static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
int result;
unsigned long ret = 0;
result = pp_tables_get_num_of_entries(hwmgr, &ret);
return result ? 0 : ret;
}
static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry, struct pp_power_state *ps)
{
int result;
struct smu8_power_state *smu8_ps;
ps->hardware.magic = smu8_magic;
smu8_ps = cast_smu8_power_state(&(ps->hardware));
result = pp_tables_get_entry(hwmgr, entry, ps,
smu8_dpm_get_pp_table_entry_callback);
smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
return result;
}
static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct smu8_power_state);
}
static void smu8_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
PP_DBG_LOG("New Display Configuration:\n");
PP_DBG_LOG(" cpu_cc6_disable: %d\n",
cc6_settings->cpu_cc6_disable);
PP_DBG_LOG(" cpu_pstate_disable: %d\n",
cc6_settings->cpu_pstate_disable);
PP_DBG_LOG(" nb_pstate_switch_disable: %d\n",
cc6_settings->nb_pstate_switch_disable);
PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n",
cc6_settings->cpu_pstate_separation_time);
}
static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
uint32_t data = 0;
if (hw_data->cc6_settings.cc6_setting_changed) {
hw_data->cc6_settings.cc6_setting_changed = false;
smu8_hw_print_display_cfg(&hw_data->cc6_settings);
data |= (hw_data->cc6_settings.cpu_pstate_separation_time
& PWRMGT_SEPARATION_TIME_MASK)
<< PWRMGT_SEPARATION_TIME_SHIFT;
data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
data);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
data,
NULL);
}
return 0;
}
static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
struct smu8_hwmgr *hw_data = hwmgr->backend;
if (separation_time !=
hw_data->cc6_settings.cpu_pstate_separation_time ||
cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
hw_data->cc6_settings.cc6_setting_changed = true;
hw_data->cc6_settings.cpu_pstate_separation_time =
separation_time;
hw_data->cc6_settings.cpu_cc6_disable =
cc6_disable;
hw_data->cc6_settings.cpu_pstate_disable =
pstate_disable;
hw_data->cc6_settings.nb_pstate_switch_disable =
pstate_switch_disable;
}
return 0;
}
static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
uint32_t i;
const struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
const struct phm_clock_and_voltage_limits *limits =
&hwmgr->dyn_state.max_clock_voltage_on_ac;
info->engine_max_clock = limits->sclk;
info->memory_max_clock = limits->mclk;
for (i = table->count - 1; i > 0; i--) {
if (limits->vddc >= table->entries[i].v) {
info->level = table->entries[i].clk;
return 0;
}
}
return -EINVAL;
}
static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
mask,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
mask,
NULL);
break;
default:
break;
}
return 0;
}
static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *sclk_table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
uint32_t i, now;
int size = 0;
switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX,
CURR_SCLK_INDEX);
for (i = 0; i < sclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
case PP_MCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX,
CURR_MCLK_INDEX);
for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
size += sprintf(buf + size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
default:
break;
}
return size;
}
static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
const struct smu8_power_state *ps;
struct smu8_hwmgr *data;
uint32_t level_index;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
data = hwmgr->backend;
ps = cast_const_smu8_power_state(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
level->coreClock = ps->levels[level_index].engineClock;
if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
for (i = 1; i < ps->level; i++) {
if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
level->coreClock = ps->levels[i].engineClock;
break;
}
}
}
if (level_index == 0)
level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
else
level->memory_clock = data->sys_info.nbp_memory_clock[0];
level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
return 0;
}
static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
return 0;
}
static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
struct smu8_hwmgr *data = hwmgr->backend;
int i;
struct phm_clock_voltage_dependency_table *table;
clocks->count = smu8_get_max_sclk_level(hwmgr);
switch (type) {
case amd_pp_disp_clock:
for (i = 0; i < clocks->count; i++)
clocks->clock[i] = data->sys_info.display_clock[i] * 10;
break;
case amd_pp_sys_clock:
table = hwmgr->dyn_state.vddc_dependency_on_sclk;
for (i = 0; i < clocks->count; i++)
clocks->clock[i] = table->entries[i].clk * 10;
break;
case amd_pp_mem_clock:
clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
for (i = 0; i < clocks->count; i++)
clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
break;
default:
return -EINVAL;
}
return 0;
}
static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
unsigned long level;
const struct phm_clock_and_voltage_limits *limits =
&hwmgr->dyn_state.max_clock_voltage_on_ac;
if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
return -EINVAL;
level = smu8_get_max_sclk_level(hwmgr) - 1;
if (level < table->count)
clocks->engine_max_clock = table->entries[level].clk;
else
clocks->engine_max_clock = table->entries[table->count - 1].clk;
clocks->memory_max_clock = limits->mclk;
return 0;
}
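/*
 * Read the on-die thermal sensor. The raw CUR_TEMP field is in 1/8
 * degree steps; when CUR_TEMP_RANGE_SEL is set the extended range
 * applies a -49 degC offset. The result is scaled to PP temperature
 * units.
 */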
static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int actual_temp = 0;
uint32_t val = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else
actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return actual_temp;
}
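/*
 * Service the AMDGPU_PP_SENSOR_* queries. Current SCLK/UVD/VCE level
 * indices are read back from the TARGET_AND_CURRENT_PROFILE_INDEX
 * registers and translated through the dependency tables; every sensor
 * returns a 4-byte value.
 */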
static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dependency_on_sclk;
struct phm_vce_clock_voltage_dependency_table *vce_table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
struct phm_uvd_clock_voltage_dependency_table *uvd_table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
uint16_t vddnb, vddgfx;
int result;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
return -EINVAL;
*size = 4;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
if (sclk_index < NUM_SCLK_LEVELS) {
sclk = table->entries[sclk_index].clk;
*((uint32_t *)value) = sclk;
return 0;
}
return -EINVAL;
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
if (!data->uvd_power_gated) {
if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
vclk = uvd_table->entries[uvd_index].vclk;
*((uint32_t *)value) = vclk;
return 0;
}
}
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_UVD_DCLK:
if (!data->uvd_power_gated) {
if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
dclk = uvd_table->entries[uvd_index].dclk;
*((uint32_t *)value) = dclk;
return 0;
}
}
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_VCE_ECCLK:
if (!data->vce_power_gated) {
if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
return -EINVAL;
} else {
ecclk = vce_table->entries[vce_index].ecclk;
*((uint32_t *)value) = ecclk;
return 0;
}
}
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
result = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetAverageGraphicsActivity,
&activity_percent);
		if (result)
			return -EIO;
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
*((uint32_t *)value) = activity_percent;
return 0;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_VCE_POWER:
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
return 0;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
return 0;
default:
return -EOPNOTSUPP;
}
}
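/*
 * Hand the CAC buffer location to the SMU.  The 64-bit virtual and MC
 * addresses and the buffer size are delivered one 32-bit message at a
 * time, as each PPSMC message only carries a single dword parameter.
 */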
static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
uint32_t virtual_addr_hi,
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiVirtual,
mc_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoVirtual,
mc_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiPhysical,
virtual_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoPhysical,
virtual_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramBufferSize,
size,
NULL);
return 0;
}
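/*
 * Report the supported temperature window: start from the default SMU7
 * thermal policy and raise the maximum to the auto-throttling threshold
 * plus the HTC hysteresis read from the system info table.
 */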
static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
struct smu8_hwmgr *data = hwmgr->backend;
memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
thermal_data->max = (data->thermal_auto_throttling_treshold +
data->sys_info.htc_hyst_lmt) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct smu8_hwmgr *data = hwmgr->backend;
uint32_t dpm_features = 0;
if (enable &&
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDPM)) {
data->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features,
NULL);
} else {
dpm_features |= UVD_DPM_MASK;
data->dpm_flags &= ~DPMFlags_UVD_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features,
NULL);
}
return 0;
}
static int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *ptable =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	if (!bgate) {
		/* If Stable Pstate is enabled, force the UVD DPM to the highest level */
		if (PP_CAP(PHM_PlatformCaps_StablePState) ||
		    hwmgr->en_umd_pstate) {
			data->uvd_dpm.hard_min_clk =
				ptable->entries[ptable->count - 1].vclk;
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetUvdHardMin,
				smu8_get_uvd_level(hwmgr,
					data->uvd_dpm.hard_min_clk,
					PPSMC_MSG_SetUvdHardMin),
				NULL);
		}
		smu8_enable_disable_uvd_dpm(hwmgr, true);
	} else {
		smu8_enable_disable_uvd_dpm(hwmgr, false);
	}
return 0;
}
static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct smu8_hwmgr *data = hwmgr->backend;
uint32_t dpm_features = 0;
if (enable && phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM)) {
data->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features,
NULL);
} else {
dpm_features |= VCE_DPM_MASK;
data->dpm_flags &= ~DPMFlags_VCE_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
dpm_features,
NULL);
}
return 0;
}
static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
if (data->acp_power_gated == bgate)
return;
if (bgate)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
else
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
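/*
 * UVD power gating: on gate, clock/power gating is applied and UVD DPM
 * disabled before the block is powered down; the ungate path mirrors this
 * in reverse.  On Stoney the NB low-memory pstate is also toggled for
 * 4K-wide decode sessions (see WIDTH_4K below).
 */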
#define WIDTH_4K 3840
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
struct amdgpu_device *adev = hwmgr->adev;
data->uvd_power_gated = bgate;
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
smu8_dpm_update_uvd_dpm(hwmgr, true);
smu8_dpm_powerdown_uvd(hwmgr);
} else {
smu8_dpm_powerup_uvd(hwmgr);
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
smu8_dpm_update_uvd_dpm(hwmgr, false);
}
/* enable/disable Low Memory PState for UVD (4k videos) */
if (adev->asic_type == CHIP_STONEY &&
adev->uvd.decode_image_width >= WIDTH_4K)
smu8_nbdpm_pstate_enable_disable(hwmgr,
bgate,
true);
}
static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
smu8_enable_disable_vce_dpm(hwmgr, false);
smu8_dpm_powerdown_vce(hwmgr);
data->vce_power_gated = true;
} else {
smu8_dpm_powerup_vce(hwmgr);
data->vce_power_gated = false;
amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
smu8_dpm_update_vce_dpm(hwmgr);
smu8_enable_disable_vce_dpm(hwmgr, true);
}
}
static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.backend_init = smu8_hwmgr_backend_init,
.backend_fini = smu8_hwmgr_backend_fini,
.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
.force_dpm_level = smu8_dpm_force_dpm_level,
.get_power_state_size = smu8_get_power_state_size,
.powerdown_uvd = smu8_dpm_powerdown_uvd,
.powergate_uvd = smu8_dpm_powergate_uvd,
.powergate_vce = smu8_dpm_powergate_vce,
.powergate_acp = smu8_dpm_powergate_acp,
.get_mclk = smu8_dpm_get_mclk,
.get_sclk = smu8_dpm_get_sclk,
.patch_boot_state = smu8_dpm_patch_boot_state,
.get_pp_table_entry = smu8_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
.set_cpu_power_state = smu8_set_cpu_power_state,
.store_cc6_data = smu8_store_cc6_data,
.force_clock_level = smu8_force_clock_level,
.print_clock_levels = smu8_print_clock_levels,
.get_dal_power_level = smu8_get_dal_power_level,
.get_performance_level = smu8_get_performance_level,
.get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
.get_clock_by_type = smu8_get_clock_by_type,
.get_max_high_clocks = smu8_get_max_high_clocks,
.read_sensor = smu8_read_sensor,
.power_off_asic = smu8_power_off_asic,
.asic_setup = smu8_setup_asic_task,
.dynamic_state_management_enable = smu8_enable_dpm_tasks,
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "ci_baco.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
{ CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
{ CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
{ CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
{ CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
{ CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
};
static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
{ CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
};
static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
};
#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | \
				 BACO_CNTL__PWRGOOD_MEM_MASK | \
				 BACO_CNTL__PWRGOOD_DVO_MASK)
static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
};
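/*
 * Drive the BACO (Bus Active, Chip Off) state machine.  Entry walks the
 * GPIO, framebuffer-reject, bypass-clock and PLL-shutdown tables before
 * asserting BACO; exit waits out the mandatory regulator delay, then runs
 * the exit and cleanup sequences.  A successful table run returns 0, any
 * failure falls through to -EINVAL.
 */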
int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
enum BACO_STATE cur_state;
smu7_baco_get_state(hwmgr, &cur_state);
if (cur_state == state)
		/* asic already in the target state */
return 0;
if (state == BACO_STATE_IN) {
baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
ARRAY_SIZE(enable_fb_req_rej_tbl));
baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
baco_program_registers(hwmgr, turn_off_plls_tbl,
ARRAY_SIZE(turn_off_plls_tbl));
if (baco_program_registers(hwmgr, enter_baco_tbl,
ARRAY_SIZE(enter_baco_tbl)))
return 0;
} else if (state == BACO_STATE_OUT) {
/* HW requires at least 20ms between regulator off and on */
msleep(20);
/* Execute Hardware BACO exit sequence */
if (baco_program_registers(hwmgr, exit_baco_tbl,
ARRAY_SIZE(exit_baco_tbl))) {
if (baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
return 0;
}
}
return -EINVAL;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8MHz, the lowest engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT 12
#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100MHz */
#define SMC_RAM_END 0x40000
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
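/*
 * Translate a display clock-voltage request into the matching hard-minimum
 * SMU message.  DCEF and F clock requests are cached so that a repeated
 * request for the already-programmed frequency is a no-op.
 */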
static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
enum amd_pp_clock_type clk_type = clock_req->clock_type;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
PPSMC_Msg msg;
switch (clk_type) {
case amd_pp_dcf_clock:
if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
return 0;
msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
smu10_data->dcf_actual_hard_min_freq = clk_freq;
break;
case amd_pp_soc_clock:
msg = PPSMC_MSG_SetHardMinSocclkByFreq;
break;
case amd_pp_f_clock:
if (clk_freq == smu10_data->f_actual_hard_min_freq)
return 0;
smu10_data->f_actual_hard_min_freq = clk_freq;
msg = PPSMC_MSG_SetHardMinFclkByFreq;
break;
default:
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
return -EINVAL;
}
smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
return 0;
}
static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (hw_ps->magic != SMU10_Magic)
return NULL;
return (struct smu10_power_state *)hw_ps;
}
static const struct smu10_power_state *cast_const_smu10_ps(
const struct pp_hw_power_state *hw_ps)
{
	if (hw_ps->magic != SMU10_Magic)
return NULL;
return (struct smu10_power_state *)hw_ps;
}
static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
smu10_data->dce_slow_sclk_threshold = 30000;
smu10_data->thermal_auto_throttling_treshold = 0;
smu10_data->is_nb_dpm_enabled = 1;
smu10_data->dpm_flags = 1;
smu10_data->need_min_deep_sleep_dcefclk = true;
smu10_data->num_active_display = 0;
smu10_data->deep_sleep_dcefclk = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerPlaySupport);
return 0;
}
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *table)
{
return 0;
}
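/*
 * Build the eight-entry DAL power level to voltage index table exposed
 * through dyn_state.vddc_dep_on_dal_pwrl; each DAL level maps 1:1 onto a
 * voltage index.
 */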
static int smu10_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr)
{
int count = 8;
struct phm_clock_voltage_dependency_table *table_clk_vlt;
table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, count),
GFP_KERNEL);
	if (!table_clk_vlt) {
		pr_err("Cannot allocate memory!\n");
		return -ENOMEM;
	}
table_clk_vlt->count = count;
table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
table_clk_vlt->entries[0].v = 0;
table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
table_clk_vlt->entries[1].v = 1;
table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
table_clk_vlt->entries[2].v = 2;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
table_clk_vlt->entries[3].v = 3;
table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
table_clk_vlt->entries[4].v = 4;
table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
table_clk_vlt->entries[5].v = 5;
table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
table_clk_vlt->entries[6].v = 6;
table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
table_clk_vlt->entries[7].v = 7;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
return 0;
}
static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
smu10_data->sys_info.htc_hyst_lmt = 5;
smu10_data->sys_info.htc_tmp_lmt = 203;
if (smu10_data->thermal_auto_throttling_treshold == 0)
smu10_data->thermal_auto_throttling_treshold = 203;
smu10_construct_max_power_limits_table (hwmgr,
&hwmgr->dyn_state.max_clock_voltage_on_ac);
smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
return 0;
}
static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
return 0;
}
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (clock && smu10_data->deep_sleep_dcefclk != clock) {
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
smu10_data->deep_sleep_dcefclk,
NULL);
}
return 0;
}
static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
smu10_data->dcf_actual_hard_min_freq,
NULL);
}
return 0;
}
static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (clock && smu10_data->f_actual_hard_min_freq != clock) {
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
smu10_data->f_actual_hard_min_freq,
NULL);
}
return 0;
}
static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
clock,
NULL);
}
return 0;
}
static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (clock && smu10_data->gfx_max_freq_limit != (clock * 100)) {
smu10_data->gfx_max_freq_limit = clock * 100;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
clock,
NULL);
}
return 0;
}
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->num_active_display != count) {
smu10_data->num_active_display = count;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
smu10_data->num_active_display,
NULL);
}
return 0;
}
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
return smu10_set_clock_limit(hwmgr, input);
}
static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
smu10_data->vcn_power_gated = true;
smu10_data->isp_tileA_power_gated = true;
smu10_data->isp_tileB_power_gated = true;
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGfxCGPG,
true,
NULL);
else
return 0;
}
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
return smu10_init_power_gate_state(hwmgr);
}
static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
smu10_data->separation_time = 0;
smu10_data->cc6_disable = false;
smu10_data->pstate_disable = false;
smu10_data->cc6_setting_changed = false;
return 0;
}
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
return smu10_reset_cc6_data(hwmgr);
}
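/* GFXOFF status: a PWR_GFXOFF_STATUS field value of 0x2 means GFX is on. */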
static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
struct amdgpu_device *adev = hwmgr->adev;
reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
(0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
return true;
return false;
}
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
/* confirm gfx is back to "on" state */
while (!smu10_is_gfx_on(hwmgr))
msleep(1);
}
return 0;
}
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
return 0;
}
static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetMaxGfxclkFrequency,
&hwmgr->pstate_sclk_peak);
hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
}
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
int ret = -EINVAL;
if (adev->in_suspend) {
pr_info("restore the fine grain parameters\n");
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
smu10_data->gfx_actual_soft_min_freq,
NULL);
if (ret)
return ret;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
smu10_data->gfx_actual_soft_max_freq,
NULL);
if (ret)
return ret;
}
smu10_populate_umdpstate_clocks(hwmgr);
return 0;
}
static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
if (enable)
return smu10_enable_gfx_off(hwmgr);
else
return smu10_disable_gfx_off(hwmgr);
}
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
return 0;
}
/* temporary hardcoded clock voltage breakdown tables */
static const DpmClock_t VddDcfClk[] = {
{ 300, 2600},
{ 600, 3200},
{ 600, 3600},
};
static const DpmClock_t VddSocClk[] = {
{ 478, 2600},
{ 722, 3200},
{ 722, 3600},
};
static const DpmClock_t VddFClk[] = {
{ 400, 2600},
{1200, 3200},
{1200, 3600},
};
static const DpmClock_t VddDispClk[] = {
{ 435, 2600},
{ 661, 3200},
{1086, 3600},
};
static const DpmClock_t VddDppClk[] = {
{ 435, 2600},
{ 661, 3200},
{ 661, 3600},
};
static const DpmClock_t VddPhyClk[] = {
{ 540, 2600},
{ 810, 3200},
{ 810, 3600},
};
static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t i;
struct smu10_voltage_dependency_table *ptable;
ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
	if (!ptable)
		return -ENOMEM;
ptable->count = num_entry;
for (i = 0; i < ptable->count; i++) {
ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
ptable->entries[i].vol = pclk_dependency_table->Vol;
pclk_dependency_table++;
}
*pptable = ptable;
return 0;
}
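/*
 * Pull the DPM clock/voltage tables out of the SMU.  If the firmware copy
 * is unpopulated (DCEF level 0 reads back 0), fall back to the hardcoded
 * tables above.  Entry frequencies arrive in MHz and are stored in 10KHz
 * units, as are the min/max gfxclk limits queried at the end.
 */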
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
uint32_t result;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
DpmClocks_t *table = &(smu10_data->clock_table);
struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
PP_ASSERT_WITH_CODE((0 == result),
"Attempt to copy clock table from smc failed",
return result);
	if (table->DcefClocks[0].Freq != 0) {
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
NUM_DCEFCLK_DPM_LEVELS,
&smu10_data->clock_table.DcefClocks[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
NUM_SOCCLK_DPM_LEVELS,
&smu10_data->clock_table.SocClocks[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
NUM_FCLK_DPM_LEVELS,
&smu10_data->clock_table.FClocks[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
NUM_MEMCLK_DPM_LEVELS,
&smu10_data->clock_table.MemClocks[0]);
} else {
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
ARRAY_SIZE(VddDcfClk),
&VddDcfClk[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
ARRAY_SIZE(VddSocClk),
&VddSocClk[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
ARRAY_SIZE(VddFClk),
&VddFClk[0]);
}
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
ARRAY_SIZE(VddDispClk),
&VddDispClk[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
smu10_data->gfx_min_freq_limit = result / 10 * 1000;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
smu10_data->gfx_max_freq_limit = result / 10 * 1000;
return 0;
}
static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct smu10_hwmgr *data;
data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
result = smu10_initialize_dpm_defaults(hwmgr);
if (result != 0) {
pr_err("smu10_initialize_dpm_defaults failed\n");
return result;
}
smu10_populate_clock_table(hwmgr);
result = smu10_get_system_info_data(hwmgr);
if (result != 0) {
pr_err("smu10_get_system_info_data failed\n");
return result;
}
smu10_construct_boot_state(hwmgr);
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
SMU10_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels =
SMU10_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.vbiosInterruptId = 0;
hwmgr->platform_descriptor.clockStep.engineClock = 500;
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
/* disabled fine grain tuning function by default */
data->fine_grain_enabled = 0;
return result;
}
static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
kfree(pinfo->vdd_dep_on_dcefclk);
pinfo->vdd_dep_on_dcefclk = NULL;
kfree(pinfo->vdd_dep_on_socclk);
pinfo->vdd_dep_on_socclk = NULL;
kfree(pinfo->vdd_dep_on_fclk);
pinfo->vdd_dep_on_fclk = NULL;
kfree(pinfo->vdd_dep_on_dispclk);
pinfo->vdd_dep_on_dispclk = NULL;
kfree(pinfo->vdd_dep_on_dppclk);
pinfo->vdd_dep_on_dppclk = NULL;
kfree(pinfo->vdd_dep_on_phyclk);
pinfo->vdd_dep_on_phyclk = NULL;
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
return 0;
}
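/*
 * Apply a forced DPM level by reprogramming the hard-minimum and
 * soft-maximum clock pair for each domain.  Every non-manual level first
 * disables fine-grain tuning and resets the soft limits to the
 * SMU-reported min/max range; AMD_DPM_FORCED_LEVEL_MANUAL only re-enables
 * fine-grain control and leaves the clocks untouched.
 */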
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
struct smu10_hwmgr *data = hwmgr->backend;
uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
return 0;
}
if (min_sclk < data->gfx_min_freq_limit)
min_sclk = data->gfx_min_freq_limit;
	min_sclk /= 100; /* convert from 10KHz to MHz */
if (min_mclk < data->clock_table.FClocks[0].Freq)
min_mclk = data->clock_table.FClocks[0].Freq;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
SMU10_UMD_PSTATE_PEAK_FCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
SMU10_UMD_PSTATE_PEAK_SOCCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
SMU10_UMD_PSTATE_VCE,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
data->gfx_max_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
SMU10_UMD_PSTATE_PEAK_FCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
SMU10_UMD_PSTATE_PEAK_SOCCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
SMU10_UMD_PSTATE_VCE,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
min_sclk,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
min_mclk,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
SMU10_UMD_PSTATE_FCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
SMU10_UMD_PSTATE_SOCCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
SMU10_UMD_PSTATE_PROFILE_VCE,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
SMU10_UMD_PSTATE_FCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
SMU10_UMD_PSTATE_SOCCLK,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
SMU10_UMD_PSTATE_PROFILE_VCE,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
(data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
SMU10_UMD_PSTATE_MIN_VCE,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
data->gfx_max_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
SMU10_UMD_PSTATE_VCE,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
data->gfx_min_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
min_mclk,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
data->fine_grain_enabled = 1;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
return 0;
}
static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct smu10_hwmgr *data;
if (hwmgr == NULL)
return -EINVAL;
data = (struct smu10_hwmgr *)(hwmgr->backend);
if (low)
return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
else
return data->clock_vol_info.vdd_dep_on_fclk->entries[
data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}
static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct smu10_hwmgr *data;
if (hwmgr == NULL)
return -EINVAL;
data = (struct smu10_hwmgr *)(hwmgr->backend);
if (low)
return data->gfx_min_freq_limit;
else
return data->gfx_max_freq_limit;
}
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
return 0;
}
static int smu10_dpm_get_pp_table_entry_callback(
struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps,
unsigned int index,
const void *clock_info)
{
struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
smu10_ps->levels[index].engine_clock = 0;
smu10_ps->levels[index].vddc_index = 0;
smu10_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
smu10_ps->levels[index].ds_divider_index = 5;
smu10_ps->levels[index].ss_divider_index = 5;
}
return 0;
}
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
int result;
unsigned long ret = 0;
result = pp_tables_get_num_of_entries(hwmgr, &ret);
return result ? 0 : ret;
}
static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry, struct pp_power_state *ps)
{
int result;
struct smu10_power_state *smu10_ps;
ps->hardware.magic = SMU10_Magic;
smu10_ps = cast_smu10_ps(&(ps->hardware));
result = pp_tables_get_entry(hwmgr, entry, ps,
smu10_dpm_get_pp_table_entry_callback);
smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
return result;
}
static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct smu10_power_state);
}
static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
if (separation_time != data->separation_time ||
cc6_disable != data->cc6_disable ||
pstate_disable != data->pstate_disable) {
data->separation_time = separation_time;
data->cc6_disable = cc6_disable;
data->pstate_disable = pstate_disable;
data->cc6_setting_changed = true;
}
return 0;
}
static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
return -EINVAL;
}
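/*
 * Force a clock level range from a sysfs level mask: the lowest set bit
 * picks the hard minimum and the highest set bit the soft maximum.  SCLK
 * exposes exactly three synthetic levels (min, SMU10_UMD_PSTATE_GFXCLK,
 * max) on this ASIC, while MCLK levels come from the fclk table.
 */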
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct smu10_hwmgr *data = hwmgr->backend;
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t low, high;
low = mask ? (ffs(mask) - 1) : 0;
high = mask ? (fls(mask) - 1) : 0;
switch (type) {
case PP_SCLK:
if (low > 2 || high > 2) {
pr_info("Currently sclk only support 3 levels on RV\n");
return -EINVAL;
}
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
low == 2 ? data->gfx_max_freq_limit/100 :
low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
data->gfx_min_freq_limit/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
high == 0 ? data->gfx_min_freq_limit/100 :
high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
data->gfx_max_freq_limit/100,
NULL);
break;
case PP_MCLK:
if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
return -EINVAL;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
mclk_table->entries[low].clk/100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
mclk_table->entries[high].clk/100,
NULL);
break;
case PP_PCIE:
default:
break;
}
return 0;
}
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
		/* driver only knows min/max gfx_clk; report level 1 for any other gfx clk */
if (now == data->gfx_max_freq_limit/100)
i = 2;
else if (now == data->gfx_min_freq_limit/100)
i = 0;
else
i = 1;
size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
size += sprintf(buf + size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
size += sprintf(buf + size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
case PP_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
== now) ? "*" : "");
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (ret)
return ret;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (ret)
return ret;
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (ret)
return ret;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (ret)
return ret;
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;
default:
break;
}
return size;
}
static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
struct smu10_hwmgr *data;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
data = (struct smu10_hwmgr *)(hwmgr->backend);
if (index == 0) {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
level->coreClock = data->gfx_min_freq_limit;
} else {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
level->coreClock = data->gfx_max_freq_limit;
}
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
return 0;
}
static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
const struct smu10_power_state *ps = cast_const_smu10_ps(state);
clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
return 0;
}
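/*
 * Coarse memory-latency model reported alongside clock levels: fclk
 * settings in the mid band pay the high latency figure, anything at or
 * above the high band gets the low figure, and clocks below the low band
 * report MEM_LATENCY_ERR.  Clocks are in 10KHz units, latencies in
 * microseconds.
 */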
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
#define MEM_LATENCY_HIGH 245
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
uint32_t clock)
{
if (clock >= MEM_FREQ_LOW_LATENCY &&
clock < MEM_FREQ_HIGH_LATENCY)
return MEM_LATENCY_HIGH;
else if (clock >= MEM_FREQ_HIGH_LATENCY)
return MEM_LATENCY_LOW;
else
return MEM_LATENCY_ERR;
}
static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
uint32_t i;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
struct smu10_voltage_dependency_table *pclk_vol_table;
bool latency_required = false;
if (pinfo == NULL)
return -EINVAL;
switch (type) {
case amd_pp_mem_clock:
pclk_vol_table = pinfo->vdd_dep_on_mclk;
latency_required = true;
break;
case amd_pp_f_clock:
pclk_vol_table = pinfo->vdd_dep_on_fclk;
latency_required = true;
break;
case amd_pp_dcf_clock:
pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
break;
case amd_pp_disp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dispclk;
break;
case amd_pp_phy_clock:
pclk_vol_table = pinfo->vdd_dep_on_phyclk;
break;
case amd_pp_dpp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
break;
default:
return -EINVAL;
}
if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
return -EINVAL;
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
if (pclk_vol_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
pclk_vol_table->entries[i].clk * 10;
clocks->data[clocks->num_levels].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
0;
clocks->num_levels++;
}
}
return 0;
}
static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
uint32_t i;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
if (pinfo == NULL)
return -EINVAL;
switch (type) {
case amd_pp_mem_clock:
pclk_vol_table = pinfo->vdd_dep_on_mclk;
break;
case amd_pp_f_clock:
pclk_vol_table = pinfo->vdd_dep_on_fclk;
break;
case amd_pp_dcf_clock:
pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
break;
case amd_pp_soc_clock:
pclk_vol_table = pinfo->vdd_dep_on_socclk;
break;
case amd_pp_disp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dispclk;
break;
case amd_pp_phy_clock:
pclk_vol_table = pinfo->vdd_dep_on_phyclk;
break;
default:
return -EINVAL;
}
if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
return -EINVAL;
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
if (pclk_vol_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
}
return 0;
}
static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000; /* driver can't get engine clock, temporarily hard-coded to 800MHz */
return 0;
}
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
int cur_temp =
(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else
cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return cur_temp;
}
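/*
 * Sensor read-out.  Clock queries go straight to the SMU and are scaled
 * from MHz into 10KHz units; GPU load relies on the GetGfxBusy message,
 * which only exists from Raven SMU firmware 30.85.00 / Picasso 4.30.59
 * onwards, so older firmware reports -EOPNOTSUPP.
 */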
static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
uint32_t sclk, mclk, activity_percent;
bool has_gfx_busy;
int ret = 0;
/* GetGfxBusy support was added on RV SMU FW 30.85.00 and PCO 4.30.59 */
if ((adev->apu_flags & AMD_APU_IS_PICASSO) &&
(hwmgr->smu_version >= 0x41e3b))
has_gfx_busy = true;
else if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x1e5500))
has_gfx_busy = true;
else
has_gfx_busy = false;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
		/* in units of 10KHz */
*((uint32_t *)value) = sclk * 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
		/* in units of 10KHz */
*((uint32_t *)value) = mclk * 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
*(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
if (!has_gfx_busy)
ret = -EOPNOTSUPP;
else {
ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetGfxBusy,
&activity_percent);
if (!ret)
*((uint32_t *)value) = min(activity_percent, (u32)100);
else
ret = -EIO;
}
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
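/*
 * Convert DM's watermark ranges into the SMU Watermarks_t layout and
 * upload the table; on Raven2 the DCFCLK and SOCCLK watermark row types
 * are cleared first.
 */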
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
void *clock_ranges)
{
struct smu10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
struct amdgpu_device *adev = hwmgr->adev;
int i;
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
for (i = 0; i < NUM_WM_RANGES; i++)
table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;
for (i = 0; i < NUM_WM_RANGES; i++)
table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
}
smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
data->water_marks_exist = true;
return 0;
}
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}
static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}
static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
if (gate)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
else
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}
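/*
 * VCN power gating: gate the IP block before asking the SMU to power VCN
 * down, and power it back up before ungating.
 */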
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0, NULL);
smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0, NULL);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
smu10_data->vcn_power_gated = false;
}
}
static int conv_power_profile_to_pplib_workload(int power_profile)
{
int pplib_workload = 0;
switch (power_profile) {
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
case PP_SMC_POWER_PROFILE_VIDEO:
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
break;
case PP_SMC_POWER_PROFILE_VR:
pplib_workload = WORKLOAD_PPLIB_VR_BIT;
break;
case PP_SMC_POWER_PROFILE_COMPUTE:
pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
break;
case PP_SMC_POWER_PROFILE_CUSTOM:
pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
break;
}
return pplib_workload;
}
static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
uint32_t i, size = 0;
static const uint8_t
profile_mode_setting[6][4] = {{70, 60, 0, 0,},
{70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
"FPS",
"USE_RLC_BUSY",
"MIN_ACTIVE_LEVEL"};
if (!buf)
return -EINVAL;
phm_get_sysfs_buf(&buf, &size);
size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n", title[0],
title[1], title[2], title[3], title[4], title[5]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
return size;
}
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x41e2b))
return true;
else
return false;
}
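/*
 * On Raven1 refresh parts GFXOFF must be disabled around the
 * ActiveProcessNotify message; gfxoff_state_changed_by_workload tracks the
 * toggle so GFXOFF is restored afterwards.
 */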
static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
int workload_type = 0;
int result = 0;
if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
pr_err("Invalid power profile mode %ld\n", input[size]);
return -EINVAL;
}
if (hwmgr->power_profile_mode == input[size])
return 0;
/* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
conv_power_profile_to_pplib_workload(input[size]);
if (workload_type &&
smu10_is_raven1_refresh(hwmgr) &&
!hwmgr->gfxoff_state_changed_by_workload) {
smu10_gfx_off_control(hwmgr, false);
hwmgr->gfxoff_state_changed_by_workload = true;
}
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
1 << workload_type,
NULL);
if (!result)
hwmgr->power_profile_mode = input[size];
if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
smu10_gfx_off_control(hwmgr, true);
hwmgr->gfxoff_state_changed_by_workload = false;
}
return 0;
}
static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DeviceDriverReset,
mode,
NULL);
}
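/*
 * Handle the pp_od_clk_voltage interface: PP_OD_EDIT_SCLK_VDDC_TABLE stores
 * a new soft min/max sclk, PP_OD_RESTORE_DEFAULT_TABLE resets both to the
 * hardware limits, and PP_OD_COMMIT_DPM_TABLE validates the range and
 * pushes it to the SMU.
 */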
static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
uint32_t min_freq, max_freq = 0;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
int ret = 0;
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
if (!smu10_data->fine_grain_enabled) {
pr_err("pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
if (size != 2) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
if (input[0] == 0) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (input[1] < min_freq) {
pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], min_freq);
return -EINVAL;
}
smu10_data->gfx_actual_soft_min_freq = input[1];
} else if (input[0] == 1) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (input[1] > max_freq) {
pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], max_freq);
return -EINVAL;
}
smu10_data->gfx_actual_soft_max_freq = input[1];
} else {
return -EINVAL;
}
} else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
if (size != 0) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
smu10_data->gfx_actual_soft_min_freq = min_freq;
smu10_data->gfx_actual_soft_max_freq = max_freq;
} else if (type == PP_OD_COMMIT_DPM_TABLE) {
if (size != 0) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
pr_err("The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
return -EINVAL;
}
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
smu10_data->gfx_actual_soft_min_freq,
NULL);
if (ret)
return ret;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
smu10_data->gfx_actual_soft_max_freq,
NULL);
if (ret)
return ret;
} else {
return -EINVAL;
}
return 0;
}
static int smu10_gfx_state_change(struct pp_hwmgr *hwmgr, uint32_t state)
{
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GpuChangeState, state, NULL);
return 0;
}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
.powerdown_uvd = NULL,
.powergate_uvd = smu10_powergate_vcn,
.powergate_vce = NULL,
.get_mclk = smu10_dpm_get_mclk,
.get_sclk = smu10_dpm_get_sclk,
.patch_boot_state = smu10_dpm_patch_boot_state,
.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
.set_cpu_power_state = smu10_set_cpu_power_state,
.store_cc6_data = smu10_store_cc6_data,
.force_clock_level = smu10_force_clock_level,
.print_clock_levels = smu10_print_clock_levels,
.get_dal_power_level = smu10_get_dal_power_level,
.get_performance_level = smu10_get_performance_level,
.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
.dynamic_state_management_enable = smu10_enable_dpm_tasks,
.power_off_asic = smu10_power_off_asic,
.asic_setup = smu10_setup_asic_task,
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
.powergate_mmhub = smu10_powergate_mmhub,
.smus_notify_pwe = smu10_smus_notify_pwe,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
.powergate_sdma = smu10_powergate_sdma,
.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
.set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
.set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
.get_power_profile_mode = smu10_get_power_profile_mode,
.set_power_profile_mode = smu10_set_power_profile_mode,
.asic_reset = smu10_asic_reset,
.set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
.gfx_state_change = smu10_gfx_state_change,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "vega20_thermal.h"
#include "vega20_hwmgr.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "pp_debug.h"
static int vega20_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = hwmgr->backend;
int ret = 0;
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
ret = vega20_enable_smc_features(
hwmgr, false,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap);
PP_ASSERT_WITH_CODE(!ret,
"Disable FAN CONTROL feature Failed!",
return ret);
data->smu_features[GNLD_FAN_CONTROL].enabled = false;
}
return ret;
}
int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_FAN_CONTROL].supported)
return vega20_disable_fan_control_feature(hwmgr);
return 0;
}
static int vega20_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = hwmgr->backend;
int ret = 0;
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
ret = vega20_enable_smc_features(
hwmgr, true,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap);
PP_ASSERT_WITH_CODE(!ret,
"Enable FAN CONTROL feature Failed!",
return ret);
data->smu_features[GNLD_FAN_CONTROL].enabled = true;
}
return ret;
}
int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_FAN_CONTROL].supported)
return vega20_enable_fan_control_feature(hwmgr);
return 0;
}
static int vega20_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
struct amdgpu_device *adev = hwmgr->adev;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TMIN, 0));
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
}
static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm,
current_rpm)) == 0,
"Attempt to get current RPM from SMC Failed!",
return ret);
return 0;
}
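/*
 * The fan duty cycle is scaled from the hardware range [0, FMAX_DUTY100]
 * to the sysfs PWM range [0, 255].
 */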
int vega20_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t duty100, duty;
uint64_t tmp64;
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
CG_THERMAL_STATUS, FDO_PWM_DUTY);
if (!duty100)
return -EINVAL;
tmp64 = (uint64_t)duty * 255;
do_div(tmp64, duty100);
*speed = MIN((uint32_t)tmp64, 255);
return 0;
}
int vega20_fan_ctrl_set_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
speed = MIN(speed, 255);
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
tmp64 = (uint64_t)speed * duty100;
do_div(tmp64, 255);
duty = (uint32_t)tmp64;
WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
memset(fan_speed_info, 0, sizeof(*fan_speed_info));
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
return 0;
}
int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
*speed = 0;
return vega20_get_current_rpm(hwmgr, speed);
}
int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t tach_period, crystal_clock_freq;
int result = 0;
if (!speed)
return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
result = vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
if (result)
return result;
}
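/*
 * Derive the tachometer period from the target RPM: the crystal clock is
 * reported in 10 kHz units (hence the factor of 10000) and 60 converts
 * revolutions per minute to revolutions per second.
 */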
crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
CG_TACH_CTRL, TARGET_PERIOD,
tach_period));
return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
/**
 * vega20_thermal_get_temperature - Reads the current temperature from the thermal controller.
 *
 * @hwmgr: The address of the hardware manager.
 */
int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
int temp = 0;
temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
/**
* vega20_thermal_set_temperature_range - Set the requested temperature range for high and low alert signals
*
* @hwmgr: The address of the hardware manager.
* @range: Temperature range to be programmed for high and low alert signals
* Exception: PP_Result_BadInput if the input data is not valid.
*/
static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
/* compare them in units of degrees Celsius */
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (high > pptable_information->us_software_shutdown_temp)
high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
return 0;
}
/**
 * vega20_thermal_enable_alert - Enable thermal alerts on the thermal controller.
 *
 * @hwmgr: The address of the hardware manager.
 */
static int vega20_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t val = 0;
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
return 0;
}
/**
 * vega20_thermal_disable_alert - Disable thermal alerts on the thermal controller.
 * @hwmgr: The address of the hardware manager.
 */
int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
return 0;
}
/**
* vega20_thermal_stop_thermal_controller - Uninitialize the thermal controller.
* Currently just disables alerts.
* @hwmgr: The address of the hardware manager.
*/
int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = vega20_thermal_disable_alert(hwmgr);
return result;
}
/**
* vega20_thermal_setup_fan_table - Set up the fan table to control the fan using the SMC.
* @hwmgr: the address of the powerplay hardware manager.
*/
static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
PPTable_t *table = &(data->smc_state_table.pp_table);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature,
NULL);
return ret;
}
int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
int ret = 0;
if (range == NULL)
return -EINVAL;
ret = vega20_thermal_set_temperature_range(hwmgr, range);
if (ret)
return ret;
ret = vega20_thermal_enable_alert(hwmgr);
if (ret)
return ret;
ret = vega20_thermal_setup_fan_table(hwmgr);
return ret;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega12_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega12_inc.h"
#include "pppcielanes.h"
#include "vega12_hwmgr.h"
#include "vega12_processpptables.h"
#include "vega12_pptable.h"
#include "vega12_thermal.h"
#include "vega12_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "vega12_baco.h"
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
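/* PCIe link widths in lanes and link speeds in units of 0.1 GT/s (2.5 to 16.0 GT/s) */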
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
uint32_t *clock,
PPCLK_e clock_select,
bool max);
static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
data->registry_data.disallowed_features = 0x0;
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
data->registry_data.log_avfs_param = 0;
data->registry_data.sclk_throttle_low_notification = 1;
data->registry_data.force_dpm_high = 0;
data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
data->registry_data.didt_support = 0;
if (data->registry_data.didt_support) {
data->registry_data.didt_mode = 6;
data->registry_data.sq_ramping_support = 1;
data->registry_data.db_ramping_support = 0;
data->registry_data.td_ramping_support = 0;
data->registry_data.tcp_ramping_support = 0;
data->registry_data.dbr_ramping_support = 0;
data->registry_data.edc_didt_support = 1;
data->registry_data.gc_didt_support = 0;
data->registry_data.psm_didt_support = 0;
}
data->registry_data.pcie_lane_override = 0xff;
data->registry_data.pcie_speed_override = 0xff;
data->registry_data.pcie_clock_override = 0xffffffff;
data->registry_data.regulator_hot_gpio_support = 1;
data->registry_data.ac_dc_switch_gpio_support = 0;
data->registry_data.quick_transition_support = 0;
data->registry_data.zrpm_start_temp = 0xffff;
data->registry_data.zrpm_stop_temp = 0xffff;
data->registry_data.odn_feature_enable = 1;
data->registry_data.disable_water_mark = 0;
data->registry_data.disable_pp_tuning = 0;
data->registry_data.disable_xlpp_tuning = 0;
data->registry_data.disable_workload_policy = 0;
data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
data->registry_data.force_workload_policy_mask = 0;
data->registry_data.disable_3d_fs_detection = 0;
data->registry_data.fps_support = 1;
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UnTabledHardwareInterface);
if (data->registry_data.odn_feature_enable)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODNinACSupport);
else {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6inACSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6PlusinACSupport);
}
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ActivityReporting);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
if (data->registry_data.od_state_in_dc_support) {
if (data->registry_data.odn_feature_enable)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODNinDCSupport);
else {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6inDCSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6PlusinDCSupport);
}
}
if (data->registry_data.thermal_support
&& data->registry_data.fuzzy_fan_control_support
&& hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODFuzzyFanControlSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPowerManagement);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMC);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalPolicyDelay);
if (data->registry_data.force_dpm_high)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
if (data->registry_data.sclk_throttle_low_notification)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification);
/* power tune caps */
/* assume disabled */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DiDtSupport);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DiDtEDCEnable);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_GCEDC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PSM);
if (data->registry_data.didt_support) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
if (data->registry_data.sq_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
if (data->registry_data.db_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
if (data->registry_data.td_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
if (data->registry_data.tcp_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
if (data->registry_data.dbr_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
if (data->registry_data.edc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
if (data->registry_data.gc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
if (data->registry_data.psm_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
}
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
if (data->registry_data.ac_dc_switch_gpio_support) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
}
if (data->registry_data.quick_transition_support) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition);
}
if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_LowestUclkReservedForUlv);
if (data->lowest_uclk_reserved_for_ulv == 1)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_LowestUclkReservedForUlv);
}
if (data->registry_data.custom_fan_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CustomFanControlSupport);
return 0;
}
static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct amdgpu_device *adev = hwmgr->adev;
uint32_t top32, bottom32;
int i;
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
FEATURE_DPM_PREFETCHER_BIT;
data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
FEATURE_DPM_GFXCLK_BIT;
data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
FEATURE_DPM_UCLK_BIT;
data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
FEATURE_DPM_SOCCLK_BIT;
data->smu_features[GNLD_DPM_UVD].smu_feature_id =
FEATURE_DPM_UVD_BIT;
data->smu_features[GNLD_DPM_VCE].smu_feature_id =
FEATURE_DPM_VCE_BIT;
data->smu_features[GNLD_ULV].smu_feature_id =
FEATURE_ULV_BIT;
data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
FEATURE_DPM_MP0CLK_BIT;
data->smu_features[GNLD_DPM_LINK].smu_feature_id =
FEATURE_DPM_LINK_BIT;
data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
FEATURE_DPM_DCEFCLK_BIT;
data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
FEATURE_DS_GFXCLK_BIT;
data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
FEATURE_DS_SOCCLK_BIT;
data->smu_features[GNLD_DS_LCLK].smu_feature_id =
FEATURE_DS_LCLK_BIT;
data->smu_features[GNLD_PPT].smu_feature_id =
FEATURE_PPT_BIT;
data->smu_features[GNLD_TDC].smu_feature_id =
FEATURE_TDC_BIT;
data->smu_features[GNLD_THERMAL].smu_feature_id =
FEATURE_THERMAL_BIT;
data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
FEATURE_GFX_PER_CU_CG_BIT;
data->smu_features[GNLD_RM].smu_feature_id =
FEATURE_RM_BIT;
data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
FEATURE_DS_DCEFCLK_BIT;
data->smu_features[GNLD_ACDC].smu_feature_id =
FEATURE_ACDC_BIT;
data->smu_features[GNLD_VR0HOT].smu_feature_id =
FEATURE_VR0HOT_BIT;
data->smu_features[GNLD_VR1HOT].smu_feature_id =
FEATURE_VR1HOT_BIT;
data->smu_features[GNLD_FW_CTF].smu_feature_id =
FEATURE_FW_CTF_BIT;
data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
FEATURE_LED_DISPLAY_BIT;
data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
FEATURE_FAN_CONTROL_BIT;
data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_bitmap =
(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
data->smu_features[i].allowed =
((data->registry_data.disallowed_features >> i) & 1) ?
false : true;
}
/* Get the serial number to turn into a unique ID */
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
return 0;
}
static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
kfree(hwmgr->backend);
hwmgr->backend = NULL;
return 0;
}
static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct vega12_hwmgr *data;
struct amdgpu_device *adev = hwmgr->adev;
data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
vega12_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
data->workload_mask = 0xff;
/* need to set voltage control types before EVV patching */
data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
data->water_marks_bitmap = 0;
data->avfs_exist = false;
vega12_set_features_platform_caps(hwmgr);
vega12_init_dpm_defaults(hwmgr);
/* Parse pptable data read from VBIOS */
vega12_set_private_data_based_on_pptable(hwmgr);
data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
VEGA12_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */
hwmgr->platform_descriptor.clockStep.engineClock = 500;
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
/* Setup default Overdrive Fan control settings */
data->odn_fan_table.target_fan_speed =
hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
data->odn_fan_table.target_temperature =
hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
data->odn_fan_table.min_performance_clock =
hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
data->odn_fan_table.min_fan_limit =
hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
if (hwmgr->feature_mask & PP_GFXOFF_MASK)
data->gfxoff_controlled_by_driver = true;
else
data->gfxoff_controlled_by_driver = false;
return result;
}
static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
data->low_sclk_interrupt_threshold = 0;
return 0;
}
static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
{
PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
"Failed to init sclk threshold!",
return -EINVAL);
return 0;
}
/*
 * @fn vega12_init_dpm_state
 * @brief Initialize the soft/hard minimum levels to 0 and the soft/hard
 *        maximum levels to 0xffff.
 *
 * @param dpm_state - the address of the DPM table to initialize.
 * @return None.
 */
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
dpm_state->soft_min_level = 0x0;
dpm_state->soft_max_level = 0xffff;
dpm_state->hard_min_level = 0x0;
dpm_state->hard_max_level = 0xffff;
}
static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
int i;
int ret;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_gen = 1;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_gen = 0;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 6;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 5;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 3;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
* Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
*/
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
pp_table->PcieGenSpeed[i];
pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
pp_table->PcieLaneCount[i];
if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
pp_table->PcieLaneCount[i]) {
smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
}
/* update the pptable */
pp_table->PcieGenSpeed[i] = pcie_gen_arg;
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
/* override to the highest if it's disabled from ppfeaturemask */
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
ret = vega12_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
PP_ASSERT_WITH_CODE(!ret,
"Attempt to Disable DPM LINK Failed!",
return ret);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}
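/*
 * Passing 0xFF as the level index asks the SMU for the number of DPM
 * levels instead of a level's frequency.
 */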
static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
PPCLK_e clk_id, uint32_t *num_of_levels)
{
int ret = 0;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
(clk_id << 16 | 0xFF),
num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
return ret;
}
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
PPCLK_e clkID, uint32_t index, uint32_t *clock)
{
/*
 * SMU expects the clock ID to be in the top 16 bits.
 * Lower 16 bits specify the level.
 */
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
clock) == 0,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
return 0;
}
static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
int ret = 0;
uint32_t i, num_of_levels, clk;
ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[SetupSingleDpmTable] failed to get clk levels!",
return ret);
dpm_table->count = num_of_levels;
for (i = 0; i < num_of_levels; i++) {
ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
PP_ASSERT_WITH_CODE(!ret,
"[SetupSingleDpmTable] failed to get clk of specific level!",
return ret);
dpm_table->dpm_levels[i].value = clk;
dpm_table->dpm_levels[i].enabled = true;
}
return ret;
}
/*
 * Initialize all DPM state tables for the SMU based on the dependency
 * table. The dynamic state patching function will then trim these tables
 * to the range allowed by the power policy or by external client
 * requests, such as UVD requests.
 */
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *dpm_table;
int ret = 0;
memset(&data->dpm_table, 0, sizeof(data->dpm_table));
/* socclk */
dpm_table = &(data->dpm_table.soc_table);
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* memclk */
dpm_table = &(data->dpm_table.mem_table);
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* eclk */
dpm_table = &(data->dpm_table.eclk_table);
if (data->smu_features[GNLD_DPM_VCE].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* vclk */
dpm_table = &(data->dpm_table.vclk_table);
if (data->smu_features[GNLD_DPM_UVD].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* dclk */
dpm_table = &(data->dpm_table.dclk_table);
if (data->smu_features[GNLD_DPM_UVD].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* dcefclk */
dpm_table = &(data->dpm_table.dcef_table);
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
return ret);
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* pixclk */
dpm_table = &(data->dpm_table.pixel_table);
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
return ret);
} else
dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* dispclk */
dpm_table = &(data->dpm_table.display_table);
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
return ret);
} else
dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* phyclk */
dpm_table = &(data->dpm_table.phy_table);
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
return ret);
} else
dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega12_dpm_table));
return 0;
}
#if 0
static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
uint32_t min_level;
hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
/* Optimize compute power profile: Use only highest
* 2 power levels (if more than 2 are available)
*/
if (dpm_table->count > 2)
min_level = dpm_table->count - 2;
else if (dpm_table->count == 2)
min_level = 1;
else
min_level = 0;
hwmgr->default_compute_power_profile.min_sclk =
dpm_table->dpm_levels[min_level].value;
hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
return 0;
}
#endif
/**
 * vega12_init_smc_table - Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * return: 0 on success, a negative error code on failure
 */
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
if (!result) {
data->vbios_boot_state.vddc = boot_up_values.usVddc;
data->vbios_boot_state.vddci = boot_up_values.usVddci;
data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
data->vbios_boot_state.eclock = boot_up_values.ulEClk;
data->vbios_boot_state.dclock = boot_up_values.ulDClk;
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
NULL);
}
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
result = smum_smc_table_manager(hwmgr,
(uint8_t *)pp_table, TABLE_PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
return 0;
}
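/* Ask the SMU to run the ACG BTC; the firmware reports 1 on success. */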
static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
{
uint32_t result;
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(result == 1,
"Failed to run ACG BTC!", return -EINVAL);
return 0;
}
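/*
 * The 64-bit feature mask is split into two 32-bit words and sent to the
 * SMU via separate high/low messages.
 */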
static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
int i;
uint32_t allowed_features_low = 0, allowed_features_high = 0;
for (i = 0; i < GNLD_FEATURES_MAX; i++)
if (data->smu_features[i].allowed)
data->smu_features[i].smu_feature_id > 31 ?
(allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
return -1);
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return -1);
return 0;
}
static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = true;
data->vce_power_gated = true;
if (data->smu_features[GNLD_DPM_UVD].enabled)
data->uvd_power_gated = false;
if (data->smu_features[GNLD_DPM_VCE].enabled)
data->vce_power_gated = false;
}
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint64_t features_enabled;
int i;
bool enabled;
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return -1);
if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
}
}
vega12_init_powergate_state(hwmgr);
return 0;
}
static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint64_t features_enabled;
int i;
bool enabled;
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return -1);
if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
}
}
return 0;
}
static int vega12_odn_initialize_default_settings(
struct pp_hwmgr *hwmgr)
{
return 0;
}
static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
NULL);
}
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
result = vega12_set_overdrive_target_percentage(hwmgr,
(uint32_t)adjust_percent);
}
return result;
}
static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
PPCLK_e clkid, struct vega12_clock_range *clock)
{
/* AC Max */
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
&(clock->ACMax)) == 0,
"[GetClockRanges] Failed to get max ac clock from SMC!",
return -EINVAL);
/* AC Min */
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
&(clock->ACMin)) == 0,
"[GetClockRanges] Failed to get min ac clock from SMC!",
return -EINVAL);
/* DC Max */
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
&(clock->DCMax)) == 0,
"[GetClockRanges] Failed to get max dc clock from SMC!",
return -EINVAL);
return 0;
}
static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t i;
for (i = 0; i < PPCLK_COUNT; i++)
PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
i, &(data->clk_range[i])),
"Failed to get clk range from SMC!",
return -EINVAL);
return 0;
}
static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
} else {
hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
}
hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
}
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega12_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(result == 0,
"[EnableDPMTasks] Failed to set allowed featuresmask!\n",
return result);
tmp_result = vega12_init_smc_table(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to initialize SMC table!",
result = tmp_result);
tmp_result = vega12_run_acg_btc(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to run ACG BTC!",
result = tmp_result);
result = vega12_enable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to enable all smu features!",
return result);
result = vega12_override_pcie_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"[EnableDPMTasks] Failed to override pcie parameters!",
return result);
tmp_result = vega12_power_control_set_level(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to power control set level!",
result = tmp_result);
result = vega12_get_all_clock_ranges(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to get all clock ranges!",
return result);
result = vega12_odn_initialize_default_settings(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to power control set level!",
return result);
result = vega12_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to setup default DPM tables!",
return result);
vega12_populate_umdpstate_clocks(hwmgr);
return result;
}
static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
return 0;
}
static uint32_t vega12_find_lowest_dpm_level(
struct vega12_single_dpm_table *table)
{
uint32_t i;
for (i = 0; i < table->count; i++) {
if (table->dpm_levels[i].enabled)
break;
}
if (i >= table->count) {
i = 0;
table->dpm_levels[i].enabled = true;
}
return i;
}
static uint32_t vega12_find_highest_dpm_level(
struct vega12_single_dpm_table *table)
{
int32_t i = 0;
PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
"[FindHighestDPMLevel] DPM Table has too many entries!",
return MAX_REGULAR_DPM_NUMBER - 1);
for (i = table->count - 1; i >= 0; i--) {
if (table->dpm_levels[i].enabled)
break;
}
if (i < 0) {
i = 0;
table->dpm_levels[i].enabled = true;
}
return (uint32_t)i;
}
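/*
 * The SetSoftMin/SetHardMin message parameter packs the clock ID into the
 * upper 16 bits and the frequency in MHz into the lower 16 bits.
 */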
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
uint32_t min_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_UCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min memclk !",
return ret);
min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
(PPCLK_UCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set hard min memclk !",
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_VCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min vclk!",
return ret);
min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_DCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min dclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_ECLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min eclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set soft min socclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
(PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
return ret;
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
uint32_t max_freq;
int ret = 0;
if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_UCLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max memclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_UVD].enabled) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_VCLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_DCLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max dclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_VCE].enabled) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_ECLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max eclk!",
return ret);
}
if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
(PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
NULL)),
"Failed to set soft max socclk!",
return ret);
}
return ret;
}
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_VCE].supported) {
PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
"Attempt to Enable/Disable DPM VCE Failed!",
return -1);
data->smu_features[GNLD_DPM_VCE].enabled = enable;
}
return 0;
}
static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t gfx_clk;
if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
return -1;
if (low)
PP_ASSERT_WITH_CODE(
vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
"[GetSclks]: fail to get min PPCLK_GFXCLK\n",
return -1);
else
PP_ASSERT_WITH_CODE(
vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
"[GetSclks]: fail to get max PPCLK_GFXCLK\n",
return -1);
return (gfx_clk * 100);
}
static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t mem_clk;
if (!data->smu_features[GNLD_DPM_UCLK].enabled)
return -1;
if (low)
PP_ASSERT_WITH_CODE(
vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
"[GetMclks]: fail to get min PPCLK_UCLK\n",
return -1);
else
PP_ASSERT_WITH_CODE(
vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
"[GetMclks]: fail to get max PPCLK_UCLK\n",
return -1);
return (mem_clk * 100);
}
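/*
 * Fetch the SMU metrics table, caching the result for at least one jiffy
 * (~1 ms) so back-to-back sensor reads do not each trigger an SMU table
 * export; bypass_cache forces a fresh export regardless of age.
 */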
static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
SmuMetrics_t *metrics_table,
bool bypass_cache)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
if (bypass_cache ||
!data->metrics_time ||
time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
ret = smum_smc_table_manager(hwmgr,
(uint8_t *)(&data->metrics_table),
TABLE_SMU_METRICS,
true);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
return ret;
}
data->metrics_time = jiffies;
}
if (metrics_table)
memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
return ret;
}
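/*
 * CurrSocketPower is reported in watts; the left shift by 8 below returns
 * it in the 8.8 fixed-point format the powerplay sensor interface expects.
 */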
static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
{
SmuMetrics_t metrics_table;
int ret = 0;
ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
*query = metrics_table.CurrSocketPower << 8;
return ret;
}
static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
{
uint32_t gfx_clk = 0;
*gfx_freq = 0;
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
&gfx_clk) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return -EINVAL);
*gfx_freq = gfx_clk * 100;
return 0;
}
static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
{
uint32_t mem_clk = 0;
*mclk_freq = 0;
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
&mem_clk) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
return -EINVAL);
*mclk_freq = mem_clk * 100;
return 0;
}
static int vega12_get_current_activity_percent(
struct pp_hwmgr *hwmgr,
int idx,
uint32_t *activity_percent)
{
SmuMetrics_t metrics_table;
int ret = 0;
ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
switch (idx) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
*activity_percent = metrics_table.AverageGfxActivity;
break;
case AMDGPU_PP_SENSOR_MEM_LOAD:
*activity_percent = metrics_table.AverageUclkActivity;
break;
default:
pr_err("Invalid index for retrieving clock activity\n");
return -EINVAL;
}
return ret;
}
static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
SmuMetrics_t metrics_table;
int ret = 0;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
if (!ret)
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
if (!ret)
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
case AMDGPU_PP_SENSOR_MEM_LOAD:
ret = vega12_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
if (!ret)
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
*((uint32_t *)value) = metrics_table.TemperatureHotspot *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
*((uint32_t *)value) = metrics_table.TemperatureHBM *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCE_POWER:
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
if (!ret)
*size = 4;
break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value);
if (!ret)
*size = 8;
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
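#if 0
/*
 * Illustrative sketch only, not part of the driver: how a caller might
 * read the current gfxclk through the sensor interface above. The function
 * name is hypothetical and the hwmgr pointer is assumed to come from an
 * initialized powerplay instance; on success *freq holds the SMU clock
 * scaled by 100 (matching the now / 100 conversion used in
 * vega12_print_clock_levels) and size is set to 4.
 */
static int example_read_gfx_sclk(struct pp_hwmgr *hwmgr, uint32_t *freq)
{
	int size = sizeof(*freq);

	return vega12_read_sensor(hwmgr, AMDGPU_PP_SENSOR_GFX_SCLK,
				  freq, &size);
}
#endif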
static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 1 : 0,
NULL);
return 0;
}
static int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
int result = 0;
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
enum amd_pp_clock_type clk_type = clock_req->clock_type;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
PPCLK_e clk_select = 0;
uint32_t clk_request = 0;
if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
switch (clk_type) {
case amd_pp_dcef_clock:
clk_select = PPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
clk_select = PPCLK_DISPCLK;
break;
case amd_pp_pixel_clock:
clk_select = PPCLK_PIXCLK;
break;
case amd_pp_phy_clock:
clk_select = PPCLK_PHYCLK;
break;
default:
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
result = -1;
break;
}
if (!result) {
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
clk_request,
NULL);
}
}
return result;
}
static int vega12_notify_smc_display_config_after_ps_adjustment(
struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
if ((hwmgr->display_config->num_display > 1) &&
!hwmgr->display_config->multi_monitor_in_sync &&
!hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcefClockInSR / 100,
NULL),
"Attempt to set divider for DCEFCLK Failed!",
return -1);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
}
return 0;
}
static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t soft_level;
soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_level].value;
soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
return -1);
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -1);
return 0;
}
static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t soft_level;
soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_level].value;
soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
return -1);
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -1);
return 0;
}
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
return 0;
}
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
*sclk_mask = 0;
*mclk_mask = 0;
*soc_mask = 0;
if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
*soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
*sclk_mask = gfx_dpm_table->count - 1;
*mclk_mask = mem_dpm_table->count - 1;
*soc_mask = soc_dpm_table->count - 1;
}
return 0;
}
static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
switch (mode) {
case AMD_FAN_CTRL_NONE:
break;
case AMD_FAN_CTRL_MANUAL:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega12_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
}
static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega12_force_dpm_highest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = vega12_force_dpm_lowest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = vega12_unforce_dpm_levels(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
return ret;
}
static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
return AMD_FAN_CTRL_MANUAL;
else
return AMD_FAN_CTRL_AUTO;
}
static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
#if 0
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_clock_and_voltage_limits *max_limits =
&table_info->max_clock_voltage_on_ac;
info->engine_max_clock = max_limits->sclk;
info->memory_max_clock = max_limits->mclk;
#endif
return 0;
}
static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
uint32_t *clock,
PPCLK_e clock_select,
bool max)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (max)
*clock = data->clk_range[clock_select].ACMax;
else
*clock = data->clk_range[clock_select].ACMin;
return 0;
}
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
uint32_t ucount;
int i;
struct vega12_single_dpm_table *dpm_table;
if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
return -1;
dpm_table = &(data->dpm_table.gfx_table);
ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
clocks->num_levels = ucount;
return 0;
}
static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
uint32_t clock)
{
return 25;
}
static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
uint32_t ucount;
int i;
struct vega12_single_dpm_table *dpm_table;
if (!data->smu_features[GNLD_DPM_UCLK].enabled)
return -1;
dpm_table = &(data->dpm_table.mem_table);
ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
}
clocks->num_levels = data->mclk_latency_table.count = ucount;
return 0;
}
static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
uint32_t ucount;
int i;
struct vega12_single_dpm_table *dpm_table;
if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
return -1;
dpm_table = &(data->dpm_table.dcef_table);
ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
clocks->num_levels = ucount;
return 0;
}
static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
uint32_t ucount;
int i;
struct vega12_single_dpm_table *dpm_table;
if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
return -1;
dpm_table = &(data->dpm_table.soc_table);
ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
clocks->num_levels = ucount;
return 0;
}
static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
int ret;
switch (type) {
case amd_pp_sys_clock:
ret = vega12_get_sclks(hwmgr, clocks);
break;
case amd_pp_mem_clock:
ret = vega12_get_memclocks(hwmgr, clocks);
break;
case amd_pp_dcef_clock:
ret = vega12_get_dcefclocks(hwmgr, clocks);
break;
case amd_pp_soc_clock:
ret = vega12_get_socclocks(hwmgr, clocks);
break;
default:
return -EINVAL;
}
return ret;
}
static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
clocks->num_levels = 0;
return 0;
}
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
void *clock_ranges)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
if (!data->registry_data.disable_water_mark &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported) {
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap |= WaterMarksExist;
data->water_marks_bitmap &= ~WaterMarksLoaded;
}
return 0;
}
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
uint32_t soft_min_level, soft_max_level, hard_min_level;
int ret = 0;
switch (type) {
case PP_SCLK:
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
ret = vega12_upload_dpm_min_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega12_upload_dpm_max_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
break;
case PP_MCLK:
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
ret = vega12_upload_dpm_min_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega12_upload_dpm_max_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
break;
case PP_SOCCLK:
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
if (soft_max_level >= data->dpm_table.soc_table.count) {
pr_err("Clock level specified %d is over max allowed %d\n",
soft_max_level,
data->dpm_table.soc_table.count - 1);
return -EINVAL;
}
data->dpm_table.soc_table.dpm_state.soft_min_level =
data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
ret = vega12_upload_dpm_min_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
ret = vega12_upload_dpm_max_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
break;
case PP_DCEFCLK:
hard_min_level = mask ? (ffs(mask) - 1) : 0;
if (hard_min_level >= data->dpm_table.dcef_table.count) {
pr_err("Clock level specified %d is over max allowed %d\n",
hard_min_level,
data->dpm_table.dcef_table.count - 1);
return -EINVAL;
}
data->dpm_table.dcef_table.dpm_state.hard_min_level =
data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
ret = vega12_upload_dpm_min_level(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
/* TODO: Setting DCEFCLK max dpm level is not supported */
break;
case PP_PCIE:
break;
default:
break;
}
return 0;
}
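/*
 * Worked example for the mask handling above: forcing levels 2 through 4
 * corresponds to mask 0x1c (0b11100); ffs(0x1c) - 1 recovers 2 as the soft
 * minimum index and fls(0x1c) - 1 recovers 4 as the soft maximum index.
 */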
static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
static const char *ppfeature_name[] = {
"DPM_PREFETCHER",
"GFXCLK_DPM",
"UCLK_DPM",
"SOCCLK_DPM",
"UVD_DPM",
"VCE_DPM",
"ULV",
"MP0CLK_DPM",
"LINK_DPM",
"DCEFCLK_DPM",
"GFXCLK_DS",
"SOCCLK_DS",
"LCLK_DS",
"PPT",
"TDC",
"THERMAL",
"GFX_PER_CU_CG",
"RM",
"DCEFCLK_DS",
"ACDC",
"VR0HOT",
"VR1HOT",
"FW_CTF",
"LED_DISPLAY",
"FAN_CONTROL",
"DIDT",
"GFXOFF",
"CG",
"ACG"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
"ENABLEMENT"};
uint64_t features_enabled;
int i;
int ret = 0;
int size = 0;
phm_get_sysfs_buf(&buf, &size);
ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
return ret);
size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
}
return size;
}
static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
uint64_t features_enabled;
uint64_t features_to_enable;
uint64_t features_to_disable;
int ret = 0;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled);
if (ret)
return ret;
features_to_disable =
features_enabled & ~new_ppfeature_masks;
features_to_enable =
~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
if (features_to_disable) {
ret = vega12_enable_smc_features(hwmgr, false, features_to_disable);
if (ret)
return ret;
}
if (features_to_enable) {
ret = vega12_enable_smc_features(hwmgr, true, features_to_enable);
if (ret)
return ret;
}
return 0;
}
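/*
 * Worked example for the feature-mask diff above: with features_enabled =
 * 0b1010 and new_ppfeature_masks = 0b0110, features_to_disable = 0b1010 &
 * ~0b0110 = 0b1000 and features_to_enable = ~0b1010 & 0b0110 = 0b0100, so
 * exactly one feature is disabled and one enabled.
 */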
static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}
static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
{
uint32_t width_level;
width_level = vega12_get_current_pcie_link_width_level(hwmgr);
if (width_level > LINK_WIDTH_MAX)
width_level = 0;
return link_width[width_level];
}
static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
{
uint32_t speed_level;
speed_level = vega12_get_current_pcie_link_speed_level(hwmgr);
if (speed_level > LINK_SPEED_MAX)
speed_level = 0;
return link_speed[speed_level];
}
static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
int i, size = 0;
uint32_t now = 0;
struct pp_clock_levels_with_latency clocks;
switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
"Attempt to get current gfx clk Failed!",
return -1);
PP_ASSERT_WITH_CODE(
vega12_get_sclks(hwmgr, &clocks) == 0,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_MCLK:
PP_ASSERT_WITH_CODE(
vega12_get_current_mclk_freq(hwmgr, &now) == 0,
"Attempt to get current mclk freq Failed!",
return -1);
PP_ASSERT_WITH_CODE(
vega12_get_memclocks(hwmgr, &clocks) == 0,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
case PP_SOCCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
&now) == 0,
"Attempt to get Current SOCCLK Frequency Failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(
vega12_get_socclocks(hwmgr, &clocks) == 0,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
case PP_DCEFCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
&now) == 0,
"Attempt to get Current DCEFCLK Frequency Failed!",
return -EINVAL);
PP_ASSERT_WITH_CODE(
vega12_get_dcefclocks(hwmgr, &clocks) == 0,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
case PP_PCIE:
break;
default:
break;
}
return size;
}
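/*
 * Re-derive the soft/hard min/max levels of every DPM table from the
 * current display configuration: UMD pstate caps clamp each clock to a
 * single level, DAL's UCLK hard minimum is honoured, and the memory clock
 * is pinned to its highest level when mclk switching must be disabled for
 * multiple unsynchronized displays.
 */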
static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *dpm_table;
bool vblank_too_short = false;
bool disable_mclk_switching;
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
vblank_too_short;
latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
/* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
/* memclk */
dpm_table = &(data->dpm_table.mem_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
/* honour DAL's UCLK Hardmin */
if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
/* Hardmin is dependent on displayconfig */
if (disable_mclk_switching) {
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
if (data->mclk_latency_table.entries[i].latency <= latency) {
if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
break;
}
}
}
}
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
/* vclk */
dpm_table = &(data->dpm_table.vclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
/* dclk */
dpm_table = &(data->dpm_table.dclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
/* socclk */
dpm_table = &(data->dpm_table.soc_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
/* eclk */
dpm_table = &(data->dpm_table.eclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
}
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
}
}
return 0;
}
static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
struct vega12_single_dpm_table *dpm_table)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
PP_ASSERT_WITH_CODE(dpm_table->count > 0,
"[SetUclkToHightestDpmLevel] Dpm table has no entry!",
return -EINVAL);
PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
"[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
return -EINVAL);
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
return ret;
}
static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, 0,
NULL);
ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
return ret;
}
static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
int result = 0;
Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr,
(uint8_t *)wm_table, TABLE_WATERMARKS, false);
PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
if ((data->water_marks_bitmap & WaterMarksExist) &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
NULL);
return result;
}
static int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_UVD].supported) {
PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
"Attempt to Enable/Disable DPM UVD Failed!",
return -1);
data->smu_features[GNLD_DPM_UVD].enabled = enable;
}
return 0;
}
static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->vce_power_gated == bgate)
return;
data->vce_power_gated = bgate;
vega12_enable_disable_vce_dpm(hwmgr, !bgate);
}
static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->uvd_power_gated == bgate)
return;
data->uvd_power_gated = bgate;
vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static bool
vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
is_update_required = true;
if (data->registry_data.gfx_clk_deep_sleep_support) {
if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
is_update_required = true;
}
return is_update_required;
}
static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
tmp_result = vega12_disable_all_smu_features(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable all smu features!", result = tmp_result);
return result;
}
static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
int result;
result = vega12_disable_dpm_tasks(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"[disable_dpm_tasks] Failed to disable DPM!",
);
data->water_marks_bitmap &= ~(WaterMarksLoaded);
return result;
}
#if 0
static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
uint32_t *sclk_idx, uint32_t *mclk_idx,
uint32_t min_sclk, uint32_t min_mclk)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_dpm_table *dpm_table = &(data->dpm_table);
uint32_t i;
for (i = 0; i < dpm_table->gfx_table.count; i++) {
if (dpm_table->gfx_table.dpm_levels[i].enabled &&
dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
*sclk_idx = i;
break;
}
}
for (i = 0; i < dpm_table->mem_table.count; i++) {
if (dpm_table->mem_table.dpm_levels[i].enabled &&
dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
*mclk_idx = i;
break;
}
}
}
#endif
#if 0
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
return 0;
}
static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega12_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
int golden_value = golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value;
value -= golden_value;
value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
return 0;
}
static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega12_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
value -= golden_value;
value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
return 0;
}
#endif
static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
uint32_t virtual_addr_hi,
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
virtual_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
virtual_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
mc_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
mc_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
size,
NULL);
return 0;
}
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
thermal_data->max = pp_table->TedgeLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_crit_max = pp_table->ThbmLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
if (data->gfxoff_controlled_by_driver)
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
return ret;
}
static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
if (data->gfxoff_controlled_by_driver)
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
return ret;
}
static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
if (enable)
return vega12_enable_gfx_off(hwmgr);
else
return vega12_disable_gfx_off(hwmgr);
}
static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
return 0;
}
static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
enum pp_mp1_state mp1_state)
{
uint16_t msg;
int ret;
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
msg = PPSMC_MSG_PrepareMp1ForUnload;
break;
case PP_MP1_STATE_SHUTDOWN:
case PP_MP1_STATE_RESET:
case PP_MP1_STATE_NONE:
default:
return 0;
}
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
return 0;
}
static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
{
memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
gpu_metrics->common_header.structure_size =
sizeof(struct gpu_metrics_v1_0);
gpu_metrics->common_header.format_revision = 1;
gpu_metrics->common_header.content_revision = 0;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
}
static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
void **table)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
struct gpu_metrics_v1_0 *gpu_metrics =
&data->gpu_metrics_table;
SmuMetrics_t metrics;
uint32_t fan_speed_rpm;
int ret;
ret = vega12_get_metrics_table(hwmgr, &metrics, true);
if (ret)
return ret;
vega12_init_gpu_metrics_v1_0(gpu_metrics);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
gpu_metrics->temperature_mem = metrics.TemperatureHBM;
gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
gpu_metrics->pcie_link_width =
vega12_get_current_pcie_link_width(hwmgr);
gpu_metrics->pcie_link_speed =
vega12_get_current_pcie_link_speed(hwmgr);
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_0);
}
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
.asic_setup = vega12_setup_asic_task,
.dynamic_state_management_enable = vega12_enable_dpm_tasks,
.dynamic_state_management_disable = vega12_disable_dpm_tasks,
.patch_boot_state = vega12_patch_boot_state,
.get_sclk = vega12_dpm_get_sclk,
.get_mclk = vega12_dpm_get_mclk,
.notify_smc_display_config_after_ps_adjustment =
vega12_notify_smc_display_config_after_ps_adjustment,
.force_dpm_level = vega12_dpm_force_dpm_level,
.stop_thermal_controller = vega12_thermal_stop_thermal_controller,
.get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
.reset_fan_speed_to_default =
vega12_fan_ctrl_reset_fan_speed_to_default,
.get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
.set_fan_control_mode = vega12_set_fan_control_mode,
.get_fan_control_mode = vega12_get_fan_control_mode,
.read_sensor = vega12_read_sensor,
.get_dal_power_level = vega12_get_dal_power_level,
.get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
.set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = vega12_display_clock_voltage_request,
.force_clock_level = vega12_force_clock_level,
.print_clock_levels = vega12_print_clock_levels,
.apply_clocks_adjust_rules =
vega12_apply_clocks_adjust_rules,
.pre_display_config_changed =
vega12_pre_display_configuration_changed_task,
.display_config_changed = vega12_display_configuration_changed_task,
.powergate_uvd = vega12_power_gate_uvd,
.powergate_vce = vega12_power_gate_vce,
.check_smc_update_required_for_display_configuration =
vega12_check_smc_update_required_for_display_configuration,
.power_off_asic = vega12_power_off_asic,
.disable_smc_firmware_ctf = vega12_thermal_disable_alert,
#if 0
.set_power_profile_state = vega12_set_power_profile_state,
.get_sclk_od = vega12_get_sclk_od,
.set_sclk_od = vega12_set_sclk_od,
.get_mclk_od = vega12_get_mclk_od,
.set_mclk_od = vega12_set_mclk_od,
#endif
.notify_cac_buffer_info = vega12_notify_cac_buffer_info,
.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
.get_asic_baco_capability = smu9_baco_get_capability,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
.set_mp1_state = vega12_set_mp1_state,
.get_gpu_metrics = vega12_get_gpu_metrics,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
hwmgr->pptable_func = &vega12_pptable_funcs;
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "hwmgr.h"
#include "vega20_hwmgr.h"
#include "vega20_powertune.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "vega20_inc.h"
#include "pp_debug.h"
int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_PPT].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetPptLimit, n,
NULL);
return 0;
}
int vega20_validate_power_level_request(struct pp_hwmgr *hwmgr,
uint32_t tdp_percentage_adjustment, uint32_t tdp_absolute_value_adjustment)
{
return (tdp_percentage_adjustment > hwmgr->platform_descriptor.TDPLimit) ? -1 : 0;
}
static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
NULL);
}
int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
result = vega20_set_overdrive_target_percentage(hwmgr,
(uint32_t)adjust_percent);
}
return result;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include <linux/reboot.h>
#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}
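/*
 * Worked example, assuming VOLTAGE_SCALE is 4 (its usual powerplay value):
 * convert_to_vid(1000 mV) = (6200 - 4000) / 25 = 88 and
 * convert_to_vddc(88) = (6200 - 2200) / 4 = 1000 mV, so the two helpers
 * round-trip for voltages on the 25 / VOLTAGE_SCALE grid.
 */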
int phm_copy_clock_limits_array(
struct pp_hwmgr *hwmgr,
uint32_t **pptable_info_array,
const uint32_t *pptable_array,
uint32_t power_saving_clock_count)
{
uint32_t array_size, i;
uint32_t *table;
array_size = sizeof(uint32_t) * power_saving_clock_count;
table = kzalloc(array_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
for (i = 0; i < power_saving_clock_count; i++)
table[i] = le32_to_cpu(pptable_array[i]);
*pptable_info_array = table;
return 0;
}
int phm_copy_overdrive_settings_limits_array(
struct pp_hwmgr *hwmgr,
uint32_t **pptable_info_array,
const uint32_t *pptable_array,
uint32_t od_setting_count)
{
uint32_t array_size, i;
uint32_t *table;
array_size = sizeof(uint32_t) * od_setting_count;
table = kzalloc(array_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
for (i = 0; i < od_setting_count; i++)
table[i] = le32_to_cpu(pptable_array[i]);
*pptable_info_array = table;
return 0;
}
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
u32 mask = 0;
u32 shift = 0;
shift = (offset % 4) << 3;
if (size == sizeof(uint8_t))
mask = 0xFF << shift;
else if (size == sizeof(uint16_t))
mask = 0xFFFF << shift;
original_data &= ~mask;
original_data |= (field << shift);
return original_data;
}
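/*
 * Worked example for the byte-lane math above: offset 2 with a uint8_t
 * field gives shift = (2 % 4) << 3 = 16 and mask = 0xFF0000, so patching
 * field 0xAA into original_data 0x11223344 yields 0x11AA3344.
 */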
/*
* Returns once the part of the register indicated by the mask has
* reached the given value.
*/
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask)
{
uint32_t i;
uint32_t cur_value;
if (hwmgr == NULL || hwmgr->device == NULL) {
pr_err("Invalid Hardware Manager!");
return -EINVAL;
}
for (i = 0; i < hwmgr->usec_timeout; i++) {
cur_value = cgs_read_register(hwmgr->device, index);
if ((cur_value & mask) == (value & mask))
break;
udelay(1);
}
/* timeout means wrong logic */
if (i == hwmgr->usec_timeout)
return -ETIME;
return 0;
}
/*
* Returns once the part of the register indicated by the mask has
* reached the given value.The indirect space is described by giving
* the memory-mapped index of the indirect index register.
*/
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
uint32_t mask)
{
if (hwmgr == NULL || hwmgr->device == NULL) {
pr_err("Invalid Hardware Manager!");
return -EINVAL;
}
cgs_write_register(hwmgr->device, indirect_port, index);
return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}
int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
uint32_t index,
uint32_t value, uint32_t mask)
{
uint32_t i;
uint32_t cur_value;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
for (i = 0; i < hwmgr->usec_timeout; i++) {
cur_value = cgs_read_register(hwmgr->device,
index);
if ((cur_value & mask) != (value & mask))
break;
udelay(1);
}
/* timeout means wrong logic */
if (i == hwmgr->usec_timeout)
return -ETIME;
return 0;
}
int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
uint32_t mask)
{
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
cgs_write_register(hwmgr->device, indirect_port, index);
return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
value, mask);
}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
}
bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
}
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
uint32_t i, j;
uint16_t vvalue;
bool found = false;
struct pp_atomctrl_voltage_table *table;
PP_ASSERT_WITH_CODE((NULL != vol_table),
"Voltage Table empty.", return -EINVAL);
table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
table->mask_low = vol_table->mask_low;
table->phase_delay = vol_table->phase_delay;
for (i = 0; i < vol_table->count; i++) {
vvalue = vol_table->entries[i].value;
found = false;
for (j = 0; j < table->count; j++) {
if (vvalue == table->entries[j].value) {
found = true;
break;
}
}
if (!found) {
table->entries[table->count].value = vvalue;
table->entries[table->count].smio_low =
vol_table->entries[i].smio_low;
table->count++;
}
}
memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
kfree(table);
table = NULL;
return 0;
}
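/*
 * Example of the trim above: a table with entries { 900, 900, 950 } is
 * rewritten in place as { 900, 950 } with count reduced from 3 to 2; for
 * duplicate values only the first entry's smio_low is kept.
 */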
int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
uint32_t i;
int result;
PP_ASSERT_WITH_CODE((0 != dep_table->count),
"Voltage Dependency Table empty.", return -EINVAL);
PP_ASSERT_WITH_CODE((NULL != vol_table),
"vol_table empty.", return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = dep_table->count;
for (i = 0; i < dep_table->count; i++) {
vol_table->entries[i].value = dep_table->entries[i].mvdd;
vol_table->entries[i].smio_low = 0;
}
result = phm_trim_voltage_table(vol_table);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to trim MVDD table.", return result);
return 0;
}
int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
uint32_t i;
int result;
PP_ASSERT_WITH_CODE((0 != dep_table->count),
"Voltage Dependency Table empty.", return -EINVAL);
PP_ASSERT_WITH_CODE((NULL != vol_table),
"vol_table empty.", return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = dep_table->count;
for (i = 0; i < dep_table->count; i++) {
vol_table->entries[i].value = dep_table->entries[i].vddci;
vol_table->entries[i].smio_low = 0;
}
result = phm_trim_voltage_table(vol_table);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to trim VDDCI table.", return result);
return 0;
}
int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
phm_ppt_v1_voltage_lookup_table *lookup_table)
{
int i = 0;
PP_ASSERT_WITH_CODE((0 != lookup_table->count),
"Voltage Lookup Table empty.", return -EINVAL);
PP_ASSERT_WITH_CODE((NULL != vol_table),
"vol_table empty.", return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = lookup_table->count;
for (i = 0; i < vol_table->count; i++) {
vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
vol_table->entries[i].smio_low = 0;
}
return 0;
}
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
struct pp_atomctrl_voltage_table *vol_table)
{
unsigned int i, diff;
if (vol_table->count <= max_vol_steps)
return;
diff = vol_table->count - max_vol_steps;
/* keep the highest max_vol_steps entries, dropping the lowest ones */
for (i = 0; i < max_vol_steps; i++)
vol_table->entries[i] = vol_table->entries[i + diff];
vol_table->count = max_vol_steps;
}
int phm_reset_single_dpm_table(void *table,
uint32_t count, int max)
{
int i;
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
dpm_table->count = count > max ? max : count;
for (i = 0; i < dpm_table->count; i++)
dpm_table->dpm_level[i].enabled = false;
return 0;
}
void phm_setup_pcie_table_entry(
void *table,
uint32_t index, uint32_t pcie_gen,
uint32_t pcie_lanes)
{
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
dpm_table->dpm_level[index].value = pcie_gen;
dpm_table->dpm_level[index].param1 = pcie_lanes;
dpm_table->dpm_level[index].enabled = 1;
}
int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
int32_t i;
int32_t mask = 0;
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
for (i = dpm_table->count; i > 0; i--) {
mask = mask << 1;
if (dpm_table->dpm_level[i - 1].enabled)
mask |= 0x1;
else
mask &= 0xFFFFFFFE;
}
return mask;
}
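/*
 * Worked example: with three levels where level 0 and level 2 are enabled
 * and level 1 is disabled, the top-down shift loop above yields
 * 0b101 (= 5), i.e. bit i of the result mirrors dpm_level[i].enabled.
 */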
uint8_t phm_get_voltage_index(
struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
uint8_t count;
uint8_t i;
PP_ASSERT_WITH_CODE((NULL != lookup_table),
"Lookup Table empty.", return 0);
count = (uint8_t) (lookup_table->count);
PP_ASSERT_WITH_CODE((0 != count),
"Lookup Table empty.", return 0);
for (i = 0; i < lookup_table->count; i++) {
/* find first voltage equal or bigger than requested */
if (lookup_table->entries[i].us_vdd >= voltage)
return i;
}
/* voltage is bigger than max voltage in the table */
return i - 1;
}
uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
uint32_t voltage)
{
uint8_t count;
uint8_t i = 0;
PP_ASSERT_WITH_CODE((NULL != voltage_table),
"Voltage Table empty.", return 0);
count = (uint8_t) (voltage_table->count);
PP_ASSERT_WITH_CODE((0 != count),
"Voltage Table empty.", return 0);
for (i = 0; i < count; i++) {
/* find first voltage bigger than requested */
if (voltage_table->entries[i].value >= voltage)
return i;
}
/* voltage is bigger than max voltage in the table */
return i - 1;
}
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
uint32_t i;
/* an empty table would make entries[i - 1] below underflow */
if (vddci_table->count == 0)
return vddci;
for (i = 0; i < vddci_table->count; i++) {
if (vddci_table->entries[i].value >= vddci)
return vddci_table->entries[i].value;
}
pr_debug("vddci is larger than max value in vddci_table\n");
return vddci_table->entries[i-1].value;
}
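/*
 * Worked example with invented values: for a table holding
 * {800, 850, 900} mV, a request of 860 mV returns 900 (the smallest entry
 * >= the request); a request above 900 falls through to the last entry
 * after the debug message.
 */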
int phm_find_boot_level(void *table,
uint32_t value, uint32_t *boot_level)
{
int result = -EINVAL;
uint32_t i;
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
for (i = 0; i < dpm_table->count; i++) {
if (value == dpm_table->dpm_level[i].value) {
*boot_level = i;
result = 0;
}
}
return result;
}
int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk)
{
uint8_t entry_id;
uint8_t voltage_id;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}
if (entry_id >= table_info->vdd_dep_on_sclk->count) {
pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
return -EINVAL;
}
*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
return 0;
}
/**
* phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings
*
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: 0 on success, -ENOMEM if the table allocation fails.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
struct phm_clock_voltage_dependency_table *table_clk_vlt;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
/* initialize vddc_dep_on_dal_pwrl table */
table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 4),
GFP_KERNEL);
if (!table_clk_vlt) {
pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
return -ENOMEM;
} else {
table_clk_vlt->count = 4;
table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
if (hwmgr->chip_id >= CHIP_POLARIS10 &&
hwmgr->chip_id <= CHIP_VEGAM)
table_clk_vlt->entries[0].v = 700;
else
table_clk_vlt->entries[0].v = 0;
table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
if (hwmgr->chip_id >= CHIP_POLARIS10 &&
hwmgr->chip_id <= CHIP_VEGAM)
table_clk_vlt->entries[1].v = 740;
else
table_clk_vlt->entries[1].v = 720;
table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
if (hwmgr->chip_id >= CHIP_POLARIS10 &&
hwmgr->chip_id <= CHIP_VEGAM)
table_clk_vlt->entries[2].v = 800;
else
table_clk_vlt->entries[2].v = 810;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
table_clk_vlt->entries[3].v = 900;
if (pptable_info != NULL)
pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
}
return 0;
}
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
uint32_t level = 0;
while (0 == (mask & (1 << level)))
level++;
return level;
}
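/*
 * Worked example: for mask = 0x28 (0b101000) the helper above returns 3,
 * the position of the lowest set bit. Callers must pass a nonzero mask;
 * the loop has no upper bound.
 */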
void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)hwmgr->pptable;
struct phm_clock_voltage_dependency_table *table =
table_info->vddc_dep_on_dal_pwrl;
struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
uint32_t req_vddc = 0, req_volt, i;
if (!table || table->count <= 0
|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
return;
for (i = 0; i < table->count; i++) {
if (dal_power_level == table->entries[i].clk) {
req_vddc = table->entries[i].v;
break;
}
}
vddc_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < vddc_table->count; i++) {
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VddC_Request,
req_volt,
NULL);
return;
}
}
pr_err("DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t id, uint16_t *voltage)
{
uint32_t vol;
int ret = 0;
if (hwmgr->chip_id < CHIP_TONGA) {
ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
} else if (hwmgr->chip_id < CHIP_POLARIS10) {
ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
/* fall back to a safe default if the SMC reports an implausible value */
if (*voltage >= 2000 || *voltage == 0)
*voltage = 1150;
} else {
ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
*voltage = (uint16_t)(vol/100);
}
return ret;
}
int phm_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
schedule_delayed_work(&hwmgr->swctf_delayed_work,
msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}
} else if (client_id == SOC15_IH_CLIENTID_THM) {
if (src_id == 0)
schedule_delayed_work(&hwmgr->swctf_delayed_work,
msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
else
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
}
return 0;
}
static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
.process = phm_irq_process,
};
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
struct amdgpu_irq_src *source =
kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
if (!source)
return -ENOMEM;
source->funcs = &smu9_irq_funcs;
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
THM_9_0__SRCID__THM_DIG_THERM_L2H,
source);
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_THM,
THM_9_0__SRCID__THM_DIG_THERM_H2L,
source);
/* Register CTF(GPIO_19) interrupt */
amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
SOC15_IH_CLIENTID_ROM_SMUIO,
SMUIO_9_0__SRCID__SMUIO_GPIO19,
source);
return 0;
}
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
uint8_t *frev, uint8_t *crev)
{
struct amdgpu_device *adev = dev;
uint16_t data_start;
if (amdgpu_atom_parse_data_header(
adev->mode_info.atom_context, table, size,
frev, crev, &data_start))
return (uint8_t *)adev->mode_info.atom_context->bios +
data_start;
return NULL;
}
int smu_get_voltage_dependency_table_ppt_v1(
const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
uint8_t i = 0;
PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
"Voltage Lookup Table empty",
return -EINVAL);
dep_table->count = allowed_dep_table->count;
for (i = 0; i < dep_table->count; i++) {
dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
}
return 0;
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;
if (!table || !wm_with_clock_ranges)
return -EINVAL;
if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;
for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
1000));
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[1][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
1000));
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
1000));
table->WatermarkRow[0][i].WmSetting = (uint8_t)
wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}
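/*
 * Worked example with an invented value: a DMIF range with
 * wm_min_dcfclk_clk_in_khz = 600000 is stored as
 * MinClock = cpu_to_le16(600), i.e. clocks are converted from kHz to MHz
 * and laid out little-endian for the SMU. In this table layout row [1]
 * holds the DMIF sets and row [0] the MCIF sets.
 */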
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "smu7_baco.h"
#include "tonga_baco.h"
#include "fiji_baco.h"
#include "polaris_baco.h"
#include "ci_baco.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
*cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
return 0;
reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
*cap = true;
return 0;
}
int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
reg = RREG32(mmBACO_CNTL);
if (reg & BACO_CNTL__BACO_MODE_MASK)
/* gfx has already entered BACO state */
*state = BACO_STATE_IN;
else
*state = BACO_STATE_OUT;
return 0;
}
int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
return tonga_baco_set_state(hwmgr, state);
case CHIP_FIJI:
return fiji_baco_set_state(hwmgr, state);
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
return polaris_baco_set_state(hwmgr, state);
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
return ci_baco_set_state(hwmgr, state);
#endif
default:
return -EINVAL;
}
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "vega12_thermal.h"
#include "vega12_hwmgr.h"
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12_inc.h"
#include "soc15_common.h"
#include "pp_debug.h"
static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm,
current_rpm),
"Attempt to get current RPM from SMC Failed!",
return -EINVAL);
return 0;
}
int vega12_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
memset(fan_speed_info, 0, sizeof(*fan_speed_info));
fan_speed_info->supports_percent_read = false;
fan_speed_info->supports_percent_write = false;
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
return 0;
}
int vega12_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
*speed = 0;
return vega12_get_current_rpm(hwmgr, speed);
}
/**
 * vega12_enable_fan_control_feature - Enables the SMC Fan Control Feature.
*
* @hwmgr: the address of the powerplay hardware manager.
* Return: 0 on success. -1 otherwise.
*/
static int vega12_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
#if 0
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
hwmgr, true,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
return -1);
data->smu_features[GNLD_FAN_CONTROL].enabled = true;
}
#endif
return 0;
}
static int vega12_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
{
#if 0
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(
hwmgr, false,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
return -1);
data->smu_features[GNLD_FAN_CONTROL].enabled = false;
}
#endif
return 0;
}
int vega12_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_FAN_CONTROL].supported)
PP_ASSERT_WITH_CODE(
!vega12_enable_fan_control_feature(hwmgr),
"Attempt to Enable SMC FAN CONTROL Feature Failed!",
return -1);
return 0;
}
int vega12_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_FAN_CONTROL].supported)
PP_ASSERT_WITH_CODE(!vega12_disable_fan_control_feature(hwmgr),
"Attempt to Disable SMC FAN CONTROL Feature Failed!",
return -1);
return 0;
}
/**
* vega12_fan_ctrl_reset_fan_speed_to_default - Reset Fan Speed to default.
* @hwmgr: the address of the powerplay hardware manager.
 * Exception: Always succeeds.
*/
int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
return vega12_fan_ctrl_start_smc_fan_control(hwmgr);
}
/**
 * vega12_thermal_get_temperature - Reads the remote temperature from the thermal controller.
*
* @hwmgr: The address of the hardware manager.
*/
int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
int temp = 0;
temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return temp;
}
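/*
 * Worked example: a CTF_TEMP field reading of 85 makes the function above
 * return 85 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; with that constant
 * defined as 1000 elsewhere in powerplay, the result is millidegrees
 * Celsius.
 */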
/**
* vega12_thermal_set_temperature_range - Set the requested temperature range
* for high and low alert signals
*
* @hwmgr: The address of the hardware manager.
* @range: Temperature range to be programmed for
* high and low alert signals
* Exception: PP_Result_BadInput if the input data is not valid.
*/
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
/* compare them in units of degrees Celsius */
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (high > pptable_information->us_software_shutdown_temp)
high = pptable_information->us_software_shutdown_temp;
if (low > high)
return -EINVAL;
val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
return 0;
}
/**
 * vega12_thermal_enable_alert - Enable thermal alerts on the thermal controller.
*
* @hwmgr: The address of the hardware manager.
*/
static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t val = 0;
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
return 0;
}
/**
 * vega12_thermal_disable_alert - Disable thermal alerts on the thermal controller.
* @hwmgr: The address of the hardware manager.
*/
int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
return 0;
}
/**
* vega12_thermal_stop_thermal_controller - Uninitialize the thermal controller.
* Currently just disables alerts.
* @hwmgr: The address of the hardware manager.
*/
int vega12_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
int result = vega12_thermal_disable_alert(hwmgr);
return result;
}
/**
* vega12_thermal_setup_fan_table - Set up the fan table to control the fan using the SMC.
* @hwmgr: the address of the powerplay hardware manager.
*/
static int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
PPTable_t *table = &(data->smc_state_table.pp_table);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature,
NULL);
return ret;
}
/**
* vega12_thermal_start_smc_fan_control - Start the fan control on the SMC.
* @hwmgr: the address of the powerplay hardware manager.
* Return: result from set temperature range routine
*/
static int vega12_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega12_fan_ctrl_start_smc_fan_control(hwmgr);
return 0;
}
int vega12_start_thermal_controller(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
int ret = 0;
if (range == NULL)
return -EINVAL;
ret = vega12_thermal_set_temperature_range(hwmgr, range);
if (ret)
return -EINVAL;
vega12_thermal_enable_alert(hwmgr);
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
ret = vega12_thermal_setup_fan_table(hwmgr);
if (ret)
return -EINVAL;
vega12_thermal_start_smc_fan_control(hwmgr);
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hwmgr.h"
#include "amd_powerplay.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega10_powertune.h"
#include "smu9.h"
#include "smu9_driver_if.h"
#include "vega10_inc.h"
#include "soc15_common.h"
#include "pppcielanes.h"
#include "vega10_hwmgr.h"
#include "vega10_smumgr.h"
#include "vega10_processpptables.h"
#include "vega10_pptable.h"
#include "vega10_thermal.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "vega10_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define HBM_MEMORY_CHANNEL_WIDTH 128
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
typedef enum {
CLK_SMNCLK = 0,
CLK_SOCCLK,
CLK_MP0CLK,
CLK_MP1CLK,
CLK_LCLK,
CLK_DCEFCLK,
CLK_VCLK,
CLK_DCLK,
CLK_ECLK,
CLK_UCLK,
CLK_GFXCLK,
CLK_COUNT,
} CLOCK_ID_e;
static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
static struct vega10_power_state *cast_phw_vega10_power_state(
struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
"Invalid Powerstate Type!",
return NULL);
return (struct vega10_power_state *)hw_ps;
}
static const struct vega10_power_state *cast_const_phw_vega10_power_state(
const struct pp_hw_power_state *hw_ps)
{
PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
"Invalid Powerstate Type!",
return NULL);
return (const struct vega10_power_state *)hw_ps;
}
static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->registry_data.sclk_dpm_key_disabled =
hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
data->registry_data.socclk_dpm_key_disabled =
hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
data->registry_data.mclk_dpm_key_disabled =
hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->registry_data.pcie_dpm_key_disabled =
hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
data->registry_data.dcefclk_dpm_key_disabled =
hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
data->registry_data.power_containment_support = 1;
data->registry_data.enable_pkg_pwr_tracking_feature = 1;
data->registry_data.enable_tdc_limit_feature = 1;
}
data->registry_data.clock_stretcher_support =
hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
data->registry_data.ulv_support =
hwmgr->feature_mask & PP_ULV_MASK ? true : false;
data->registry_data.sclk_deep_sleep_support =
hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
data->registry_data.disable_water_mark = 0;
data->registry_data.fan_control_support = 1;
data->registry_data.thermal_support = 1;
data->registry_data.fw_ctf_enabled = 1;
data->registry_data.avfs_support =
hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
data->registry_data.led_dpm_enabled = 1;
data->registry_data.vr0hot_enabled = 1;
data->registry_data.vr1hot_enabled = 1;
data->registry_data.regulator_hot_gpio_support = 1;
data->registry_data.didt_support = 1;
if (data->registry_data.didt_support) {
data->registry_data.didt_mode = 6;
data->registry_data.sq_ramping_support = 1;
data->registry_data.db_ramping_support = 0;
data->registry_data.td_ramping_support = 0;
data->registry_data.tcp_ramping_support = 0;
data->registry_data.dbr_ramping_support = 0;
data->registry_data.edc_didt_support = 1;
data->registry_data.gc_didt_support = 0;
data->registry_data.psm_didt_support = 0;
}
data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
}
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct amdgpu_device *adev = hwmgr->adev;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPatchPowerState);
if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UnTabledHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ODFuzzyFanControlSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicPowerManagement);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMC);
/* power tune caps */
/* assume disabled */
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DiDtSupport);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DiDtEDCEnable);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_GCEDC);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PSM);
if (data->registry_data.didt_support) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
if (data->registry_data.sq_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
if (data->registry_data.db_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
if (data->registry_data.td_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
if (data->registry_data.tcp_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
if (data->registry_data.dbr_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
if (data->registry_data.edc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
if (data->registry_data.gc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
if (data->registry_data.psm_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
}
if (data->registry_data.power_containment_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
if (table_info->tdp_table->usClockStretchAmount &&
data->registry_data.clock_stretcher_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEDPM);
return 0;
}
static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct vega10_odn_vddc_lookup_table *od_lookup_table;
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
int result;
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
}
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
for (i = 0; i < vddc_lookup_table->count; i++)
od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
od_lookup_table->count = vddc_lookup_table->count;
dep_table[0] = table_info->vdd_dep_on_sclk;
dep_table[1] = table_info->vdd_dep_on_mclk;
dep_table[2] = table_info->vdd_dep_on_socclk;
od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
for (i = 0; i < 3; i++)
smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
odn_table->min_vddc = dep_table[0]->entries[0].vddc;
i = od_table[2]->count - 1;
od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
hwmgr->platform_descriptor.overdriveLimit.memoryClock :
od_table[2]->entries[i].clk;
od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
odn_table->max_vddc :
od_table[2]->entries[i].vddc;
return 0;
}
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
int i;
uint32_t sub_vendor_id, hw_revision;
uint32_t top32, bottom32;
struct amdgpu_device *adev = hwmgr->adev;
vega10_initialize_power_tune_defaults(hwmgr);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_id = 0xffff;
data->smu_features[i].smu_feature_bitmap = 1 << i;
data->smu_features[i].enabled = false;
data->smu_features[i].supported = false;
}
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
FEATURE_DPM_PREFETCHER_BIT;
data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
FEATURE_DPM_GFXCLK_BIT;
data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
FEATURE_DPM_UCLK_BIT;
data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
FEATURE_DPM_SOCCLK_BIT;
data->smu_features[GNLD_DPM_UVD].smu_feature_id =
FEATURE_DPM_UVD_BIT;
data->smu_features[GNLD_DPM_VCE].smu_feature_id =
FEATURE_DPM_VCE_BIT;
data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
FEATURE_DPM_MP0CLK_BIT;
data->smu_features[GNLD_DPM_LINK].smu_feature_id =
FEATURE_DPM_LINK_BIT;
data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
FEATURE_DPM_DCEFCLK_BIT;
data->smu_features[GNLD_ULV].smu_feature_id =
FEATURE_ULV_BIT;
data->smu_features[GNLD_AVFS].smu_feature_id =
FEATURE_AVFS_BIT;
data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
FEATURE_DS_GFXCLK_BIT;
data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
FEATURE_DS_SOCCLK_BIT;
data->smu_features[GNLD_DS_LCLK].smu_feature_id =
FEATURE_DS_LCLK_BIT;
data->smu_features[GNLD_PPT].smu_feature_id =
FEATURE_PPT_BIT;
data->smu_features[GNLD_TDC].smu_feature_id =
FEATURE_TDC_BIT;
data->smu_features[GNLD_THERMAL].smu_feature_id =
FEATURE_THERMAL_BIT;
data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
FEATURE_GFX_PER_CU_CG_BIT;
data->smu_features[GNLD_RM].smu_feature_id =
FEATURE_RM_BIT;
data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
FEATURE_DS_DCEFCLK_BIT;
data->smu_features[GNLD_ACDC].smu_feature_id =
FEATURE_ACDC_BIT;
data->smu_features[GNLD_VR0HOT].smu_feature_id =
FEATURE_VR0HOT_BIT;
data->smu_features[GNLD_VR1HOT].smu_feature_id =
FEATURE_VR1HOT_BIT;
data->smu_features[GNLD_FW_CTF].smu_feature_id =
FEATURE_FW_CTF_BIT;
data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
FEATURE_LED_DISPLAY_BIT;
data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
FEATURE_FAN_CONTROL_BIT;
data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
if (!data->registry_data.prefetcher_dpm_key_disabled)
data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
if (!data->registry_data.sclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_GFXCLK].supported = true;
if (!data->registry_data.mclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_UCLK].supported = true;
if (!data->registry_data.socclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_SOCCLK].supported = true;
if (PP_CAP(PHM_PlatformCaps_UVDDPM))
data->smu_features[GNLD_DPM_UVD].supported = true;
if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
data->smu_features[GNLD_DPM_LINK].supported = true;
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
data->registry_data.sclk_deep_sleep_support) {
data->smu_features[GNLD_DS_GFXCLK].supported = true;
data->smu_features[GNLD_DS_SOCCLK].supported = true;
data->smu_features[GNLD_DS_LCLK].supported = true;
data->smu_features[GNLD_DS_DCEFCLK].supported = true;
}
if (data->registry_data.enable_pkg_pwr_tracking_feature)
data->smu_features[GNLD_PPT].supported = true;
if (data->registry_data.enable_tdc_limit_feature)
data->smu_features[GNLD_TDC].supported = true;
if (data->registry_data.thermal_support)
data->smu_features[GNLD_THERMAL].supported = true;
if (data->registry_data.fan_control_support)
data->smu_features[GNLD_FAN_CONTROL].supported = true;
if (data->registry_data.fw_ctf_enabled)
data->smu_features[GNLD_FW_CTF].supported = true;
if (data->registry_data.avfs_support)
data->smu_features[GNLD_AVFS].supported = true;
if (data->registry_data.led_dpm_enabled)
data->smu_features[GNLD_LED_DISPLAY].supported = true;
if (data->registry_data.vr1hot_enabled)
data->smu_features[GNLD_VR1HOT].supported = true;
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetSmuVersion,
&hwmgr->smu_version);
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
if (data->registry_data.didt_support)
data->smu_features[GNLD_DIDT].supported = true;
hw_revision = adev->pdev->revision;
sub_vendor_id = adev->pdev->subsystem_vendor;
if ((hwmgr->chip_id == 0x6862 ||
hwmgr->chip_id == 0x6861 ||
hwmgr->chip_id == 0x6868) &&
(hw_revision == 0) &&
(sub_vendor_id != 0x1002))
data->smu_features[GNLD_PCC_LIMIT].supported = true;
/* Get the SN to turn into a Unique ID */
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *socclk)
{
uint8_t entry_id;
uint8_t voltage_id;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(lookup_table->count != 0,
"Lookup table is empty",
return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and socclk */
for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}
PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
"Can't find requested voltage id in vdd_dep_on_socclk table!",
return -EINVAL);
*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
return 0;
}
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
/**
* vega10_get_evv_voltages - Get Leakage VDDC based on leakage ID.
*
* @hwmgr: the address of the powerplay hardware manager.
* return: always 0.
*/
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint16_t vv_id;
uint32_t vddc = 0;
uint16_t i, j;
uint32_t sclk = 0;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
table_info->vdd_dep_on_socclk;
int result;
for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (!vega10_get_socclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
for (j = 1; j < socclk_table->count; j++) {
if (socclk_table->entries[j].clk == sclk &&
socclk_table->entries[j].cks_enable == 0) {
sclk += 5000;
break;
}
}
}
PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
"Error retrieving EVV voltage value!",
continue);
/* need to make sure vddc is less than 2V or else it could burn the ASIC */
PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
"Invalid VDDC value", result = -EINVAL);
/* the voltage should not be zero nor equal to leakage ID */
if (vddc != 0 && vddc != vv_id) {
data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
data->vddc_leakage.count++;
}
}
}
return 0;
}
/**
* vega10_patch_with_vdd_leakage - Change virtual leakage voltage to actual value.
*
* @hwmgr: the address of the powerplay hardware manager.
* @voltage: pointer to changing voltage
* @leakage_table: pointer to leakage table
*/
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
{
uint32_t index;
/* search for leakage voltage ID 0xff01 ~ 0xff08 */
for (index = 0; index < leakage_table->count; index++) {
/* if this voltage matches a leakage voltage ID */
/* patch with actual leakage voltage */
if (leakage_table->leakage_id[index] == *voltage) {
*voltage = leakage_table->actual_voltage[index];
break;
}
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
* vega10_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
*
* @hwmgr: the address of the powerplay hardware manager.
* @lookup_table: pointer to voltage lookup table
* @leakage_table: pointer to leakage table
* return: always 0
*/
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
struct vega10_leakage_voltage *leakage_table)
{
uint32_t i;
for (i = 0; i < lookup_table->count; i++)
vega10_patch_with_vdd_leakage(hwmgr,
&lookup_table->entries[i].us_vdd, leakage_table);
return 0;
}
static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
uint16_t *vddc)
{
vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
return 0;
}
#endif
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
struct pp_hwmgr *hwmgr)
{
uint8_t entry_id, voltage_id;
unsigned i;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
table_info->vdd_dep_on_mclk;
for (i = 0; i < 6; i++) {
struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
switch (i) {
case 0: vdt = table_info->vdd_dep_on_socclk; break;
case 1: vdt = table_info->vdd_dep_on_sclk; break;
case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
case 3: vdt = table_info->vdd_dep_on_pixclk; break;
case 4: vdt = table_info->vdd_dep_on_dispclk; break;
case 5: vdt = table_info->vdd_dep_on_phyclk; break;
default: return -EINVAL; /* defensive; i only runs 0..5 */
}
for (entry_id = 0; entry_id < vdt->count; entry_id++) {
voltage_id = vdt->entries[entry_id].vddInd;
vdt->entries[entry_id].vddc =
table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
}
}
for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
voltage_id = mm_table->entries[entry_id].vddcInd;
mm_table->entries[entry_id].vddc =
table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
}
for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
voltage_id = mclk_table->entries[entry_id].vddInd;
mclk_table->entries[entry_id].vddc =
table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
voltage_id = mclk_table->entries[entry_id].vddciInd;
mclk_table->entries[entry_id].vddci =
table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
voltage_id = mclk_table->entries[entry_id].mvddInd;
mclk_table->entries[entry_id].mvdd =
table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
}
return 0;
}
static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
table_size = lookup_table->count;
/* Sorting voltages */
for (i = 0; i < table_size - 1; i++) {
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
swap(lookup_table->entries[j - 1],
lookup_table->entries[j]);
}
}
}
return 0;
}
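/*
 * Worked example with invented values: a lookup table with us_vdd entries
 * {900, 850, 800} is reordered in place to {800, 850, 900}. The nested
 * loops are a simple swap-based insertion sort, ascending by us_vdd;
 * lookup tables are small, so O(n^2) is acceptable here.
 */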
static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
int result = 0;
int tmp_result;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
struct vega10_hwmgr *data = hwmgr->backend;
tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
table_info->vddc_lookup_table, &(data->vddc_leakage));
if (tmp_result)
result = tmp_result;
tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
if (tmp_result)
result = tmp_result;
#endif
tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
if (tmp_result)
result = tmp_result;
tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
if (tmp_result)
result = tmp_result;
return result;
}
static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
table_info->vdd_dep_on_socclk;
struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
"VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
"VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
table_info->max_clock_voltage_on_ac.sclk =
allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
table_info->max_clock_voltage_on_ac.mclk =
allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
table_info->max_clock_voltage_on_ac.vddc =
allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
table_info->max_clock_voltage_on_ac.vddci =
allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
table_info->max_clock_voltage_on_ac.sclk;
hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
table_info->max_clock_voltage_on_ac.mclk;
hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
table_info->max_clock_voltage_on_ac.vddc;
hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
table_info->max_clock_voltage_on_ac.vddci;
return 0;
}
static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
return 0;
}
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
struct vega10_hwmgr *data;
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct amdgpu_device *adev = hwmgr->adev;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
/* need to set voltage control types before EVV patching */
data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
/* VDDCR_SOC */
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
&vol_table)) {
config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
(vol_table.telemetry_offset & 0xff);
data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
}
} else {
kfree(hwmgr->backend);
hwmgr->backend = NULL;
PP_ASSERT_WITH_CODE(false,
"VDDCR_SOC is not SVID2!",
return -1);
}
/* MVDDC */
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
&vol_table)) {
config_telemetry |=
((vol_table.telemetry_slope << 24) & 0xff000000) |
((vol_table.telemetry_offset << 16) & 0xff0000);
data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
}
}
/* VDDCI_MEM */
if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
}
data->config_telemetry = config_telemetry;
vega10_set_features_platform_caps(hwmgr);
vega10_init_dpm_defaults(hwmgr);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
/* Get leakage voltage based on leakage ID. */
PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
"Get EVV Voltage Failed. Abort Driver loading!",
return -1);
#endif
/* Patch our voltage dependency table with actual leakage voltage
* We need to perform leakage translation before it's used by other functions
*/
vega10_complete_dependency_tables(hwmgr);
/* Parse pptable data read from VBIOS */
vega10_set_private_data_based_on_pptable(hwmgr);
data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
VEGA10_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
hwmgr->platform_descriptor.clockStep.engineClock = 500;
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
if (!hwmgr->not_vf)
return result;
/* Setup default Overdrive Fan control settings */
data->odn_fan_table.target_fan_speed =
hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
data->odn_fan_table.target_temperature =
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature;
data->odn_fan_table.min_performance_clock =
hwmgr->thermal_controller.advanceFanControlParameters.
ulMinFanSCLKAcousticLimit;
data->odn_fan_table.min_fan_limit =
hwmgr->thermal_controller.
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
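/*
 * The memory interleave setting is read back from the Data Fabric and
 * used below as an index into channel_number[] to derive the HBM
 * channel count.
 */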
data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
"Mem Channel Index Exceeded maximum!",
return -EINVAL);
return result;
}
static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->low_sclk_interrupt_threshold = 0;
return 0;
}
static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table table;
uint8_t i, j;
uint32_t mask = 0;
uint32_t tmp;
int32_t ret = 0;
ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
VOLTAGE_OBJ_GPIO_LUT, &table);
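/*
 * Pack the bit positions of the first three set bits of mask_low into
 * one byte each; these become the three LED pin numbers written to
 * the PPTable below.
 */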
if (!ret) {
tmp = table.mask_low;
for (i = 0, j = 0; i < 32; i++) {
if (tmp & 1) {
mask |= (uint32_t)(i << (8 * j));
if (++j >= 3)
break;
}
tmp >>= 1;
}
}
pp_table->LedPin0 = (uint8_t)(mask & 0xff);
pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
return 0;
}
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->not_vf)
return 0;
PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
"Failed to init sclk threshold!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
"Failed to set up led dpm config!",
return -EINVAL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays,
0,
NULL);
return 0;
}
/**
* vega10_trim_voltage_table - Remove repeated voltage values and create table with unique values.
*
* @hwmgr: the address of the powerplay hardware manager.
* @vol_table: pointer to the voltage table to trim in place
* return: 0 on success
*/
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_voltage_table *vol_table)
{
uint32_t i, j;
uint16_t vvalue;
bool found = false;
struct pp_atomfwctrl_voltage_table *table;
PP_ASSERT_WITH_CODE(vol_table,
"Voltage Table empty.", return -EINVAL);
table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
GFP_KERNEL);
if (!table)
return -ENOMEM;
table->mask_low = vol_table->mask_low;
table->phase_delay = vol_table->phase_delay;
for (i = 0; i < vol_table->count; i++) {
vvalue = vol_table->entries[i].value;
found = false;
for (j = 0; j < table->count; j++) {
if (vvalue == table->entries[j].value) {
found = true;
break;
}
}
if (!found) {
table->entries[table->count].value = vvalue;
table->entries[table->count].smio_low =
vol_table->entries[i].smio_low;
table->count++;
}
}
memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
kfree(table);
return 0;
}
static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table *dep_table,
struct pp_atomfwctrl_voltage_table *vol_table)
{
int i;
PP_ASSERT_WITH_CODE(dep_table->count,
"Voltage Dependency Table empty.",
return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = dep_table->count;
for (i = 0; i < vol_table->count; i++) {
vol_table->entries[i].value = dep_table->entries[i].mvdd;
vol_table->entries[i].smio_low = 0;
}
PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
vol_table),
"Failed to trim MVDD Table!",
return -1);
return 0;
}
static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table *dep_table,
struct pp_atomfwctrl_voltage_table *vol_table)
{
uint32_t i;
PP_ASSERT_WITH_CODE(dep_table->count,
"Voltage Dependency Table empty.",
return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = dep_table->count;
for (i = 0; i < dep_table->count; i++) {
vol_table->entries[i].value = dep_table->entries[i].vddci;
vol_table->entries[i].smio_low = 0;
}
PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
"Failed to trim VDDCI table.",
return -1);
return 0;
}
static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table *dep_table,
struct pp_atomfwctrl_voltage_table *vol_table)
{
int i;
PP_ASSERT_WITH_CODE(dep_table->count,
"Voltage Dependency Table empty.",
return -EINVAL);
vol_table->mask_low = 0;
vol_table->phase_delay = 0;
vol_table->count = dep_table->count;
for (i = 0; i < vol_table->count; i++) {
vol_table->entries[i].value = dep_table->entries[i].vddc;
vol_table->entries[i].smio_low = 0;
}
return 0;
}
/* ---- Voltage Tables ----
* If the voltage table is bigger than what fits into the SMC state
* table, keep only the highest entries.
*/
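/*
 * Example: with count = 20 and max_vol_steps = 16, diff = 4, so
 * entries[4..19] are shifted down to entries[0..15] and the four
 * lowest-voltage entries are dropped.
 */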
static void vega10_trim_voltage_table_to_fit_state_table(
struct pp_hwmgr *hwmgr,
uint32_t max_vol_steps,
struct pp_atomfwctrl_voltage_table *vol_table)
{
unsigned int i, diff;
if (vol_table->count <= max_vol_steps)
return;
diff = vol_table->count - max_vol_steps;
for (i = 0; i < max_vol_steps; i++)
vol_table->entries[i] = vol_table->entries[i + diff];
vol_table->count = max_vol_steps;
}
/**
* vega10_construct_voltage_tables - Create Voltage Tables.
*
* @hwmgr: the address of the powerplay hardware manager.
* return: 0 on success
*/
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
int result;
if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
result = vega10_get_mvdd_voltage_table(hwmgr,
table_info->vdd_dep_on_mclk,
&(data->mvdd_voltage_table));
PP_ASSERT_WITH_CODE(!result,
"Failed to retrieve MVDDC table!",
return result);
}
if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
result = vega10_get_vddci_voltage_table(hwmgr,
table_info->vdd_dep_on_mclk,
&(data->vddci_voltage_table));
PP_ASSERT_WITH_CODE(!result,
"Failed to retrieve VDDCI_MEM table!",
return result);
}
if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
result = vega10_get_vdd_voltage_table(hwmgr,
table_info->vdd_dep_on_sclk,
&(data->vddc_voltage_table));
PP_ASSERT_WITH_CODE(!result,
"Failed to retrieve VDDCR_SOC table!",
return result);
}
PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
"Too many voltage values for VDDC. Trimming to fit state table.",
vega10_trim_voltage_table_to_fit_state_table(hwmgr,
16, &(data->vddc_voltage_table)));
PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
"Too many voltage values for VDDCI. Trimming to fit state table.",
vega10_trim_voltage_table_to_fit_state_table(hwmgr,
16, &(data->vddci_voltage_table)));
PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
"Too many voltage values for MVDD. Trimming to fit state table.",
vega10_trim_voltage_table_to_fit_state_table(hwmgr,
16, &(data->mvdd_voltage_table)));
return 0;
}
/*
* vega10_init_dpm_state
* Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
*
* @dpm_state: the address of the DPM table to initialize.
* return: None.
*/
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
dpm_state->soft_min_level = 0xff;
dpm_state->soft_max_level = 0xff;
dpm_state->hard_min_level = 0xff;
dpm_state->hard_max_level = 0xff;
}
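/*
 * Build a single DPM table from a clock/voltage dependency table,
 * keeping clocks in non-decreasing order (entries lower than the
 * previously accepted clock are skipped).
 */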
static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *dpm_table,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
int i;
dpm_table->count = 0;
for (i = 0; i < dep_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
dep_table->entries[i].clk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_table->entries[i].clk;
dpm_table->dpm_levels[dpm_table->count].enabled = true;
dpm_table->count++;
}
}
}
static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_pcie_table *bios_pcie_table =
table_info->pcie_table;
uint32_t i;
PP_ASSERT_WITH_CODE(bios_pcie_table->count,
"Incorrect number of PCIE States from VBIOS!",
return -1);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (data->registry_data.pcieSpeedOverride)
pcie_table->pcie_gen[i] =
data->registry_data.pcieSpeedOverride;
else
pcie_table->pcie_gen[i] =
bios_pcie_table->entries[i].gen_speed;
if (data->registry_data.pcieLaneOverride)
pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
data->registry_data.pcieLaneOverride);
else
pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
bios_pcie_table->entries[i].lane_width);
if (data->registry_data.pcieClockOverride)
pcie_table->lclk[i] =
data->registry_data.pcieClockOverride;
else
pcie_table->lclk[i] =
bios_pcie_table->entries[i].pcie_sclk;
}
pcie_table->count = NUM_LINK_LEVELS;
return 0;
}
/*
* Initialize all DPM state tables for the SMU based on the dependency
* tables. The dynamic state patching function will then trim these
* tables to the allowed range based on the power policy or external
* client requests, such as UVD requests.
*/
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct vega10_single_dpm_table *dpm_table;
uint32_t i;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
table_info->vdd_dep_on_socclk;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
table_info->vdd_dep_on_sclk;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
table_info->vdd_dep_on_mclk;
struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
table_info->mm_dep_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
table_info->vdd_dep_on_dcefclk;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
table_info->vdd_dep_on_pixclk;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
table_info->vdd_dep_on_dispclk;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
table_info->vdd_dep_on_phyclk;
PP_ASSERT_WITH_CODE(dep_soc_table,
"SOCCLK dependency table is missing. This table is mandatory",
return -EINVAL);
PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
"SOCCLK dependency table is empty. This table is mandatory",
return -EINVAL);
PP_ASSERT_WITH_CODE(dep_gfx_table,
"GFXCLK dependency table is missing. This table is mandatory",
return -EINVAL);
PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
"GFXCLK dependency table is empty. This table is mandatory",
return -EINVAL);
PP_ASSERT_WITH_CODE(dep_mclk_table,
"MCLK dependency table is missing. This table is mandatory",
return -EINVAL);
PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
"MCLK dependency table has to have is missing. This table is mandatory",
return -EINVAL);
/* Initialize SOCCLK DPM table based on the allowed SOCCLK values */
dpm_table = &(data->dpm_table.soc_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_soc_table);
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.gfx_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_gfx_table);
if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
hwmgr->platform_descriptor.overdriveLimit.engineClock =
dpm_table->dpm_levels[dpm_table->count-1].value;
vega10_init_dpm_state(&(dpm_table->dpm_state));
/* Initialize Mclk DPM table based on the allowed Mclk values */
data->dpm_table.mem_table.count = 0;
dpm_table = &(data->dpm_table.mem_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_mclk_table);
if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
dpm_table->dpm_levels[dpm_table->count-1].value;
vega10_init_dpm_state(&(dpm_table->dpm_state));
data->dpm_table.eclk_table.count = 0;
dpm_table = &(data->dpm_table.eclk_table);
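/*
 * For the multimedia clocks only the lowest level is marked enabled
 * by default ("enabled = i == 0" below); the higher levels are still
 * recorded in the table.
 */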
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].eclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].eclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
data->dpm_table.vclk_table.count = 0;
data->dpm_table.dclk_table.count = 0;
dpm_table = &(data->dpm_table.vclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].vclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].vclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.dclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].dclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].dclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
/* Assume there is no headless Vega10 for now */
dpm_table = &(data->dpm_table.dcef_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_dcef_table);
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.pixel_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_pix_table);
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.display_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_disp_table);
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.phy_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_phy_table);
vega10_init_dpm_state(&(dpm_table->dpm_state));
vega10_setup_default_pcie_table(hwmgr);
/* Zero out the saved copy of the CUSTOM profile
* This will be checked when trying to set the profile
* and will require that new values be passed in
*/
data->custom_profile_mode[0] = 0;
data->custom_profile_mode[1] = 0;
data->custom_profile_mode[2] = 0;
data->custom_profile_mode[3] = 0;
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
return 0;
}
/*
* vega10_populate_ulv_state
* Function to provide parameters for the Ultra Low Voltage state to the SMC.
*
* @hwmgr: the address of the hardware manager.
* return: Always 0.
*/
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
data->smc_state_table.pp_table.UlvOffsetVid =
(uint8_t)table_info->us_ulv_voltage_offset;
data->smc_state_table.pp_table.UlvSmnclkDid =
(uint8_t)(table_info->us_ulv_smnclk_did);
data->smc_state_table.pp_table.UlvMp1clkDid =
(uint8_t)(table_info->us_ulv_mp1clk_did);
data->smc_state_table.pp_table.UlvGfxclkBypass =
(uint8_t)(table_info->us_ulv_gfxclk_bypass);
data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
(uint8_t)(data->vddc_voltage_table.psi0_enable);
data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
(uint8_t)(data->vddc_voltage_table.psi1_enable);
return 0;
}
static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
uint32_t lclock, uint8_t *curr_lclk_did)
{
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
lclock, &dividers),
"Failed to get LCLK clock settings from VBIOS!",
return -1);
*curr_lclk_did = dividers.ulDid;
return 0;
}
static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
int i;
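/*
 * Map the strongest advertised CAIL capability to the SMC encoding:
 * pcie_gen 0..3 corresponds to Gen1..Gen4, and pcie_width 1..6 to
 * x1, x2, x4, x8, x12 and x16 respectively.
 */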
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_gen = 1;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_gen = 0;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 6;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 5;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 3;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pp_table->PcieGenSpeed[i] > pcie_gen)
pp_table->PcieGenSpeed[i] = pcie_gen;
if (pp_table->PcieLaneCount[i] > pcie_width)
pp_table->PcieLaneCount[i] = pcie_width;
}
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
}
return 0;
}
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
int result = -1;
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_pcie_table *pcie_table =
&(data->dpm_table.pcie_table);
uint32_t i, j;
for (i = 0; i < pcie_table->count; i++) {
pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
result = vega10_populate_single_lclk_level(hwmgr,
pcie_table->lclk[i], &(pp_table->LclkDid[i]));
if (result) {
pr_info("Populate LClock Level %d Failed!\n", i);
return result;
}
}
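/* Pad the remaining SMC link levels with the last valid entry. */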
j = i - 1;
while (i < NUM_LINK_LEVELS) {
pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
result = vega10_populate_single_lclk_level(hwmgr,
pcie_table->lclk[j], &(pp_table->LclkDid[i]));
if (result) {
pr_info("Populate LClock Level %d Failed!\n", i);
return result;
}
i++;
}
return result;
}
/**
* vega10_populate_single_gfx_level - Populates single SMC GFXCLK structure
* using the provided engine clock
*
* @hwmgr: the address of the hardware manager
* @gfx_clock: the GFX clock to use to populate the structure.
* @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure.
* @acg_freq: ACG frequency to return (MHz)
*/
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
uint32_t *acg_freq)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
struct vega10_hwmgr *data = hwmgr->backend;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t gfx_max_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
uint32_t i = 0;
if (hwmgr->od_enabled)
dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
&(data->odn_dpm_table.vdd_dep_on_sclk);
else
dep_on_sclk = table_info->vdd_dep_on_sclk;
PP_ASSERT_WITH_CODE(dep_on_sclk,
"Invalid SOC_VDD-GFX_CLK Dependency Table!",
return -EINVAL);
if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
else {
for (i = 0; i < dep_on_sclk->count; i++) {
if (dep_on_sclk->entries[i].clk == gfx_clock)
break;
}
PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
return -EINVAL);
}
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
gfx_clock, &dividers),
"Failed to get GFX Clock settings from VBIOS!",
return -EINVAL);
/* Feedback Multiplier: bits [8:0] integer, bits [15:12] post divider, bits [31:16] fraction */
current_gfxclk_level->FbMult =
cpu_to_le32(dividers.ulPll_fb_mult);
/* Spread FB Multiplier: bits [8:0] integer, bits [31:16] fraction */
current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
current_gfxclk_level->SsFbMult =
cpu_to_le32(dividers.ulPll_ss_fbsmult);
current_gfxclk_level->SsSlewFrac =
cpu_to_le16(dividers.usPll_ss_slew_frac);
current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
*acg_freq = gfx_clock / 100; /* 10 kHz units to MHz */
return 0;
}
/**
* vega10_populate_single_soc_level - Populates single SMC SOCCLK structure
* using the provided clock.
*
* @hwmgr: the address of the hardware manager.
* @soc_clock: the SOC clock to use to populate the structure.
* @current_soc_did: DFS divider to pass back to caller
* @current_vol_index: index of current VDD to pass back to caller
* return: 0 on success
*/
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
uint8_t *current_vol_index)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t i;
if (hwmgr->od_enabled) {
dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
&data->odn_dpm_table.vdd_dep_on_socclk;
for (i = 0; i < dep_on_soc->count; i++) {
if (dep_on_soc->entries[i].clk >= soc_clock)
break;
}
} else {
dep_on_soc = table_info->vdd_dep_on_socclk;
for (i = 0; i < dep_on_soc->count; i++) {
if (dep_on_soc->entries[i].clk == soc_clock)
break;
}
}
PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
return -EINVAL);
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
soc_clock, &dividers),
"Failed to get SOC Clock settings from VBIOS!",
return -EINVAL);
*current_soc_did = (uint8_t)dividers.ulDid;
*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
return 0;
}
/**
* vega10_populate_all_graphic_levels - Populates all SMC SCLK levels' structure
* based on the trimmed allowed dpm engine clock states
*
* @hwmgr: the address of the hardware manager
*/
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
int result = 0;
uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->GfxclkLevel[i]),
&(pp_table->AcgFreqTable[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_GFXCLK_DPM_LEVELS) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->GfxclkLevel[i]),
&(pp_table->AcgFreqTable[i]));
if (result)
return result;
i++;
}
pp_table->GfxclkSlewRate =
cpu_to_le16(table_info->us_gfxclk_slew_rate);
dpm_table = &(data->dpm_table.soc_table);
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->SocclkDid[i]),
&(pp_table->SocDpmVoltageIndex[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_SOCCLK_DPM_LEVELS) {
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->SocclkDid[i]),
&(pp_table->SocDpmVoltageIndex[i]));
if (result)
return result;
i++;
}
return result;
}
static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info = hwmgr->pptable;
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
uint8_t soc_vid = 0;
uint32_t i, max_vddc_level;
if (hwmgr->od_enabled)
vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
else
vddc_lookup_table = table_info->vddc_lookup_table;
max_vddc_level = vddc_lookup_table->count;
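/*
 * convert_to_vid() translates each lookup-table voltage into the SVI2
 * VID encoding the SMC expects; the last VID is then replicated to
 * fill the remaining MAX_REGULAR_DPM_NUMBER entries.
 */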
for (i = 0; i < max_vddc_level; i++) {
soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
pp_table->SocVid[i] = soc_vid;
}
while (i < MAX_REGULAR_DPM_NUMBER) {
pp_table->SocVid[i] = soc_vid;
i++;
}
}
/*
* Populates a single SMC memory level (UCLK) structure using the provided clock.
*
* @hwmgr: the address of the hardware manager.
* @mem_clock: the memory clock to use to populate the structure.
* return: 0 on success.
*/
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
uint32_t mem_clock, uint8_t *current_mem_vid,
PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t mem_max_clock =
hwmgr->platform_descriptor.overdriveLimit.memoryClock;
uint32_t i = 0;
if (hwmgr->od_enabled)
dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
&data->odn_dpm_table.vdd_dep_on_mclk;
else
dep_on_mclk = table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(dep_on_mclk,
"Invalid SOC_VDD-UCLK Dependency Table!",
return -EINVAL);
if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
} else {
for (i = 0; i < dep_on_mclk->count; i++) {
if (dep_on_mclk->entries[i].clk == mem_clock)
break;
}
PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
return -EINVAL);
}
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
"Failed to get UCLK settings from VBIOS!",
return -1);
*current_mem_vid =
(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
*current_mem_soc_vind =
(uint8_t)(dep_on_mclk->entries[i].vddInd);
current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
current_memclk_level->Did = (uint8_t)(dividers.ulDid);
PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
"Invalid Divider ID!",
return -EINVAL);
return 0;
}
/**
* vega10_populate_all_memory_levels - Populates all SMC MCLK levels' structure
* based on the trimmed allowed dpm memory clock states.
*
* @hwmgr: the address of the hardware manager.
* return: 0 on success.
*/
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->MemVid[i]),
&(pp_table->UclkLevel[i]),
&(pp_table->MemSocVoltageIndex[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_UCLK_DPM_LEVELS) {
result = vega10_populate_single_memory_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->MemVid[i]),
&(pp_table->UclkLevel[i]),
&(pp_table->MemSocVoltageIndex[i]));
if (result)
return result;
i++;
}
pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
(uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
return result;
}
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
DSPCLK_e disp_clock)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)
(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
uint32_t i;
uint16_t clk = 0, vddc = 0;
uint8_t vid = 0;
switch (disp_clock) {
case DSPCLK_DCEFCLK:
dep_table = table_info->vdd_dep_on_dcefclk;
break;
case DSPCLK_DISPCLK:
dep_table = table_info->vdd_dep_on_dispclk;
break;
case DSPCLK_PIXCLK:
dep_table = table_info->vdd_dep_on_pixclk;
break;
case DSPCLK_PHYCLK:
dep_table = table_info->vdd_dep_on_phyclk;
break;
default:
return -1;
}
PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
"Number Of Entries Exceeded maximum!",
return -1);
for (i = 0; i < dep_table->count; i++) {
clk = (uint16_t)(dep_table->entries[i].clk / 100);
vddc = table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd;
vid = (uint8_t)convert_to_vid(vddc);
pp_table->DisplayClockTable[disp_clock][i].Freq =
cpu_to_le16(clk);
pp_table->DisplayClockTable[disp_clock][i].Vid =
cpu_to_le16(vid);
}
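/* Pad the remaining display clock levels with the last valid Freq/Vid pair. */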
while (i < NUM_DSPCLK_LEVELS) {
pp_table->DisplayClockTable[disp_clock][i].Freq =
cpu_to_le16(clk);
pp_table->DisplayClockTable[disp_clock][i].Vid =
cpu_to_le16(vid);
i++;
}
return 0;
}
static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
{
uint32_t i;
for (i = 0; i < DSPCLK_COUNT; i++) {
PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
"Failed to populate Clock in DisplayClockTable!",
return -1);
}
return 0;
}
static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
uint32_t eclock, uint8_t *current_eclk_did,
uint8_t *current_soc_vol)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
table_info->mm_dep_table;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t i;
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
eclock, &dividers),
"Failed to get ECLK clock settings from VBIOS!",
return -1);
*current_eclk_did = (uint8_t)dividers.ulDid;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].eclk == eclock)
*current_soc_vol = dep_table->entries[i].vddcInd;
}
return 0;
}
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
int result = -EINVAL;
uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_eclock_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->EclkDid[i]),
&(pp_table->VceDpmVoltageIndex[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_VCE_DPM_LEVELS) {
result = vega10_populate_single_eclock_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->EclkDid[i]),
&(pp_table->VceDpmVoltageIndex[i]));
if (result)
return result;
i++;
}
return result;
}
static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
uint32_t vclock, uint8_t *current_vclk_did)
{
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
vclock, &dividers),
"Failed to get VCLK clock settings from VBIOS!",
return -EINVAL);
*current_vclk_did = (uint8_t)dividers.ulDid;
return 0;
}
static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
uint32_t dclock, uint8_t *current_dclk_did)
{
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
dclock, &dividers),
"Failed to get DCLK clock settings from VBIOS!",
return -EINVAL);
*current_dclk_did = (uint8_t)dividers.ulDid;
return 0;
}
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *vclk_dpm_table =
&(data->dpm_table.vclk_table);
struct vega10_single_dpm_table *dclk_dpm_table =
&(data->dpm_table.dclk_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
table_info->mm_dep_table;
int result = -EINVAL;
uint32_t i, j;
for (i = 0; i < vclk_dpm_table->count; i++) {
result = vega10_populate_single_vclock_level(hwmgr,
vclk_dpm_table->dpm_levels[i].value,
&(pp_table->VclkDid[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_UVD_DPM_LEVELS) {
result = vega10_populate_single_vclock_level(hwmgr,
vclk_dpm_table->dpm_levels[j].value,
&(pp_table->VclkDid[i]));
if (result)
return result;
i++;
}
for (i = 0; i < dclk_dpm_table->count; i++) {
result = vega10_populate_single_dclock_level(hwmgr,
dclk_dpm_table->dpm_levels[i].value,
&(pp_table->DclkDid[i]));
if (result)
return result;
}
j = i - 1;
while (i < NUM_UVD_DPM_LEVELS) {
result = vega10_populate_single_dclock_level(hwmgr,
dclk_dpm_table->dpm_levels[j].value,
&(pp_table->DclkDid[i]));
if (result)
return result;
i++;
}
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].vclk ==
vclk_dpm_table->dpm_levels[i].value &&
dep_table->entries[i].dclk ==
dclk_dpm_table->dpm_levels[i].value)
pp_table->UvdDpmVoltageIndex[i] =
dep_table->entries[i].vddcInd;
else
return -1;
}
j = i - 1;
while (i < NUM_UVD_DPM_LEVELS) {
pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
i++;
}
return 0;
}
static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_sclk;
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
}
return 0;
}
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_sclk;
struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
int result = 0;
uint32_t i;
pp_table->MinVoltageVid = (uint8_t)0xff;
pp_table->MaxVoltageVid = (uint8_t)0;
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
pp_table->BtcGbVdroopTableCksOff.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
pp_table->BtcGbVdroopTableCksOn.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
pp_table->BtcGbVdroopTableCksOn.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
pp_table->BtcGbVdroopTableCksOn.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
pp_table->AvfsGbCksOn.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
pp_table->AvfsGbCksOn.m2 =
cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
pp_table->AvfsGbCksOn.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
pp_table->AvfsGbCksOn.m1_shift = 24;
pp_table->AvfsGbCksOn.m2_shift = 12;
pp_table->AvfsGbCksOn.b_shift = 0;
pp_table->OverrideAvfsGbCksOn =
avfs_params.ucEnableGbFuseTableCkson;
pp_table->AvfsGbCksOff.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
pp_table->AvfsGbCksOff.m2 =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
pp_table->AvfsGbCksOff.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
pp_table->AvfsGbCksOff.m1_shift = 24;
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
for (i = 0; i < dep_table->count; i++)
pp_table->StaticVoltageOffsetVid[i] =
convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
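/*
 * Each DisplayClock2Gfxclk entry holds coefficients of a quadratic
 * fit f(clk) = m1*clk^2 + m2*clk + b in fixed point (the *_shift
 * fields presumably encode the fixed-point scaling). Registry
 * overrides take precedence over the fused defaults when both the
 * a and b keys are set.
 */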
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_b)) {
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)data->disp_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
(int32_t)data->disp_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)data->disp_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)avfs_params.ulDispclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
(int32_t)avfs_params.ulDispclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)avfs_params.ulDispclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->dcef_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->dcef_clk_quad_eqn_b)) {
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)data->dcef_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
(int32_t)data->dcef_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)data->dcef_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)avfs_params.ulDcefclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
(int32_t)avfs_params.ulDcefclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)avfs_params.ulDcefclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->pixel_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->pixel_clk_quad_eqn_b)) {
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)data->pixel_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
(int32_t)data->pixel_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)data->pixel_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)avfs_params.ulPixelclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
(int32_t)avfs_params.ulPixelclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)avfs_params.ulPixelclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->phy_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->phy_clk_quad_eqn_b)) {
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)data->phy_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
(int32_t)data->phy_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)data->phy_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)avfs_params.ulPhyclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
(int32_t)avfs_params.ulPhyclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)avfs_params.ulPhyclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
pp_table->AcgAvfsGb.m1_shift = 24;
pp_table->AcgAvfsGb.m2_shift = 12;
pp_table->AcgAvfsGb.b_shift = 0;
} else {
data->smu_features[GNLD_AVFS].supported = false;
}
}
return 0;
}
static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t agc_btc_response;
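/*
 * ACG bring-up handshake: enable the DPM prefetcher, initialize ACG,
 * then run the built-in BTC. A response of 1 means the BTC passed, in
 * which case the loop mode selected by acg_loop_state is started
 * before the ACG feature itself is enabled.
 */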
if (data->smu_features[GNLD_ACG].supported) {
if (!vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
if (agc_btc_response == 1) {
if (data->acg_loop_state == 1)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
else if (data->acg_loop_state == 2)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
if (!vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
} else {
pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
data->smu_features[GNLD_ACG].enabled = false;
}
}
return 0;
}
static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_ACG].supported &&
data->smu_features[GNLD_ACG].enabled)
if (!vega10_enable_smc_features(hwmgr, false,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = false;
return 0;
}
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
int result;
result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
if (!result) {
if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
data->registry_data.regulator_hot_gpio_support) {
pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
} else {
pp_table->VR0HotGpio = 0;
pp_table->VR0HotPolarity = 0;
pp_table->VR1HotGpio = 0;
pp_table->VR1HotPolarity = 0;
}
if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
data->registry_data.ac_dc_switch_gpio_support) {
pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
} else {
pp_table->AcDcGpio = 0;
pp_table->AcDcPolarity = 0;
}
}
return result;
}
static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_AVFS].supported) {
/* Already enabled or disabled */
if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
return 0;
if (enable) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Enable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = true;
} else {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Disable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = false;
}
}
return 0;
}
static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
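/*
 * With a user VDDC override in effect AVFS is kept disabled
 * (presumably so the SMC does not adjust voltages underneath the
 * override); any other DPM table change toggles AVFS off and on so
 * it picks up the new tables.
 */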
if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
vega10_avfs_enable(hwmgr, false);
} else if (data->need_update_dpm_table) {
vega10_avfs_enable(hwmgr, false);
vega10_avfs_enable(hwmgr, true);
} else {
vega10_avfs_enable(hwmgr, true);
}
return 0;
}
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
int result = 0;
uint64_t serial_number = 0;
uint32_t top32, bottom32;
struct phm_fuses_default fuse;
struct vega10_hwmgr *data = hwmgr->backend;
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
avfs_fuse_table->VFT0_b = fuse.VFT0_b;
avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
avfs_fuse_table->VFT1_b = fuse.VFT1_b;
avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
avfs_fuse_table->VFT2_b = fuse.VFT2_b;
avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
AVFSFUSETABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload FuseOVerride!",
);
}
return result;
}
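/*
 * Compare the ODN (overdrive) dependency tables against the pptable
 * defaults and flag which DPM tables need to be rebuilt when a VDDC
 * value has been changed.
 */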
static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct phm_ppt_v2_information *table_info = hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
uint32_t i;
dep_table = table_info->vdd_dep_on_mclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
return;
}
}
dep_table = table_info->vdd_dep_on_sclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
return;
}
}
}
/**
* vega10_init_smc_table - Initializes the SMC table and uploads it
*
* @hwmgr: the address of the powerplay hardware manager.
* return: 0 on success
*/
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to setup default DPM tables!",
return result);
if (!hwmgr->not_vf)
return 0;
/* initialize ODN table */
if (hwmgr->od_enabled) {
if (odn_table->max_vddc) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
vega10_check_dpm_table_updated(hwmgr);
} else {
vega10_odn_initial_default_setting(hwmgr);
}
}
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
pp_table->MaxVidStep = voltage_table.max_vid_step;
pp_table->GfxDpmVoltageMode =
(uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
pp_table->SocDpmVoltageMode =
(uint8_t)(table_info->uc_soc_dpm_voltage_mode);
pp_table->UclkDpmVoltageMode =
(uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
pp_table->UvdDpmVoltageMode =
(uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
pp_table->VceDpmVoltageMode =
(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
pp_table->Mp0DpmVoltageMode =
(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
if (data->registry_data.ulv_support &&
table_info->us_ulv_voltage_offset) {
result = vega10_populate_ulv_state(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize ULV state!",
return result);
}
result = vega10_populate_smc_link_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Link Level!",
return result);
result = vega10_override_pcie_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to override pcie parameters!",
return result);
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Graphics Level!",
return result);
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Memory Level!",
return result);
vega10_populate_vddc_soc_levels(hwmgr);
result = vega10_populate_all_display_clock_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Display Level!",
return result);
result = vega10_populate_smc_vce_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize VCE Level!",
return result);
result = vega10_populate_smc_uvd_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize UVD Level!",
return result);
if (data->registry_data.clock_stretcher_support) {
result = vega10_populate_clock_stretcher_table(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to populate Clock Stretcher Table!",
return result);
}
result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
if (!result) {
data->vbios_boot_state.vddc = boot_up_values.usVddc;
data->vbios_boot_state.vddci = boot_up_values.usVddci;
data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
if (boot_up_values.usVddc) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
(boot_up_values.usVddc * 4),
NULL);
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
NULL);
}
result = vega10_populate_avfs_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize AVFS Parameters!",
return result);
result = vega10_populate_gpio_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize GPIO Parameters!",
return result);
pp_table->GfxclkAverageAlpha = (uint8_t)
(data->gfxclk_average_alpha);
pp_table->SocclkAverageAlpha = (uint8_t)
(data->socclk_average_alpha);
pp_table->UclkAverageAlpha = (uint8_t)
(data->uclk_average_alpha);
pp_table->GfxActivityAverageAlpha = (uint8_t)
(data->gfx_activity_average_alpha);
vega10_populate_and_upload_avfs_fuse_override(hwmgr);
result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
result = vega10_avfs_enable(hwmgr, true);
PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
vega10_acg_enable(hwmgr);
return 0;
}
static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_THERMAL].supported) {
if (data->smu_features[GNLD_THERMAL].enabled)
pr_info("THERMAL Feature Already enabled!");
PP_ASSERT_WITH_CODE(
!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"Enable THERMAL Feature Failed!",
return -1);
data->smu_features[GNLD_THERMAL].enabled = true;
}
return 0;
}
static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_THERMAL].supported) {
if (!data->smu_features[GNLD_THERMAL].enabled)
pr_info("THERMAL Feature Already disabled!");
PP_ASSERT_WITH_CODE(
!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"disable THERMAL Feature Failed!",
return -1);
data->smu_features[GNLD_THERMAL].enabled = false;
}
return 0;
}
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
if (data->smu_features[GNLD_VR0HOT].supported) {
PP_ASSERT_WITH_CODE(
!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
return -1);
data->smu_features[GNLD_VR0HOT].enabled = true;
} else {
if (data->smu_features[GNLD_VR1HOT].supported) {
PP_ASSERT_WITH_CODE(
!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
return -1);
data->smu_features[GNLD_VR1HOT].enabled = true;
}
}
}
return 0;
}
static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->registry_data.ulv_support) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"Enable ULV Feature Failed!",
return -1);
data->smu_features[GNLD_ULV].enabled = true;
}
return 0;
}
static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->registry_data.ulv_support) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"disable ULV Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_ULV].enabled = false;
}
return 0;
}
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_GFXCLK].enabled = true;
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to Enable DS_SOCCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_SOCCLK].enabled = true;
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to Enable DS_LCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_LCLK].enabled = true;
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to Enable DS_DCEFCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
}
return 0;
}
static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to disable DS_GFXCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_GFXCLK].enabled = false;
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to disable DS_ Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_SOCCLK].enabled = false;
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to disable DS_LCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_LCLK].enabled = false;
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to disable DS_DCEFCLK Feature Failed!",
return -EINVAL);
data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
}
return 0;
}
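/**
 * vega10_stop_dpm - Tell the SMC to disable the DPM features selected by @bitmap.
 *
 * Disables the LED display feature first, then collects every supported
 * and currently enabled feature matching @bitmap and disables them in a
 * single SMC request. Returns immediately for virtual functions.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @bitmap: bitmap of the DPM features to be disabled.
 * Return: 0 on success, -EINVAL if disabling the LED display feature fails.
 */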
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
if (!hwmgr->not_vf)
return 0;
	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to disable LED DPM feature failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = false;
}
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap & bitmap) {
if (data->smu_features[i].supported) {
if (data->smu_features[i].enabled) {
feature_mask |= data->smu_features[i].
smu_feature_bitmap;
data->smu_features[i].enabled = false;
}
}
}
}
vega10_enable_smc_features(hwmgr, false, feature_mask);
return 0;
}
/**
 * vega10_start_dpm - Tell the SMC to enable the supported DPM features.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @bitmap: bitmap of the features to be enabled.
 * Return: 0 when at least one DPM feature is successfully enabled.
 */
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, feature_mask = 0;
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap & bitmap) {
if (data->smu_features[i].supported) {
if (!data->smu_features[i].enabled) {
feature_mask |= data->smu_features[i].
smu_feature_bitmap;
data->smu_features[i].enabled = true;
}
}
}
}
if (vega10_enable_smc_features(hwmgr,
true, feature_mask)) {
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap &
feature_mask)
data->smu_features[i].enabled = false;
}
}
	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
if (data->vbios_boot_state.bsoc_vddc_lock) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage, 0,
NULL);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -1);
data->smu_features[GNLD_ACDC].enabled = true;
}
}
if (data->registry_data.pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}
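/**
 * vega10_enable_disable_PCC_limit_feature - Enable or disable the PCC limit feature.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @enable: true to enable the feature, false to disable it.
 * Return: 0 on success or when the feature is unsupported, -EINVAL otherwise.
 */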
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_PCC_LIMIT].supported) {
		if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
			pr_info("GNLD_PCC_LIMIT has already been %s\n",
					enable ? "enabled" : "disabled");
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
				"Attempt to Enable/Disable PCC Limit feature Failed!",
				return -EINVAL);
data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
}
return 0;
}
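/**
 * vega10_populate_umdpstate_clocks - Cache the UMD pstate clocks in the hwmgr.
 *
 * Picks the profiling sclk/mclk levels from the voltage dependency tables
 * (falling back to level 0 when the tables are too short), records the
 * top table entries as the peak clocks, and converts all values from
 * 10 kHz units to MHz.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 */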
static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
} else {
hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
}
hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
	/* make sure the output is in MHz (table entries are in 10 kHz units) */
hwmgr->pstate_sclk /= 100;
hwmgr->pstate_mclk /= 100;
hwmgr->pstate_sclk_peak /= 100;
hwmgr->pstate_mclk_peak /= 100;
}
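/**
 * vega10_enable_dpm_tasks - Bring up DPM on the ASIC.
 *
 * Configures telemetry and constructs the voltage tables, initializes the
 * SMC table, then enables thermal protection, VR hot, the deep sleep
 * master switch, the DPM features, DIDT (non-fatal on failure), power
 * containment and ULV as applicable; most steps are skipped for virtual
 * functions.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: 0 on success, otherwise the error code of the last failing step.
 */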
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
int tmp_result, result = 0;
if (hwmgr->not_vf) {
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
NULL);
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to construct voltage tables!",
result = tmp_result);
}
if (hwmgr->not_vf || hwmgr->pp_one_vf) {
tmp_result = vega10_init_smc_table(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to initialize SMC table!",
result = tmp_result);
}
if (hwmgr->not_vf) {
if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
tmp_result = vega10_enable_thermal_protection(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable thermal protection!",
result = tmp_result);
}
tmp_result = vega10_enable_vrhot_feature(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable VR hot feature!",
result = tmp_result);
tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable deep sleep master switch!",
result = tmp_result);
}
if (hwmgr->not_vf) {
tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to start DPM!", result = tmp_result);
}
if (hwmgr->not_vf) {
/* enable didt, do not abort if failed didt */
tmp_result = vega10_enable_didt_config(hwmgr);
PP_ASSERT(!tmp_result,
"Failed to enable didt config!");
}
tmp_result = vega10_enable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable power containment!",
result = tmp_result);
if (hwmgr->not_vf) {
tmp_result = vega10_power_control_set_level(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to power control set level!",
result = tmp_result);
tmp_result = vega10_enable_ulv(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable ULV!",
result = tmp_result);
}
vega10_populate_umdpstate_clocks(hwmgr);
return result;
}
static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
return sizeof(struct vega10_power_state);
}
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
void *state, struct pp_power_state *power_state,
void *pp_table, uint32_t classification_flag)
{
ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&(power_state->hardware));
struct vega10_performance_level *performance_level;
ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
(ATOM_Vega10_SOCCLK_Dependency_Table *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
(ATOM_Vega10_GFXCLK_Dependency_Table *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
(ATOM_Vega10_MCLK_Dependency_Table *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
/* The following fields are not initialized here:
* id orderedList allStatesList
*/
power_state->classification.ui_label =
(le16_to_cpu(state_entry->usClassification) &
ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
power_state->classification.flags = classification_flag;
/* NOTE: There is a classification2 flag in BIOS
* that is not being used right now
*/
power_state->classification.temporary_state = false;
power_state->classification.to_be_deleted = false;
power_state->validation.disallowOnDC =
((le32_to_cpu(state_entry->ulCapsAndSettings) &
ATOM_Vega10_DISALLOW_ON_DC) != 0);
power_state->display.disableFrameModulation = false;
power_state->display.limitRefreshrate = false;
power_state->display.enableVariBright =
((le32_to_cpu(state_entry->ulCapsAndSettings) &
ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
power_state->validation.supportedPowerLevels = 0;
power_state->uvd_clocks.VCLK = 0;
power_state->uvd_clocks.DCLK = 0;
power_state->temperatures.min = 0;
power_state->temperatures.max = 0;
performance_level = &(vega10_ps->performance_levels
[vega10_ps->performance_level_count++]);
PP_ASSERT_WITH_CODE(
(vega10_ps->performance_level_count <
NUM_GFXCLK_DPM_LEVELS),
"Performance levels exceeds SMC limit!",
return -1);
PP_ASSERT_WITH_CODE(
(vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -1);
/* Performance levels are arranged from low to high. */
performance_level->soc_clock = socclk_dep_table->entries
[state_entry->ucSocClockIndexLow].ulClk;
performance_level->gfx_clock = gfxclk_dep_table->entries
[state_entry->ucGfxClockIndexLow].ulClk;
performance_level->mem_clock = mclk_dep_table->entries
[state_entry->ucMemClockIndexLow].ulMemClk;
performance_level = &(vega10_ps->performance_levels
[vega10_ps->performance_level_count++]);
performance_level->soc_clock = socclk_dep_table->entries
[state_entry->ucSocClockIndexHigh].ulClk;
if (gfxclk_dep_table->ucRevId == 0) {
		/* In Vega10 PP one-VF mode, the gfx clk DPM needs to be
		 * capped at level 4 due to the 110 W power limit.
		 */
if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
performance_level->gfx_clock =
gfxclk_dep_table->entries[4].ulClk;
else
performance_level->gfx_clock = gfxclk_dep_table->entries
[state_entry->ucGfxClockIndexHigh].ulClk;
} else if (gfxclk_dep_table->ucRevId == 1) {
patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
performance_level->gfx_clock = patom_record_V2[4].ulClk;
else
performance_level->gfx_clock =
patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
}
performance_level->mem_clock = mclk_dep_table->entries
[state_entry->ucMemClockIndexHigh].ulMemClk;
return 0;
}
static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry_index, struct pp_power_state *state)
{
int result;
struct vega10_power_state *vega10_ps;
state->hardware.magic = PhwVega10_Magic;
vega10_ps = cast_phw_vega10_power_state(&state->hardware);
result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
vega10_get_pp_table_entry_callback_func);
if (result)
return result;
/*
* This is the earliest time we have all the dependency table
* and the VBIOS boot state
*/
/* set DC compatible flag if this state supports DC */
if (!state->validation.disallowOnDC)
vega10_ps->dc_compatible = true;
vega10_ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
vega10_ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
return 0;
}
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps)
{
return 0;
}
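/**
 * vega10_apply_state_adjust_rules - Clamp a requested power state to the
 * current platform limits.
 *
 * Caps the performance levels at the DC limits when not on AC power,
 * honors the display-driven minimum clocks, applies the stable-pstate
 * overrides, and decides whether MCLK switching must be disabled (in
 * which case the lowest MCLK that still meets the DAL latency budget is
 * selected).
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @request_ps: the power state being adjusted.
 * @current_ps: the currently active power state (unused).
 * Return: always 0.
 */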
static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
const struct pp_power_state *current_ps)
{
struct amdgpu_device *adev = hwmgr->adev;
struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&request_ps->hardware);
uint32_t sclk;
uint32_t mclk;
struct PP_Clocks minimum_clocks = {0};
bool disable_mclk_switching;
bool disable_mclk_switching_for_frame_lock;
bool disable_mclk_switching_for_vr;
bool force_mclk_high;
const struct phm_clock_and_voltage_limits *max_limits;
uint32_t i;
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
int32_t count;
uint32_t stable_pstate_sclk_dpm_percentage;
uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
uint32_t latency;
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
if (vega10_ps->performance_level_count != 2)
pr_info("VI should always have 2 performance levels");
max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
/* Cap clock DPM tables at DC MAX if it is in DC. */
if (!adev->pm.ac_power) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
if (vega10_ps->performance_levels[i].mem_clock >
max_limits->mclk)
vega10_ps->performance_levels[i].mem_clock =
max_limits->mclk;
if (vega10_ps->performance_levels[i].gfx_clock >
max_limits->sclk)
vega10_ps->performance_levels[i].gfx_clock =
max_limits->sclk;
}
}
/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
if (PP_CAP(PHM_PlatformCaps_StablePState)) {
stable_pstate_sclk_dpm_percentage =
data->registry_data.stable_pstate_sclk_dpm_percentage;
PP_ASSERT_WITH_CODE(
data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
"percent sclk value must range from 1% to 100%, setting default value",
stable_pstate_sclk_dpm_percentage = 75);
max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
stable_pstate_sclk = (max_limits->sclk *
stable_pstate_sclk_dpm_percentage) / 100;
for (count = table_info->vdd_dep_on_sclk->count - 1;
count >= 0; count--) {
if (stable_pstate_sclk >=
table_info->vdd_dep_on_sclk->entries[count].clk) {
stable_pstate_sclk =
table_info->vdd_dep_on_sclk->entries[count].clk;
break;
}
}
if (count < 0)
stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
stable_pstate_mclk = max_limits->mclk;
minimum_clocks.engineClock = stable_pstate_sclk;
minimum_clocks.memoryClock = stable_pstate_mclk;
}
disable_mclk_switching_for_frame_lock =
PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
disable_mclk_switching_for_vr =
PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_vr ||
force_mclk_high;
sclk = vega10_ps->performance_levels[0].gfx_clock;
mclk = vega10_ps->performance_levels[0].mem_clock;
if (sclk < minimum_clocks.engineClock)
sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
max_limits->sclk : minimum_clocks.engineClock;
if (mclk < minimum_clocks.memoryClock)
mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
max_limits->mclk : minimum_clocks.memoryClock;
vega10_ps->performance_levels[0].gfx_clock = sclk;
vega10_ps->performance_levels[0].mem_clock = mclk;
if (vega10_ps->performance_levels[1].gfx_clock <
vega10_ps->performance_levels[0].gfx_clock)
vega10_ps->performance_levels[0].gfx_clock =
vega10_ps->performance_levels[1].gfx_clock;
if (disable_mclk_switching) {
/* Set Mclk the max of level 0 and level 1 */
if (mclk < vega10_ps->performance_levels[1].mem_clock)
mclk = vega10_ps->performance_levels[1].mem_clock;
/* Find the lowest MCLK frequency that is within
* the tolerable latency defined in DAL
*/
latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
vega10_ps->performance_levels[0].mem_clock) &&
(data->mclk_latency_table.entries[i].frequency <=
vega10_ps->performance_levels[1].mem_clock))
mclk = data->mclk_latency_table.entries[i].frequency;
}
vega10_ps->performance_levels[0].mem_clock = mclk;
} else {
if (vega10_ps->performance_levels[1].mem_clock <
vega10_ps->performance_levels[0].mem_clock)
vega10_ps->performance_levels[0].mem_clock =
vega10_ps->performance_levels[1].mem_clock;
}
if (PP_CAP(PHM_PlatformCaps_StablePState)) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
}
}
return 0;
}
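/**
 * vega10_find_dpm_states_clocks_in_dpm_table - Check whether the new power
 * state requires the DPM tables to be re-uploaded.
 *
 * If the highest requested sclk/mclk is not present in the corresponding
 * DPM table, the top level is overwritten with it and the matching
 * DPMTABLE_OD_UPDATE flag is set; a change in the number of displays
 * additionally forces an MCLK table update.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @input: pointer to the new power state (struct phm_set_power_state_input).
 * Return: always 0.
 */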
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
struct vega10_hwmgr *data = hwmgr->backend;
const struct phm_set_power_state_input *states =
(const struct phm_set_power_state_input *)input;
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
uint32_t sclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock;
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
uint32_t mclk = vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock;
uint32_t i;
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
}
if (i >= sclk_table->count) {
if (sclk > sclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
sclk_table->dpm_levels[i-1].value = sclk;
}
}
for (i = 0; i < mclk_table->count; i++) {
if (mclk == mclk_table->dpm_levels[i].value)
break;
}
if (i >= mclk_table->count) {
if (mclk > mclk_table->dpm_levels[i-1].value) {
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
mclk_table->dpm_levels[i-1].value = mclk;
}
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
return 0;
}
static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
struct pp_hwmgr *hwmgr, const void *input)
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_dpm_table *dpm_table = &data->dpm_table;
struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
int count;
if (!data->need_update_dpm_table)
return 0;
if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
for (count = 0; count < dpm_table->gfx_table.count; count++)
dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
}
odn_clk_table = &odn_table->vdd_dep_on_mclk;
if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
for (count = 0; count < dpm_table->mem_table.count; count++)
dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
}
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
}
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
return result);
}
vega10_populate_vddc_soc_levels(hwmgr);
return result;
}
static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *dpm_table,
uint32_t low_limit, uint32_t high_limit)
{
uint32_t i;
for (i = 0; i < dpm_table->count; i++) {
if ((dpm_table->dpm_levels[i].value < low_limit) ||
(dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
dpm_table->dpm_levels[i].enabled = true;
}
return 0;
}
static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *dpm_table,
uint32_t low_limit, uint32_t high_limit,
uint32_t disable_dpm_mask)
{
uint32_t i;
for (i = 0; i < dpm_table->count; i++) {
if ((dpm_table->dpm_levels[i].value < low_limit) ||
(dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else if (!((1 << i) & disable_dpm_mask))
dpm_table->dpm_levels[i].enabled = false;
else
dpm_table->dpm_levels[i].enabled = true;
}
return 0;
}
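/**
 * vega10_trim_dpm_states - Restrict the SOC/GFX/MEM DPM levels to the
 * range spanned by the power state's performance levels.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @vega10_ps: the power state providing the low and high clock limits.
 * Return: 0 on success, -EINVAL if the power state has no performance level.
 */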
static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct vega10_power_state *vega10_ps)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t high_limit_count;
	PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -EINVAL);
high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
vega10_trim_single_dpm_states(hwmgr,
&(data->dpm_table.soc_table),
vega10_ps->performance_levels[0].soc_clock,
vega10_ps->performance_levels[high_limit_count].soc_clock);
vega10_trim_single_dpm_states_with_mask(hwmgr,
&(data->dpm_table.gfx_table),
vega10_ps->performance_levels[0].gfx_clock,
vega10_ps->performance_levels[high_limit_count].gfx_clock,
data->disable_dpm_mask);
vega10_trim_single_dpm_states(hwmgr,
&(data->dpm_table.mem_table),
vega10_ps->performance_levels[0].mem_clock,
vega10_ps->performance_levels[high_limit_count].mem_clock);
return 0;
}
static uint32_t vega10_find_lowest_dpm_level(
struct vega10_single_dpm_table *table)
{
uint32_t i;
for (i = 0; i < table->count; i++) {
if (table->dpm_levels[i].enabled)
break;
}
return i;
}
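/**
 * vega10_find_highest_dpm_level - Find the highest enabled DPM level.
 *
 * @table: the single DPM table to search.
 * Return: the index of the highest enabled level (0 when none is enabled),
 * or MAX_REGULAR_DPM_NUMBER - 1 when the table is oversized.
 */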
static uint32_t vega10_find_highest_dpm_level(
struct vega10_single_dpm_table *table)
{
uint32_t i = 0;
if (table->count <= MAX_REGULAR_DPM_NUMBER) {
for (i = table->count; i > 0; i--) {
if (table->dpm_levels[i - 1].enabled)
return i - 1;
}
} else {
pr_info("DPM Table Has Too Many Entries!");
return MAX_REGULAR_DPM_NUMBER - 1;
}
return i;
}
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
	/* intentionally empty: no minimum voltage request is needed on Vega10 */
}
static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
}
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t socclk_idx;
vega10_apply_dal_minimum_voltage_request(hwmgr);
if (!data->registry_data.sclk_dpm_key_disabled) {
if (data->smc_state_table.gfx_boot_level !=
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level,
NULL);
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
}
}
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
&& hwmgr->not_vf) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
socclk_idx,
NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level,
NULL);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
}
}
if (!hwmgr->not_vf)
return 0;
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_boot_level !=
data->dpm_table.soc_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
data->smc_state_table.soc_boot_level,
NULL);
data->dpm_table.soc_table.dpm_state.soft_min_level =
data->smc_state_table.soc_boot_level;
}
}
return 0;
}
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
vega10_apply_dal_minimum_voltage_request(hwmgr);
if (!data->registry_data.sclk_dpm_key_disabled) {
if (data->smc_state_table.gfx_max_level !=
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
data->smc_state_table.gfx_max_level,
NULL);
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->smc_state_table.gfx_max_level;
}
}
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_max_level !=
data->dpm_table.mem_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
data->smc_state_table.mem_max_level,
NULL);
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->smc_state_table.mem_max_level;
}
}
if (!hwmgr->not_vf)
return 0;
if (!data->registry_data.socclk_dpm_key_disabled) {
if (data->smc_state_table.soc_max_level !=
data->dpm_table.soc_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByIndex,
data->smc_state_table.soc_max_level,
NULL);
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->smc_state_table.soc_max_level;
}
}
return 0;
}
static int vega10_generate_dpm_level_enable_mask(
struct pp_hwmgr *hwmgr, const void *input)
{
struct vega10_hwmgr *data = hwmgr->backend;
const struct phm_set_power_state_input *states =
(const struct phm_set_power_state_input *)input;
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
int i;
PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
"Attempt to Trim DPM States Failed!",
return -1);
data->smc_state_table.gfx_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.gfx_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.mem_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
data->smc_state_table.mem_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
data->smc_state_table.soc_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table));
data->smc_state_table.soc_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.soc_table));
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Attempt to upload DPM Bootup Levels Failed!",
return -1);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Attempt to upload DPM Max Levels Failed!",
return -1);
	for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
	for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;
for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++)
data->dpm_table.soc_table.dpm_levels[i].enabled = true;
return 0;
}
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DPM_VCE].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
"Attempt to Enable/Disable DPM VCE Failed!",
return -1);
data->smu_features[GNLD_DPM_VCE].enabled = enable;
}
return 0;
}
static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t low_sclk_interrupt_threshold = 0;
if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
(data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
cpu_to_le32(low_sclk_interrupt_threshold);
/* This message will also enable SmcToHost Interrupt */
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
(uint32_t)low_sclk_interrupt_threshold,
NULL);
}
return 0;
}
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
const void *input)
{
int tmp_result, result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to find DPM states clocks in DPM table!",
result = tmp_result);
tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to generate DPM level enabled mask!",
result = tmp_result);
tmp_result = vega10_update_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to update SCLK threshold!",
result = tmp_result);
result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
/*
* If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
* That effectively disables AVFS feature.
*/
	if (hwmgr->hardcode_pp_table != NULL)
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
vega10_update_avfs(hwmgr);
/*
* Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
* That will help to keep AVFS disabled.
*/
data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
return 0;
}
static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
if (hwmgr == NULL)
return -EINVAL;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (low)
return vega10_ps->performance_levels[0].gfx_clock;
else
return vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock;
}
static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
if (hwmgr == NULL)
return -EINVAL;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
if (low)
return vega10_ps->performance_levels[0].mem_clock;
else
return vega10_ps->performance_levels
[vega10_ps->performance_level_count-1].mem_clock;
}
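/**
 * vega10_get_gpu_power - Query the current package power from the SMC.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @query: output; power in watts as a 24.8 fixed-point value.
 * Return: 0 on success, -EINVAL if @query is NULL.
 */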
static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
uint32_t *query)
{
uint32_t value;
if (!query)
return -EINVAL;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
	/*
	 * The SMC returns actual watts; to stay consistent with legacy
	 * ASICs, the low 8 bits are used as 8 fractional bits.
	 */
*query = value << 8;
return 0;
}
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
struct amdgpu_device *adev = hwmgr->adev;
uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_dpm_table *dpm_table = &data->dpm_table;
int ret = 0;
uint32_t val_vid;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
*((uint32_t *)value) = sclk_mhz * 100;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
if (mclk_idx < dpm_table->mem_table.count) {
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
} else {
ret = -EINVAL;
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
&activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
*((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
*((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCE_POWER:
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
break;
case AMDGPU_PP_SENSOR_VDDGFX:
val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
return 0;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
if (!ret)
*size = 8;
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 1 : 0,
NULL);
}
static int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
int result = 0;
enum amd_pp_clock_type clk_type = clock_req->clock_type;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
DSPCLK_e clk_select = 0;
uint32_t clk_request = 0;
switch (clk_type) {
case amd_pp_dcef_clock:
clk_select = DSPCLK_DCEFCLK;
break;
case amd_pp_disp_clock:
clk_select = DSPCLK_DISPCLK;
break;
case amd_pp_pixel_clock:
clk_select = DSPCLK_PIXCLK;
break;
case amd_pp_phy_clock:
clk_select = DSPCLK_PHYCLK;
break;
default:
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
result = -1;
break;
}
if (!result) {
clk_request = (clk_freq << 16) | clk_select;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
clk_request,
NULL);
}
return result;
}
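/**
 * vega10_get_uclk_index - Map a memory clock frequency to a UCLK DPM index.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @mclk_table: the mclk voltage dependency table to search.
 * @frequency: the target memory clock.
 * Return: the index of the first entry at or above @frequency, the last
 * index when no entry qualifies, or 0 for an empty table.
 */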
static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
uint32_t frequency)
{
uint8_t count;
uint8_t i;
if (mclk_table == NULL || mclk_table->count == 0)
return 0;
count = (uint8_t)(mclk_table->count);
	for (i = 0; i < count; i++) {
		if (mclk_table->entries[i].clk >= frequency)
			return i;
	}
	return i - 1;
}
static int vega10_notify_smc_display_config_after_ps_adjustment(
struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *dpm_table =
&data->dpm_table.dcef_table;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
uint32_t idx;
struct PP_Clocks min_clocks = {0};
uint32_t i;
struct pp_display_clock_request clock_req;
if ((hwmgr->display_config->num_display > 1) &&
!hwmgr->display_config->multi_monitor_in_sync &&
!hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
for (i = 0; i < dpm_table->count; i++) {
if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
break;
}
if (i < dpm_table->count) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcefClockInSR / 100,
NULL);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
} else {
pr_debug("Cannot find requested DCEFCLK!");
}
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
NULL);
		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
}
return 0;
}
static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
data->smc_state_table.gfx_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.mem_boot_level =
data->smc_state_table.mem_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to highest!",
return -1);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -1);
return 0;
}
static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
data->smc_state_table.gfx_max_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.mem_boot_level =
data->smc_state_table.mem_max_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to lowest!",
			return -EINVAL);
return 0;
}
static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->smc_state_table.gfx_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.gfx_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
data->smc_state_table.mem_boot_level =
vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
data->smc_state_table.mem_max_level =
vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
return 0;
}
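/**
 * vega10_get_profiling_clk_mask - Select the sclk/mclk/socclk DPM levels
 * used by the profiling power levels.
 *
 * Defaults to the UMD pstate levels when the dependency tables are deep
 * enough, then overrides them for the MIN_SCLK, MIN_MCLK and PEAK
 * profiles (the peak gfx level is capped at 4 in one-VF mode).
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @level: the requested forced DPM level.
 * @sclk_mask: output gfx clock level index.
 * @mclk_mask: output memory clock level index.
 * @soc_mask: output soc clock level index.
 * Return: always 0.
 */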
static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
*sclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		/* In Vega10 PP one-VF mode, the gfx clk DPM needs to be
		 * capped at level 4 due to the limited power budget.
		 */
if (hwmgr->pp_one_vf)
*sclk_mask = 4;
else
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
}
return 0;
}
static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (!hwmgr->not_vf)
return;
switch (mode) {
case AMD_FAN_CTRL_NONE:
vega10_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
break;
case AMD_FAN_CTRL_MANUAL:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
}
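/**
 * vega10_force_clock_level - Constrain a clock domain to the levels set
 * in @mask.
 *
 * The lowest and highest set bits of @mask become the new soft minimum
 * and maximum DPM levels, which are then uploaded to the SMC.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @type: the clock domain (SCLK, MCLK or SOCCLK; DCEFCLK is unsupported).
 * @mask: bitmask of the DPM levels to allow.
 * Return: 0 on success, -EINVAL if an upload fails.
 */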
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega10_hwmgr *data = hwmgr->backend;
switch (type) {
case PP_SCLK:
data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -EINVAL);
break;
case PP_MCLK:
data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -EINVAL);
break;
case PP_SOCCLK:
data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
"Failed to upload boot level to lowest!",
return -EINVAL);
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
"Failed to upload dpm max level to highest!",
return -EINVAL);
break;
case PP_DCEFCLK:
pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
break;
case PP_PCIE:
default:
break;
}
return 0;
}
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = vega10_force_dpm_lowest(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = vega10_unforce_dpm_levels(hwmgr);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
if (!hwmgr->not_vf)
return ret;
if (!ret) {
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
}
return ret;
}
static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
		return AMD_FAN_CTRL_MANUAL;
	return AMD_FAN_CTRL_AUTO;
}
static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
struct amd_pp_simple_clock_info *info)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_clock_and_voltage_limits *max_limits =
&table_info->max_clock_voltage_on_ac;
info->engine_max_clock = max_limits->sclk;
info->memory_max_clock = max_limits->mclk;
return 0;
}
static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_sclk;
uint32_t i;
clocks->num_levels = 0;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
}
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t j = 0;
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[j].clocks_in_khz =
dep_table->entries[i].clk * 10;
data->mclk_latency_table.entries[j].frequency =
dep_table->entries[i].clk;
clocks->data[j].latency_in_us =
data->mclk_latency_table.entries[j].latency = 25;
j++;
}
}
clocks->num_levels = data->mclk_latency_table.count = j;
}
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_dcefclk;
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
}
static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_socclk;
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
}
static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_latency *clocks)
{
switch (type) {
case amd_pp_sys_clock:
vega10_get_sclks(hwmgr, clocks);
break;
case amd_pp_mem_clock:
vega10_get_memclocks(hwmgr, clocks);
break;
case amd_pp_dcef_clock:
vega10_get_dcefclocks(hwmgr, clocks);
break;
case amd_pp_soc_clock:
vega10_get_socclocks(hwmgr, clocks);
break;
default:
return -1;
}
return 0;
}
static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
uint32_t i;
switch (type) {
case amd_pp_mem_clock:
dep_table = table_info->vdd_dep_on_mclk;
break;
case amd_pp_dcef_clock:
dep_table = table_info->vdd_dep_on_dcefclk;
break;
case amd_pp_disp_clock:
dep_table = table_info->vdd_dep_on_dispclk;
break;
case amd_pp_pixel_clock:
dep_table = table_info->vdd_dep_on_pixclk;
break;
case amd_pp_phy_clock:
dep_table = table_info->vdd_dep_on_phyclk;
break;
default:
return -1;
}
for (i = 0; i < dep_table->count; i++) {
clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
}
if (i < dep_table->count)
return -1;
return 0;
}
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
void *clock_range)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
if (!data->registry_data.disable_water_mark) {
smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap = WaterMarksExist;
}
return 0;
}
static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
static const char *ppfeature_name[] = {
"DPM_PREFETCHER",
"GFXCLK_DPM",
"UCLK_DPM",
"SOCCLK_DPM",
"UVD_DPM",
"VCE_DPM",
"ULV",
"MP0CLK_DPM",
"LINK_DPM",
"DCEFCLK_DPM",
"AVFS",
"GFXCLK_DS",
"SOCCLK_DS",
"LCLK_DS",
"PPT",
"TDC",
"THERMAL",
"GFX_PER_CU_CG",
"RM",
"DCEFCLK_DS",
"ACDC",
"VR0HOT",
"VR1HOT",
"FW_CTF",
"LED_DISPLAY",
"FAN_CONTROL",
"FAST_PPT",
"DIDT",
"ACG",
"PCC_LIMIT"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
"ENABLEMENT"};
uint64_t features_enabled;
int i;
int ret = 0;
int size = 0;
phm_get_sysfs_buf(&buf, &size);
ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
PP_ASSERT_WITH_CODE(!ret,
"[EnableAllSmuFeatures] Failed to get enabled smc features!",
return ret);
size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled);
size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
}
return size;
}
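/**
 * vega10_set_ppfeature_status - Apply a new SMU feature enablement mask.
 *
 * Compares @new_ppfeature_masks against the currently enabled features
 * and issues the corresponding disable and enable requests to the SMC.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @new_ppfeature_masks: the requested feature bitmask.
 * Return: 0 on success, -EINVAL for an out-of-range mask, otherwise the
 * error code returned by the SMC.
 */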
static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
uint64_t features_enabled;
uint64_t features_to_enable;
uint64_t features_to_disable;
int ret = 0;
if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
return -EINVAL;
ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
if (ret)
return ret;
features_to_disable =
features_enabled & ~new_ppfeature_masks;
features_to_enable =
~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
if (features_to_disable) {
ret = vega10_enable_smc_features(hwmgr, false, features_to_disable);
if (ret)
return ret;
}
if (features_to_enable) {
ret = vega10_enable_smc_features(hwmgr, true, features_to_enable);
if (ret)
return ret;
}
return 0;
}
static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}
static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
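/**
 * vega10_emit_clock_levels - Emit the DPM levels of a clock domain into a
 * sysfs buffer.
 *
 * Like vega10_print_clock_levels() but propagates errors: each supported
 * clock type appends one "index: frequency [*]" line per level, with the
 * asterisk marking the current level.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @type: the clock type to report.
 * @buf: the sysfs buffer to append to.
 * @offset: running offset into @buf, advanced as lines are emitted.
 * Return: 0 on success, -EOPNOTSUPP when the domain's DPM is disabled,
 * -ENOENT for an unknown clock type, otherwise the SMC error code.
 */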
static int vega10_emit_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf, int *offset)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
uint32_t i, now, count = 0;
int ret = 0;
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
return -EOPNOTSUPP;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (unlikely(ret != 0))
return ret;
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
count = 5;
else
count = sclk_table->count;
for (i = 0; i < count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_MCLK:
if (data->registry_data.mclk_dpm_key_disabled)
return -EOPNOTSUPP;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
if (unlikely(ret != 0))
return ret;
for (i = 0; i < mclk_table->count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_SOCCLK:
if (data->registry_data.socclk_dpm_key_disabled)
return -EOPNOTSUPP;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
if (unlikely(ret != 0))
return ret;
for (i = 0; i < soc_table->count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_DCEFCLK:
if (data->registry_data.dcefclk_dpm_key_disabled)
return -EOPNOTSUPP;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetClockFreqMHz,
CLK_DCEFCLK, &now);
if (unlikely(ret != 0))
return ret;
for (i = 0; i < dcef_table->count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
break;
case PP_PCIE:
current_gen_speed =
vega10_get_current_pcie_link_speed_level(hwmgr);
current_lane_width =
vega10_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
*offset += sysfs_emit_at(buf, *offset, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
(gen_speed == 3) ? "16.0GT/s," : "",
(lane_width == 1) ? "x1" :
(lane_width == 2) ? "x2" :
(lane_width == 3) ? "x4" :
(lane_width == 4) ? "x8" :
(lane_width == 5) ? "x12" :
(lane_width == 6) ? "x16" : "",
(current_gen_speed == gen_speed) &&
(current_lane_width == lane_width) ?
"*" : "");
}
break;
case OD_SCLK:
if (!hwmgr->od_enabled)
return -EOPNOTSUPP;
*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
break;
case OD_MCLK:
if (!hwmgr->od_enabled)
return -EOPNOTSUPP;
*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
*offset += sysfs_emit_at(buf, *offset, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
break;
case OD_RANGE:
if (!hwmgr->od_enabled)
return -EOPNOTSUPP;
*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
*offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
*offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
*offset += sysfs_emit_at(buf, *offset, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
break;
default:
ret = -ENOENT;
break;
}
return ret;
}
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
int i, now, size = 0, count = 0;
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
count = 5;
else
count = sclk_table->count;
for (i = 0; i < count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_MCLK:
if (data->registry_data.mclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_SOCCLK:
if (data->registry_data.socclk_dpm_key_disabled)
break;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
case PP_DCEFCLK:
if (data->registry_data.dcefclk_dpm_key_disabled)
break;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
break;
case PP_PCIE:
current_gen_speed =
vega10_get_current_pcie_link_speed_level(hwmgr);
current_lane_width =
vega10_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
(gen_speed == 3) ? "16.0GT/s," : "",
(lane_width == 1) ? "x1" :
(lane_width == 2) ? "x2" :
(lane_width == 3) ? "x4" :
(lane_width == 4) ? "x8" :
(lane_width == 5) ? "x12" :
(lane_width == 6) ? "x16" : "",
(current_gen_speed == gen_speed) &&
(current_lane_width == lane_width) ?
"*" : "");
}
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
break;
default:
break;
}
return size;
}
static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
int result = 0;
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
/* PP_ASSERT_WITH_CODE() fires when its condition is false, so assert success */
PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
if (data->water_marks_bitmap & WaterMarksLoaded) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
NULL);
}
return result;
}
static int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_DPM_UVD].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
"Attempt to enable/disable UVD DPM failed!",
return -EINVAL);
data->smu_features[GNLD_DPM_UVD].enabled = enable;
}
return 0;
}
static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->vce_power_gated = bgate;
vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}
static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = hwmgr->backend;
data->uvd_power_gated = bgate;
vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static inline bool vega10_are_power_levels_equal(
const struct vega10_performance_level *pl1,
const struct vega10_performance_level *pl2)
{
return ((pl1->soc_clock == pl2->soc_clock) &&
(pl1->gfx_clock == pl2->gfx_clock) &&
(pl1->mem_clock == pl2->mem_clock));
}
static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pstate1,
const struct pp_hw_power_state *pstate2, bool *equal)
{
const struct vega10_power_state *vega10_psa;
const struct vega10_power_state *vega10_psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
vega10_psa = cast_const_phw_vega10_power_state(pstate1);
vega10_psb = cast_const_phw_vega10_power_state(pstate2);
/* If the two states don't even have the same number of performance levels,
 * they cannot be the same state.
 */
if (vega10_psa->performance_level_count != vega10_psb->performance_level_count) {
*equal = false;
return 0;
}
for (i = 0; i < vega10_psa->performance_level_count; i++) {
if (!vega10_are_power_levels_equal(&(vega10_psa->performance_levels[i]),
&(vega10_psb->performance_levels[i]))) {
/* If we have found even one performance level pair
 * that is different, the states are different.
 */
*equal = false;
return 0;
}
}
/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
*equal = ((vega10_psa->uvd_clks.vclk == vega10_psb->uvd_clks.vclk) &&
(vega10_psa->uvd_clks.dclk == vega10_psb->uvd_clks.dclk));
*equal &= ((vega10_psa->vce_clks.evclk == vega10_psb->vce_clks.evclk) &&
(vega10_psa->vce_clks.ecclk == vega10_psb->vce_clks.ecclk));
*equal &= (vega10_psa->sclk_threshold == vega10_psb->sclk_threshold);
return 0;
}
static bool
vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
bool is_update_required = false;
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
is_update_required = true;
if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
is_update_required = true;
}
return is_update_required;
}
static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
if (!hwmgr->not_vf)
return 0;
if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
tmp_result = vega10_disable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable power containment!", result = tmp_result);
tmp_result = vega10_disable_didt_config(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable didt config!", result = tmp_result);
tmp_result = vega10_avfs_enable(hwmgr, false);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable AVFS!", result = tmp_result);
tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable deep sleep!", result = tmp_result);
tmp_result = vega10_disable_ulv(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable ulv!", result = tmp_result);
tmp_result = vega10_acg_disable(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable acg!", result = tmp_result);
vega10_enable_disable_PCC_limit_feature(hwmgr, false);
return result;
}
static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
int result;
result = vega10_disable_dpm_tasks(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"[disable_dpm_tasks] Failed to disable DPM!",
);
data->water_marks_bitmap &= ~(WaterMarksLoaded);
return result;
}
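/*
 * Overdrive percentage math used by the *_od getters/setters below:
 *
 *   od% = DIV_ROUND_UP((cur_max - golden_max) * 100, golden_max)
 *
 * e.g. golden_max = 150000 (1500 MHz) and cur_max = 160000 (1600 MHz)
 * gives DIV_ROUND_UP(1000000, 150000) = 7%. The setters invert this as
 * new_max = golden_max + golden_max * value / 100 and clamp the result
 * to the VBIOS overdrive limit.
 */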
static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
int golden_value = golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value;
value -= golden_value;
value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *golden_sclk_table =
&(data->golden_dpm_table.gfx_table);
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock =
golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value *
value / 100 +
golden_sclk_table->dpm_levels
[golden_sclk_table->count - 1].value;
if (vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock >
hwmgr->platform_descriptor.overdriveLimit.engineClock) {
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
pr_warn("max sclk supported by vbios is %d\n",
hwmgr->platform_descriptor.overdriveLimit.engineClock);
}
return 0;
}
static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
int golden_value = golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
value -= golden_value;
value = DIV_ROUND_UP(value * 100, golden_value);
return value;
}
static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_single_dpm_table *golden_mclk_table =
&(data->golden_dpm_table.mem_table);
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
ps = hwmgr->request_ps;
if (ps == NULL)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock =
golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value *
value / 100 +
golden_mclk_table->dpm_levels
[golden_mclk_table->count - 1].value;
if (vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock >
hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock =
hwmgr->platform_descriptor.overdriveLimit.memoryClock;
pr_warn("max mclk supported by vbios is %d\n",
hwmgr->platform_descriptor.overdriveLimit.memoryClock);
}
return 0;
}
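/*
 * The CAC log buffer addresses are 64-bit while each SMC message only
 * carries one 32-bit parameter, so the virtual and MC addresses are
 * handed over as separate high/low halves, followed by the buffer size.
 */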
static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
uint32_t virtual_addr_hi,
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
virtual_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
virtual_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
mc_addr_hi,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
mc_addr_low,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
size,
NULL);
return 0;
}
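/*
 * PPTable thermal limits are in degrees Celsius; consumers expect
 * millidegrees, hence the PP_TEMPERATURE_UNITS_PER_CENTIGRADES (1000)
 * scaling applied to every limit below.
 */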
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct phm_ppt_v2_information *pp_table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
thermal_data->max = pp_table->TedgeLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_crit_max = pp_table->ThbmLimit *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
else
thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, size = 0;
static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
{70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
static const char *title[6] = {"NUM",
"MODE_NAME",
"BUSY_SET_POINT",
"FPS",
"USE_RLC_BUSY",
"MIN_ACTIVE_LEVEL"};
if (!buf)
return -EINVAL;
phm_get_sysfs_buf(&buf, &size);
size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n", title[0],
title[1], title[2], title[3], title[4], title[5]);
for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n",
i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
profile_mode_setting[i][0], profile_mode_setting[i][1],
profile_mode_setting[i][2], profile_mode_setting[i][3]);
size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
data->custom_profile_mode[0], data->custom_profile_mode[1],
data->custom_profile_mode[2], data->custom_profile_mode[3]);
return size;
}
static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (adev->pdev->device == 0x6860);
}
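/*
 * PPSMC_MSG_SetCustomGfxDpmParameters packs all four CUSTOM profile
 * knobs into a single 32-bit parameter:
 *   bits  7:0   busy_set_point
 *   bits 15:8   FPS
 *   bits 23:16  use_rlc_busy
 *   bits 31:24  min_active_level
 */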
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint8_t busy_set_point;
uint8_t FPS;
uint8_t use_rlc_busy;
uint8_t min_active_level;
uint32_t power_profile_mode = input[size];
if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size != 0 && size != 4)
return -EINVAL;
/* If size == 0 and the CUSTOM profile has already been set,
 * just apply that profile. The copy stored in the hwmgr is
 * zeroed out on init.
 */
if (size == 0) {
if (data->custom_profile_mode[0] != 0)
goto out;
else
return -EINVAL;
}
data->custom_profile_mode[0] = busy_set_point = input[0];
data->custom_profile_mode[1] = FPS = input[1];
data->custom_profile_mode[2] = use_rlc_busy = input[2];
data->custom_profile_mode[3] = min_active_level = input[3];
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetCustomGfxDpmParameters,
busy_set_point | FPS<<8 |
use_rlc_busy << 16 | min_active_level<<24,
NULL);
}
out:
if (vega10_get_power_profile_mode_quirks(hwmgr))
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << power_profile_mode,
NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
NULL);
hwmgr->power_profile_mode = power_profile_mode;
return 0;
}
static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
uint32_t clk,
uint32_t voltage)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
struct vega10_single_dpm_table *golden_table;
if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
return false;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
golden_table = &(data->golden_dpm_table.gfx_table);
if (golden_table->dpm_levels[0].value > clk ||
hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
pr_info("OD engine clock is out of range [%d - %d] MHz\n",
golden_table->dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
return false;
}
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
golden_table = &(data->golden_dpm_table.mem_table);
if (golden_table->dpm_levels[0].value > clk ||
hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
pr_info("OD memory clock is out of range [%d - %d] MHz\n",
golden_table->dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
return false;
}
} else {
return false;
}
return true;
}
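/*
 * After an OD edit the cached power states still carry the old top-level
 * clocks; sync the highest performance level of both the requested state
 * and the last entry of hwmgr->ps with the (possibly overdriven) top of
 * each DPM table.
 */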
static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct pp_power_state *ps = hwmgr->request_ps;
struct vega10_power_state *vega10_ps;
struct vega10_single_dpm_table *gfx_dpm_table =
&data->dpm_table.gfx_table;
struct vega10_single_dpm_table *soc_dpm_table =
&data->dpm_table.soc_table;
struct vega10_single_dpm_table *mem_dpm_table =
&data->dpm_table.mem_table;
int max_level;
if (!ps)
return;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].gfx_clock =
gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
if (vega10_ps->performance_levels[max_level].soc_clock !=
soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].soc_clock =
soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
if (vega10_ps->performance_levels[max_level].mem_clock !=
mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].mem_clock =
mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
if (!hwmgr->ps)
return;
ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].gfx_clock =
gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
if (vega10_ps->performance_levels[max_level].soc_clock !=
soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].soc_clock =
soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
if (vega10_ps->performance_levels[max_level].mem_clock !=
mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
vega10_ps->performance_levels[max_level].mem_clock =
mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
}
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info = hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
&data->odn_dpm_table.vdd_dep_on_socclk;
struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
uint8_t i, j;
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < dpm_table->count; i++) {
for (j = 0; j < od_vddc_lookup_table->count; j++) {
if (od_vddc_lookup_table->entries[j].us_vdd >
podn_vdd_dep->entries[i].vddc)
break;
}
if (j == od_vddc_lookup_table->count) {
j = od_vddc_lookup_table->count - 1;
od_vddc_lookup_table->entries[j].us_vdd =
podn_vdd_dep->entries[i].vddc;
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
}
podn_vdd_dep->entries[i].vddInd = j;
}
dpm_table = &data->dpm_table.soc_table;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
for (; (i < dep_table->count) &&
(dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
}
break;
} else {
dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
}
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
}
}
vega10_odn_update_power_state(hwmgr);
}
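/*
 * The OD sysfs interface feeds this function (level, clock, voltage)
 * triplets: clock in MHz (scaled by 100 into the SMU's 10 kHz units),
 * voltage in mV. Each triplet is range-checked by
 * vega10_check_clk_voltage_valid() before any table is modified.
 */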
static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
struct vega10_single_dpm_table *dpm_table;
uint32_t input_clk;
uint32_t input_vol;
uint32_t input_level;
uint32_t i;
PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
return -EINVAL);
if (!hwmgr->od_enabled) {
pr_info("OverDrive feature not enabled\n");
return -EINVAL;
}
if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
dpm_table = &data->dpm_table.gfx_table;
podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
dpm_table = &data->dpm_table.mem_table;
podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
vega10_odn_initial_default_setting(hwmgr);
vega10_odn_update_power_state(hwmgr);
/* force an update of all clock tables */
data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
DPMTABLE_UPDATE_MCLK |
DPMTABLE_UPDATE_SOCCLK;
return 0;
} else if (PP_OD_COMMIT_DPM_TABLE == type) {
vega10_check_dpm_table_updated(hwmgr);
return 0;
} else {
return -EINVAL;
}
for (i = 0; i < size; i += 3) {
if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
pr_info("invalid clock voltage input\n");
return 0;
}
input_level = input[i];
input_clk = input[i+1] * 100;
input_vol = input[i+2];
if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
dpm_table->dpm_levels[input_level].value = input_clk;
podn_vdd_dep_table->entries[input_level].clk = input_clk;
podn_vdd_dep_table->entries[input_level].vddc = input_vol;
} else {
return -EINVAL;
}
}
vega10_odn_update_soc_table(hwmgr, type);
return 0;
}
static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
enum pp_mp1_state mp1_state)
{
uint16_t msg;
int ret;
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
msg = PPSMC_MSG_PrepareMp1ForUnload;
break;
case PP_MP1_STATE_SHUTDOWN:
case PP_MP1_STATE_RESET:
case PP_MP1_STATE_NONE:
default:
return 0;
}
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
return 0;
}
static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
const struct vega10_power_state *vega10_ps;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
vega10_ps = cast_const_phw_vega10_power_state(state);
i = index > vega10_ps->performance_level_count - 1 ?
vega10_ps->performance_level_count - 1 : index;
level->coreClock = vega10_ps->performance_levels[i].gfx_clock;
level->memory_clock = vega10_ps->performance_levels[i].mem_clock;
return 0;
}
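/*
 * Build a mask of the ULV and deep-sleep clock features currently in the
 * opposite state to the one requested, flip them with a single
 * vega10_enable_smc_features() call, then mirror the new state into the
 * local feature table.
 */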
static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t feature_mask = 0;
if (disable) {
feature_mask |= data->smu_features[GNLD_ULV].enabled ?
data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
} else {
feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
}
if (feature_mask)
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
!disable, feature_mask),
"enable/disable power features for compute performance Failed!",
return -EINVAL);
if (disable) {
data->smu_features[GNLD_ULV].enabled = false;
data->smu_features[GNLD_DS_GFXCLK].enabled = false;
data->smu_features[GNLD_DS_SOCCLK].enabled = false;
data->smu_features[GNLD_DS_LCLK].enabled = false;
data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
} else {
data->smu_features[GNLD_ULV].enabled = true;
data->smu_features[GNLD_DS_GFXCLK].enabled = true;
data->smu_features[GNLD_DS_SOCCLK].enabled = true;
data->smu_features[GNLD_DS_LCLK].enabled = true;
data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
}
return 0;
}
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
.asic_setup = vega10_setup_asic_task,
.dynamic_state_management_enable = vega10_enable_dpm_tasks,
.dynamic_state_management_disable = vega10_disable_dpm_tasks,
.get_num_of_pp_table_entries =
vega10_get_number_of_powerplay_table_entries,
.get_power_state_size = vega10_get_power_state_size,
.get_pp_table_entry = vega10_get_pp_table_entry,
.patch_boot_state = vega10_patch_boot_state,
.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
.power_state_set = vega10_set_power_state_tasks,
.get_sclk = vega10_dpm_get_sclk,
.get_mclk = vega10_dpm_get_mclk,
.notify_smc_display_config_after_ps_adjustment =
vega10_notify_smc_display_config_after_ps_adjustment,
.force_dpm_level = vega10_dpm_force_dpm_level,
.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
.get_fan_speed_pwm = vega10_fan_ctrl_get_fan_speed_pwm,
.set_fan_speed_pwm = vega10_fan_ctrl_set_fan_speed_pwm,
.reset_fan_speed_to_default =
vega10_fan_ctrl_reset_fan_speed_to_default,
.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
.uninitialize_thermal_controller =
vega10_thermal_ctrl_uninitialize_thermal_controller,
.set_fan_control_mode = vega10_set_fan_control_mode,
.get_fan_control_mode = vega10_get_fan_control_mode,
.read_sensor = vega10_read_sensor,
.get_dal_power_level = vega10_get_dal_power_level,
.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = vega10_display_clock_voltage_request,
.force_clock_level = vega10_force_clock_level,
.emit_clock_levels = vega10_emit_clock_levels,
.print_clock_levels = vega10_print_clock_levels,
.display_config_changed = vega10_display_configuration_changed_task,
.powergate_uvd = vega10_power_gate_uvd,
.powergate_vce = vega10_power_gate_vce,
.check_states_equal = vega10_check_states_equal,
.check_smc_update_required_for_display_configuration =
vega10_check_smc_update_required_for_display_configuration,
.power_off_asic = vega10_power_off_asic,
.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
.get_sclk_od = vega10_get_sclk_od,
.set_sclk_od = vega10_set_sclk_od,
.get_mclk_od = vega10_get_mclk_od,
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
.register_irq_handlers = smu9_register_irq_handlers,
.start_thermal_controller = vega10_start_thermal_controller,
.get_power_profile_mode = vega10_get_power_profile_mode,
.set_power_profile_mode = vega10_set_power_profile_mode,
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
.get_asic_baco_capability = smu9_baco_get_capability,
.get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
.set_mp1_state = vega10_set_mp1_state,
.disable_power_features_for_compute_performance =
vega10_disable_power_features_for_compute_performance,
};
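/*
 * Entry point from the powerplay core: wire up the vega10 hwmgr and
 * pptable callbacks. BACO capability is only probed when running in
 * passthrough (virtualized) mode.
 */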
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
if (amdgpu_passthrough(adev))
return vega10_baco_set_cap(hwmgr);
return 0;
}
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "pp_overdriver.h"
#include <linux/errno.h>
static const struct phm_fuses_default vega10_fuses_default[] = {
{ 0x0213EA94DE0E4964, 0x00003C96, 0xFFFFE226, 0x00000656, 0x00002203, 0xFFFFF201, 0x000003FF, 0x00002203, 0xFFFFF201, 0x000003FF },
{ 0x0213EA94DE0A1884, 0x00003CC5, 0xFFFFE23A, 0x0000064E, 0x00002258, 0xFFFFF1F7, 0x000003FC, 0x00002258, 0xFFFFF1F7, 0x000003FC },
{ 0x0213EA94DE0E31A4, 0x00003CAF, 0xFFFFE36E, 0x00000602, 0x00001E98, 0xFFFFF569, 0x00000357, 0x00001E98, 0xFFFFF569, 0x00000357 },
{ 0x0213EA94DE2C1144, 0x0000391A, 0xFFFFE548, 0x000005C9, 0x00001B98, 0xFFFFF707, 0x00000324, 0x00001B98, 0xFFFFF707, 0x00000324 },
{ 0x0213EA94DE2C18C4, 0x00003821, 0xFFFFE674, 0x00000597, 0x00002196, 0xFFFFF361, 0x000003C0, 0x00002196, 0xFFFFF361, 0x000003C0 },
{ 0x0213EA94DE263884, 0x000044A2, 0xFFFFDCB7, 0x00000738, 0x0000325C, 0xFFFFE6A7, 0x000005E6, 0x0000325C, 0xFFFFE6A7, 0x000005E6 },
{ 0x0213EA94DE082924, 0x00004057, 0xFFFFE1CF, 0x0000063C, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
{ 0x0213EA94DE284924, 0x00003FD0, 0xFFFFDF0F, 0x000006E5, 0x0000267C, 0xFFFFEE2D, 0x000004AB, 0x0000267C, 0xFFFFEE2D, 0x000004AB },
{ 0x0213EA94DE280904, 0x00003F13, 0xFFFFE010, 0x000006AD, 0x000020E7, 0xFFFFF266, 0x000003EC, 0x000020E7, 0xFFFFF266, 0x000003EC },
{ 0x0213EA94DE082044, 0x00004088, 0xFFFFDFAB, 0x000006B6, 0x0000252B, 0xFFFFEFDB, 0x00000458, 0x0000252B, 0xFFFFEFDB, 0x00000458 },
{ 0x0213EA94DE283884, 0x00003EF6, 0xFFFFE017, 0x000006AA, 0x00001F67, 0xFFFFF369, 0x000003BE, 0x00001F67, 0xFFFFF369, 0x000003BE },
{ 0x0213EA94DE2C2184, 0x00003CDD, 0xFFFFE2A7, 0x0000063C, 0x000026C6, 0xFFFFEF38, 0x00000478, 0x000026C6, 0xFFFFEF38, 0x00000478 },
{ 0x0213EA94DE105124, 0x00003FA8, 0xFFFFDF02, 0x000006F0, 0x000027FE, 0xFFFFECF6, 0x000004EA, 0x000027FE, 0xFFFFECF6, 0x000004EA },
{ 0x0213EA94DE2638C4, 0x00004670, 0xFFFFDC40, 0x00000742, 0x00003A7A, 0xFFFFE1A7, 0x000006B6, 0x00003A7A, 0xFFFFE1A7, 0x000006B6 },
{ 0x0213EA94DE2C3024, 0x00003CDC, 0xFFFFE18C, 0x00000683, 0x00002A69, 0xFFFFEBE7, 0x00000515, 0x00002A69, 0xFFFFEBE7, 0x00000515 },
{ 0x0213EA94DE0E38C4, 0x00003CEC, 0xFFFFE38E, 0x00000601, 0x00002752, 0xFFFFEFA7, 0x00000453, 0x00002752, 0xFFFFEFA7, 0x00000453 },
{ 0x0213EA94DE2C1124, 0x000037D0, 0xFFFFE634, 0x000005A7, 0x00001CD2, 0xFFFFF644, 0x00000348, 0x00001CD2, 0xFFFFF644, 0x00000348 },
{ 0x0213EA94DE283964, 0x00003DF5, 0xFFFFE0A5, 0x00000698, 0x00001FD5, 0xFFFFF30E, 0x000003D1, 0x00001FD5, 0xFFFFF30E, 0x000003D1 },
{ 0x0213EA94DE0828C4, 0x00004201, 0xFFFFE03E, 0x00000688, 0x00003206, 0xFFFFE852, 0x0000058A, 0x00003206, 0xFFFFE852, 0x0000058A },
{ 0x0213EA94DE2C1864, 0x00003BED, 0xFFFFE2F5, 0x00000638, 0x0000270D, 0xFFFFEED0, 0x0000048E, 0x0000270D, 0xFFFFEED0, 0x0000048E },
{ 0x0213EA94DE0A1904, 0x00003E82, 0xFFFFE1BE, 0x00000654, 0x000025FB, 0xFFFFEFFA, 0x00000448, 0x000025FB, 0xFFFFEFFA, 0x00000448 },
{ 0x0213EA94DE2C40C4, 0x00003962, 0xFFFFE4B9, 0x000005EF, 0x00002385, 0xFFFFF156, 0x00000423, 0x00002385, 0xFFFFF156, 0x00000423 },
{ 0x0213EA94DE2C0944, 0x00003D88, 0xFFFFE21A, 0x00000655, 0x0000295A, 0xFFFFED68, 0x000004C4, 0x0000295A, 0xFFFFED68, 0x000004C4 },
{ 0x0213EA94DE2C1104, 0x00003AA4, 0xFFFFE4A3, 0x000005E0, 0x000022EF, 0xFFFFF250, 0x000003EB, 0x000022EF, 0xFFFFF250, 0x000003EB },
{ 0x0213EA94DE0E29A4, 0x00003D97, 0xFFFFE30D, 0x0000060D, 0x0000205D, 0xFFFFF45D, 0x00000380, 0x0000205D, 0xFFFFF45D, 0x00000380 },
{ 0x0213EA94DE2C40A4, 0x000039B6, 0xFFFFE446, 0x00000605, 0x00002325, 0xFFFFF16C, 0x0000041F, 0x00002325, 0xFFFFF16C, 0x0000041F },
{ 0x0213EA94DE263904, 0x0000457E, 0xFFFFDCF6, 0x00000722, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
{ 0x0213EA94DE0A1924, 0x00003FB8, 0xFFFFE101, 0x00000670, 0x00002787, 0xFFFFEEF5, 0x00000471, 0x00002787, 0xFFFFEEF5, 0x00000471 },
{ 0x0213EA94DE0E38A4, 0x00003BB2, 0xFFFFE430, 0x000005EA, 0x000024A5, 0xFFFFF162, 0x00000409, 0x000024A5, 0xFFFFF162, 0x00000409 },
{ 0x0213EA94DE082144, 0x00003EC5, 0xFFFFE1BD, 0x0000064F, 0x000022F0, 0xFFFFF227, 0x000003E8, 0x000022F0, 0xFFFFF227, 0x000003E8 },
{ 0x0213EA94DE2C3164, 0x000038A7, 0xFFFFE59F, 0x000005C1, 0x000021CC, 0xFFFFF2DF, 0x000003D9, 0x000021CC, 0xFFFFF2DF, 0x000003D9 },
{ 0x0213EA94DE324184, 0x00002995, 0xFFFFEF7A, 0x0000044C, 0x00001552, 0xFFFFFB5D, 0x00000292, 0x00001552, 0xFFFFFB5D, 0x00000292 },
{ 0x0213EA94DE2C4064, 0x00003B26, 0xFFFFE2D3, 0x00000649, 0x000023B4, 0xFFFFF09B, 0x00000449, 0x000023B4, 0xFFFFF09B, 0x00000449 },
{ 0x0213EA94DE081124, 0x000040D2, 0xFFFFE00A, 0x00000696, 0x000022DA, 0xFFFFF1E9, 0x000003F2, 0x000022DA, 0xFFFFF1E9, 0x000003F2 },
{ 0x0213EA94DE2C3924, 0x00003C98, 0xFFFFE365, 0x00000618, 0x00002D5D, 0xFFFFEB3A, 0x0000051D, 0x00002D5D, 0xFFFFEB3A, 0x0000051D },
{ 0x0213EA94DE2C10A4, 0x00003BBD, 0xFFFFE37E, 0x00000617, 0x0000252E, 0xFFFFF06E, 0x00000441, 0x0000252E, 0xFFFFF06E, 0x00000441 },
{ 0x0213EA94DE262924, 0x00004363, 0xFFFFDF7A, 0x000006A0, 0x000031F5, 0xFFFFE880, 0x0000057B, 0x000031F5, 0xFFFFE880, 0x0000057B },
{ 0x0213EA94DE0E3844, 0x00003CFC, 0xFFFFE2AF, 0x0000062E, 0x0000212A, 0xFFFFF335, 0x000003BF, 0x0000212A, 0xFFFFF335, 0x000003BF },
{ 0x0213EA94DE1C4924, 0x0000252D, 0xFFFFF31B, 0x000003C3, 0x00001A1A, 0xFFFFF882, 0x00000325, 0x00001A1A, 0xFFFFF882, 0x00000325 },
{ 0x0213EA94DE0A29A4, 0x00003FE2, 0xFFFFDFEF, 0x000006AC, 0x000025A2, 0xFFFFEF84, 0x00000462, 0x000025A2, 0xFFFFEF84, 0x00000462 },
{ 0x0213EA94DE0820E4, 0x000040A5, 0xFFFFE13B, 0x0000065B, 0x00002C13, 0xFFFFEC75, 0x000004D7, 0x00002C13, 0xFFFFEC75, 0x000004D7 },
{ 0x0213EA94DE0E48A4, 0x00003E42, 0xFFFFE1B3, 0x00000657, 0x0000221D, 0xFFFFF273, 0x000003DE, 0x0000221D, 0xFFFFF273, 0x000003DE },
{ 0x0213EA94DE0A20E4, 0x00003E7F, 0xFFFFE255, 0x00000638, 0x00002D30, 0xFFFFEB8A, 0x00000503, 0x00002D30, 0xFFFFEB8A, 0x00000503 },
{ 0x0213EA94DE2C29C4, 0x00003E56, 0xFFFFE16D, 0x00000670, 0x000028DC, 0xFFFFEDA0, 0x000004BA, 0x000028DC, 0xFFFFEDA0, 0x000004BA },
{ 0x0213EA94DE2630A4, 0x000044AD, 0xFFFFDE24, 0x000006DD, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
{ 0x0213EA94DE2C20E4, 0x00003AF3, 0xFFFFE5B0, 0x000005A6, 0x00002CF6, 0xFFFFEC75, 0x000004DD, 0x00002CF6, 0xFFFFEC75, 0x000004DD },
{ 0x0213EA94DE0A2084, 0x00003E66, 0xFFFFE19E, 0x0000065B, 0x00002332, 0xFFFFF1B9, 0x000003FD, 0x00002332, 0xFFFFF1B9, 0x000003FD },
{ 0x0213EA94DE082884, 0x00003FB4, 0xFFFFE0A5, 0x00000686, 0x0000253E, 0xFFFFF02E, 0x00000444, 0x0000253E, 0xFFFFF02E, 0x00000444 },
{ 0x0213EA94DE2818A4, 0x00003E28, 0xFFFFE14D, 0x0000066E, 0x00001FE2, 0xFFFFF39A, 0x000003B1, 0x00001FE2, 0xFFFFF39A, 0x000003B1 },
{ 0x0213EA94DE2C0904, 0x000039E6, 0xFFFFE44B, 0x000005FE, 0x0000210C, 0xFFFFF2F4, 0x000003DA, 0x0000210C, 0xFFFFF2F4, 0x000003DA },
{ 0x0213EA94DE2C5104, 0x00003A4D, 0xFFFFE252, 0x0000067A, 0x000027E2, 0xFFFFECED, 0x000004FA, 0x000027E2, 0xFFFFECED, 0x000004FA },
{ 0x0213EA94DE0A2964, 0x00004065, 0xFFFFE02F, 0x0000069B, 0x0000299D, 0xFFFFED38, 0x000004C2, 0x0000299D, 0xFFFFED38, 0x000004C2 },
{ 0x0213EA94DE0E20A4, 0x000039EE, 0xFFFFE603, 0x00000594, 0x0000214F, 0xFFFFF429, 0x0000038E, 0x0000214F, 0xFFFFF429, 0x0000038E },
{ 0x0213EA94DE0E48E4, 0x00003BD2, 0xFFFFE351, 0x00000618, 0x000020B8, 0xFFFFF377, 0x000003B4, 0x000020B8, 0xFFFFF377, 0x000003B4 },
{ 0x0213EA94DE0A3124, 0x00003FAA, 0xFFFFE183, 0x0000065E, 0x000032AE, 0xFFFFE7C2, 0x000005A6, 0x000032AE, 0xFFFFE7C2, 0x000005A6 },
{ 0x0213EA94DE2C2984, 0x00003AFB, 0xFFFFE3E4, 0x00000608, 0x00002293, 0xFFFFF21F, 0x000003FA, 0x00002293, 0xFFFFF21F, 0x000003FA },
{ 0x0213EA94DE262064, 0x0000448B, 0xFFFFDD5D, 0x0000070D, 0x00002E4E, 0xFFFFE9DF, 0x00000551, 0x00002E4E, 0xFFFFE9DF, 0x00000551 },
{ 0x0213EA94DE0E2184, 0x00003D46, 0xFFFFE39B, 0x000005F3, 0x0000218E, 0xFFFFF3CD, 0x00000398, 0x0000218E, 0xFFFFF3CD, 0x00000398 },
{ 0x0213EA94DE0848E4, 0x00003F01, 0xFFFFDFD9, 0x000006BF, 0x000023AF, 0xFFFFF04E, 0x0000044C, 0x000023AF, 0xFFFFF04E, 0x0000044C },
{ 0x0213EA94DE1029A4, 0x0000403D, 0xFFFFDF6B, 0x000006C9, 0x0000270D, 0xFFFFEE4B, 0x0000049E, 0x0000270D, 0xFFFFEE4B, 0x0000049E },
{ 0x0213EA94DE0E3964, 0x00003C11, 0xFFFFE35C, 0x00000613, 0x000020F9, 0xFFFFF365, 0x000003B9, 0x000020F9, 0xFFFFF365, 0x000003B9 },
{ 0x0213EA94DE2C3884, 0x00003B58, 0xFFFFE37D, 0x0000061F, 0x00002698, 0xFFFFEF46, 0x00000478, 0x00002698, 0xFFFFEF46, 0x00000478 },
{ 0x0213EA94DE2841A4, 0x00003EBC, 0xFFFFDF7A, 0x000006D6, 0x0000212B, 0xFFFFF195, 0x0000041B, 0x0000212B, 0xFFFFF195, 0x0000041B },
{ 0x0213EA94DE0848C4, 0x00004050, 0xFFFFDEB3, 0x000006FE, 0x00002D6C, 0xFFFFE961, 0x00000582, 0x00002D6C, 0xFFFFE961, 0x00000582 },
{ 0x0213EA94DE262044, 0x000043F0, 0xFFFFDD9C, 0x00000702, 0x00002B31, 0xFFFFEBEA, 0x000004F7, 0x00002B31, 0xFFFFEBEA, 0x000004F7 },
{ 0x0213EA94DE100924, 0x00003EFA, 0xFFFFE093, 0x00000696, 0x000026DB, 0xFFFFEEB3, 0x00000489, 0x000026DB, 0xFFFFEEB3, 0x00000489 },
{ 0x0213EA94DE082064, 0x0000425D, 0xFFFFDE8D, 0x000006E6, 0x00002CA4, 0xFFFFEAD2, 0x00000531, 0x00002CA4, 0xFFFFEAD2, 0x00000531 },
{ 0x0213EA94DE2639A4, 0x000043B0, 0xFFFFDD03, 0x00000728, 0x00002946, 0xFFFFECA6, 0x000004DE, 0x00002946, 0xFFFFECA6, 0x000004DE },
{ 0x0213EA94DE282864, 0x00003F6A, 0xFFFFE03A, 0x0000069D, 0x00002208, 0xFFFFF1F8, 0x000003F6, 0x00002208, 0xFFFFF1F8, 0x000003F6 },
{ 0x0213EA94DE2C2964, 0x00003A94, 0xFFFFE4A7, 0x000005E2, 0x000024D0, 0xFFFFF100, 0x00000426, 0x000024D0, 0xFFFFF100, 0x00000426 },
{ 0x0213EA94DE2810C4, 0x00003F2F, 0xFFFFE0A3, 0x00000688, 0x00002198, 0xFFFFF271, 0x000003E2, 0x00002198, 0xFFFFF271, 0x000003E2 },
{ 0x0213EA94DE1048E4, 0x00003EA5, 0xFFFFE032, 0x000006AE, 0x0000227C, 0xFFFFF130, 0x00000426, 0x0000227C, 0xFFFFF130, 0x00000426 },
{ 0x0213EA94DE264144, 0x0000442F, 0xFFFFDBC4, 0x0000078B, 0x00003CD6, 0xFFFFDE6C, 0x0000076C, 0x00003CD6, 0xFFFFDE6C, 0x0000076C },
{ 0x0213EA94DE282884, 0x00003DDE, 0xFFFFE174, 0x00000668, 0x00001FF4, 0xFFFFF38F, 0x000003B1, 0x00001FF4, 0xFFFFF38F, 0x000003B1 },
{ 0x0213EA94DE0A3144, 0x000040B0, 0xFFFFE016, 0x000006A0, 0x00002DBB, 0xFFFFEA7F, 0x00000537, 0x00002DBB, 0xFFFFEA7F, 0x00000537 },
{ 0x0213EA94DE2C3104, 0x00003429, 0xFFFFEA97, 0x000004DD, 0x000024D5, 0xFFFFF26F, 0x000003DF, 0x000024D5, 0xFFFFF26F, 0x000003DF },
{ 0x0213EA94DE0E1904, 0x00003AEB, 0xFFFFE590, 0x000005A3, 0x000022CB, 0xFFFFF347, 0x000003B2, 0x000022CB, 0xFFFFF347, 0x000003B2 },
{ 0x0213EA94DE283904, 0x00003B8E, 0xFFFFE2EF, 0x00000636, 0x00002351, 0xFFFFF143, 0x0000041C, 0x00002351, 0xFFFFF143, 0x0000041C },
{ 0x0213EA94DE3240C4, 0x00002926, 0xFFFFF0B0, 0x00000410, 0x0000194E, 0xFFFFF94E, 0x000002E9, 0x0000194E, 0xFFFFF94E, 0x000002E9 },
{ 0x0213EA94DE283184, 0x0000402B, 0xFFFFDF78, 0x000006C2, 0x00002273, 0xFFFFF16C, 0x00000414, 0x00002273, 0xFFFFF16C, 0x00000414 },
{ 0x0213EA94DE0A10A4, 0x00003D6A, 0xFFFFE1D3, 0x00000659, 0x00002006, 0xFFFFF394, 0x000003B1, 0x00002006, 0xFFFFF394, 0x000003B1 },
{ 0x0213EA94DE284064, 0x00004042, 0xFFFFDFD8, 0x000006A8, 0x00002135, 0xFFFFF29F, 0x000003D9, 0x00002135, 0xFFFFF29F, 0x000003D9 },
{ 0x0213EA94DE0820A4, 0x0000405B, 0xFFFFE093, 0x00000682, 0x0000288F, 0xFFFFEE3A, 0x00000491, 0x0000288F, 0xFFFFEE3A, 0x00000491 },
{ 0x0213EA94DE2C48A4, 0x00003A49, 0xFFFFE30C, 0x00000648, 0x000023F9, 0xFFFFF02D, 0x00000460, 0x000023F9, 0xFFFFF02D, 0x00000460 },
{ 0x0213EA94DE282964, 0x00003D59, 0xFFFFE1CC, 0x0000065B, 0x00002013, 0xFFFFF37D, 0x000003B6, 0x00002013, 0xFFFFF37D, 0x000003B6 },
{ 0x0213EA94DE2C3984, 0x000040C1, 0xFFFFDF8C, 0x000006CA, 0x00003271, 0xFFFFE6CA, 0x000005EA, 0x00003271, 0xFFFFE6CA, 0x000005EA },
{ 0x0213EA94DE2620E4, 0x000042E9, 0xFFFFDFDC, 0x0000068C, 0x00002ED9, 0xFFFFEAAF, 0x0000051B, 0x00002ED9, 0xFFFFEAAF, 0x0000051B },
{ 0x0213EA94DE083084, 0x000042ED, 0xFFFFDE50, 0x000006F0, 0x00002FCF, 0xFFFFE8BB, 0x0000058C, 0x00002FCF, 0xFFFFE8BB, 0x0000058C },
{ 0x0213EA94DE0A4104, 0x00003EBD, 0xFFFFE099, 0x00000698, 0x00002709, 0xFFFFEE7B, 0x00000495, 0x00002709, 0xFFFFEE7B, 0x00000495 },
{ 0x0213EA94DE284904, 0x00003F71, 0xFFFFDF82, 0x000006C9, 0x0000219B, 0xFFFFF1AD, 0x0000040F, 0x0000219B, 0xFFFFF1AD, 0x0000040F },
{ 0x0213EA94DE2808E4, 0x00003E73, 0xFFFFE080, 0x0000069B, 0x000020E7, 0xFFFFF273, 0x000003E9, 0x000020E7, 0xFFFFF273, 0x000003E9 },
{ 0x0213EA94DE0E3184, 0x00003E14, 0xFFFFE278, 0x0000062C, 0x00002275, 0xFFFFF2B3, 0x000003CE, 0x00002275, 0xFFFFF2B3, 0x000003CE },
{ 0x0213EA94DE2C21A4, 0x00003ABB, 0xFFFFE3B9, 0x00000615, 0x00002192, 0xFFFFF28F, 0x000003EB, 0x00002192, 0xFFFFF28F, 0x000003EB },
{ 0x0213EA94DE283124, 0x00003D53, 0xFFFFE255, 0x00000643, 0x0000275B, 0xFFFFEEED, 0x00000479, 0x0000275B, 0xFFFFEEED, 0x00000479 },
{ 0x0213EA94DE262864, 0x000043E3, 0xFFFFDDC3, 0x000006FB, 0x00002B6B, 0xFFFFEBD6, 0x000004FA, 0x00002B6B, 0xFFFFEBD6, 0x000004FA },
{ 0x0213EA94DE0E2144, 0x00003BDE, 0xFFFFE507, 0x000005B4, 0x000022CE, 0xFFFFF358, 0x000003AB, 0x000022CE, 0xFFFFF358, 0x000003AB },
{ 0x0213EA94DE323164, 0x00002460, 0xFFFFF3B5, 0x000003A2, 0x000014E7, 0xFFFFFC32, 0x0000027C, 0x000014E7, 0xFFFFFC32, 0x0000027C },
{ 0x0213EA94DE2820C4, 0x00003D20, 0xFFFFE298, 0x0000062F, 0x00002080, 0xFFFFF3AF, 0x000003A8, 0x00002080, 0xFFFFF3AF, 0x000003A8 },
{ 0x0213EA94DE081904, 0x00003E14, 0xFFFFE221, 0x00000641, 0x000021BB, 0xFFFFF2EA, 0x000003CA, 0x000021BB, 0xFFFFF2EA, 0x000003CA },
{ 0x0213EA94DE0A40C4, 0x00003DE1, 0xFFFFE14E, 0x00000677, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
{ 0x0213EA94DE261084, 0x00004372, 0xFFFFDDF8, 0x000006F5, 0x00002B3F, 0xFFFFEBE8, 0x000004F8, 0x00002B3F, 0xFFFFEBE8, 0x000004F8 },
{ 0x0213EA94DE0A28C4, 0x00003E4F, 0xFFFFE2A3, 0x0000062B, 0x00002F5A, 0xFFFFEA37, 0x0000053B, 0x00002F5A, 0xFFFFEA37, 0x0000053B },
{ 0x0213EA94DE2850E4, 0x00003E07, 0xFFFFE02F, 0x000006B6, 0x0000216B, 0xFFFFF1A3, 0x00000416, 0x0000216B, 0xFFFFF1A3, 0x00000416 },
{ 0x0213EA94DE2838A4, 0x00003DAB, 0xFFFFE128, 0x0000067F, 0x0000216F, 0xFFFFF236, 0x000003F3, 0x0000216F, 0xFFFFF236, 0x000003F3 },
{ 0x0213EA94DE2C2924, 0x0000364B, 0xFFFFE8CB, 0x0000052A, 0x00002568, 0xFFFFF1B2, 0x00000400, 0x00002568, 0xFFFFF1B2, 0x00000400 },
{ 0x0213EA94DE261064, 0x00004219, 0xFFFFDE87, 0x000006E8, 0x00002C59, 0xFFFFEAEE, 0x00000529, 0x00002C59, 0xFFFFEAEE, 0x00000529 },
{ 0x0213EA94DE0E1944, 0x000039A8, 0xFFFFE602, 0x00000594, 0x00001D06, 0xFFFFF6F0, 0x00000316, 0x00001D06, 0xFFFFF6F0, 0x00000316 },
{ 0x0213EA94DE2610E4, 0x00004052, 0xFFFFE01C, 0x00000698, 0x00002310, 0xFFFFF1A1, 0x000003FE, 0x00002310, 0xFFFFF1A1, 0x000003FE },
{ 0x0213EA94DE0E2824, 0x00003C1C, 0xFFFFE3EB, 0x000005F1, 0x00002289, 0xFFFFF2CF, 0x000003C9, 0x00002289, 0xFFFFF2CF, 0x000003C9 },
{ 0x0213EA94DE0E5124, 0x00003F19, 0xFFFFE085, 0x0000069E, 0x00002B94, 0xFFFFEB72, 0x0000051D, 0x00002B94, 0xFFFFEB72, 0x0000051D },
{ 0x0213EA94DE0E41A4, 0x00003C51, 0xFFFFE2AD, 0x00000638, 0x0000206B, 0xFFFFF361, 0x000003BE, 0x0000206B, 0xFFFFF361, 0x000003BE },
{ 0x0213EA94DE2610C4, 0x000040B9, 0xFFFFDFBB, 0x000006AB, 0x0000241F, 0xFFFFF0CC, 0x00000425, 0x0000241F, 0xFFFFF0CC, 0x00000425 },
{ 0x0213EA94DE0A2064, 0x00003E62, 0xFFFFE12C, 0x00000678, 0x00002445, 0xFFFFF09E, 0x00000435, 0x00002445, 0xFFFFF09E, 0x00000435 },
{ 0x0213EA94DE0E1984, 0x00003C97, 0xFFFFE399, 0x000005FB, 0x0000209D, 0xFFFFF41D, 0x0000038F, 0x0000209D, 0xFFFFF41D, 0x0000038F },
{ 0x0213EA94DE0E3144, 0x00003FF9, 0xFFFFE1E9, 0x0000063E, 0x00002E96, 0xFFFFEAF5, 0x00000516, 0x00002E96, 0xFFFFEAF5, 0x00000516 },
{ 0x0213EA94DE0A3084, 0x00003F04, 0xFFFFE109, 0x0000067A, 0x000026E1, 0xFFFFEF0B, 0x00000476, 0x000026E1, 0xFFFFEF0B, 0x00000476 },
{ 0x0213EA94DE101124, 0x00003E3E, 0xFFFFE187, 0x00000660, 0x00002049, 0xFFFFF38D, 0x000003B0, 0x00002049, 0xFFFFF38D, 0x000003B0 },
{ 0x0213EA94DE282944, 0x00003D58, 0xFFFFE253, 0x0000063D, 0x00002158, 0xFFFFF308, 0x000003C3, 0x00002158, 0xFFFFF308, 0x000003C3 },
{ 0x0213EA94DE0840C4, 0x00004074, 0xFFFFDF8D, 0x000006C0, 0x00002799, 0xFFFFEE19, 0x000004A5, 0x00002799, 0xFFFFEE19, 0x000004A5 },
{ 0x0213EA94DE281924, 0x00003DAF, 0xFFFFE1C9, 0x00000659, 0x000020E5, 0xFFFFF313, 0x000003C6, 0x000020E5, 0xFFFFF313, 0x000003C6 },
{ 0x0213EA94DE0A3964, 0x000041DD, 0xFFFFDDFA, 0x0000071B, 0x0000348D, 0xFFFFE4B4, 0x0000064C, 0x0000348D, 0xFFFFE4B4, 0x0000064C },
{ 0x0213EA94DE2C2884, 0x00003947, 0xFFFFE5AE, 0x000005B8, 0x000024A6, 0xFFFFF140, 0x0000041D, 0x000024A6, 0xFFFFF140, 0x0000041D },
{ 0x0213EA94DE101844, 0x00003D35, 0xFFFFE197, 0x0000066E, 0x00002248, 0xFFFFF1BC, 0x00000408, 0x00002248, 0xFFFFF1BC, 0x00000408 },
{ 0x0213EA94DE0A18E4, 0x00003F4F, 0xFFFFE13E, 0x0000066D, 0x00002AF0, 0xFFFFEC99, 0x000004DB, 0x00002AF0, 0xFFFFEC99, 0x000004DB },
{ 0x0213EA94DE263944, 0x0000430F, 0xFFFFDDFB, 0x000006FC, 0x00002D4D, 0xFFFFEA55, 0x00000540, 0x00002D4D, 0xFFFFEA55, 0x00000540 },
{ 0x0213EA94DE0E2944, 0x00003B22, 0xFFFFE543, 0x000005B1, 0x000022E1, 0xFFFFF31B, 0x000003B9, 0x000022E1, 0xFFFFF31B, 0x000003B9 },
{ 0x0213EA94DE0E2084, 0x00003978, 0xFFFFE611, 0x00000592, 0x00001C36, 0xFFFFF771, 0x00000302, 0x00001C36, 0xFFFFF771, 0x00000302 },
{ 0x0213EA94DE262164, 0x000044DF, 0xFFFFDDAB, 0x000006F2, 0x00002CEA, 0xFFFFEB47, 0x00000507, 0x00002CEA, 0xFFFFEB47, 0x00000507 },
{ 0x0213EA94DE0A38C4, 0x00003E9B, 0xFFFFE12C, 0x0000067C, 0x00002B79, 0xFFFFEBD9, 0x00000503, 0x00002B79, 0xFFFFEBD9, 0x00000503 },
{ 0x0213EA94DE263044, 0x00004464, 0xFFFFDCD3, 0x00000731, 0x00002D14, 0xFFFFEA2D, 0x0000054E, 0x00002D14, 0xFFFFEA2D, 0x0000054E },
{ 0x0213EA94DE281124, 0x00003FB3, 0xFFFFE052, 0x00000693, 0x000020AC, 0xFFFFF311, 0x000003C6, 0x000020AC, 0xFFFFF311, 0x000003C6 },
{ 0x0213EA94DE2C1084, 0x00003BDA, 0xFFFFE2FB, 0x00000636, 0x0000261E, 0xFFFFEF72, 0x00000471, 0x0000261E, 0xFFFFEF72, 0x00000471 },
{ 0x0213EA94DE2C1964, 0x00003D72, 0xFFFFE28A, 0x0000063E, 0x000029D8, 0xFFFFED54, 0x000004C7, 0x000029D8, 0xFFFFED54, 0x000004C7 },
{ 0x0213EA94DE2C2824, 0x00003E26, 0xFFFFE102, 0x00000694, 0x00002DD1, 0xFFFFE9CA, 0x0000056D, 0x00002DD1, 0xFFFFE9CA, 0x0000056D },
{ 0x0213EA94DE104124, 0x000041CD, 0xFFFFDE97, 0x000006ED, 0x00002DE5, 0xFFFFE9B9, 0x00000565, 0x00002DE5, 0xFFFFE9B9, 0x00000565 },
{ 0x0213EA94DE0A2984, 0x00003F30, 0xFFFFE06E, 0x00000698, 0x000024FF, 0xFFFFEFFC, 0x0000044F, 0x000024FF, 0xFFFFEFFC, 0x0000044F },
{ 0x0213EA94DE2C38C4, 0x0000378B, 0xFFFFE6B4, 0x00000594, 0x000023A7, 0xFFFFF1DC, 0x00000407, 0x000023A7, 0xFFFFF1DC, 0x00000407 },
{ 0x0213EA94DE0E4164, 0x00003CD7, 0xFFFFE28D, 0x00000636, 0x00002036, 0xFFFFF3B5, 0x000003AA, 0x00002036, 0xFFFFF3B5, 0x000003AA },
{ 0x0213EA94DE0A3884, 0x00003EF9, 0xFFFFE0AA, 0x0000068D, 0x000024D3, 0xFFFFF02F, 0x00000445, 0x000024D3, 0xFFFFF02F, 0x00000445 },
{ 0x0213EA94DE283944, 0x00003D08, 0xFFFFE1BB, 0x00000665, 0x00002159, 0xFFFFF26F, 0x000003E6, 0x00002159, 0xFFFFF26F, 0x000003E6 },
{ 0x0213EA94DE2C20C4, 0x000038A9, 0xFFFFE6CA, 0x00000580, 0x000025D3, 0xFFFFF101, 0x00000421, 0x000025D3, 0xFFFFF101, 0x00000421 },
{ 0x0213EA94DE0A20A4, 0x00003E45, 0xFFFFE1F8, 0x0000064D, 0x000027E3, 0xFFFFEEBB, 0x0000047F, 0x000027E3, 0xFFFFEEBB, 0x0000047F },
{ 0x0213EA94DE0E3864, 0x00003F76, 0xFFFFE128, 0x0000066E, 0x0000286B, 0xFFFFEE4C, 0x00000493, 0x0000286B, 0xFFFFEE4C, 0x00000493 },
{ 0x0213EA94DE264104, 0x0000440D, 0xFFFFDCA2, 0x0000074F, 0x00003817, 0xFFFFE256, 0x000006AF, 0x00003817, 0xFFFFE256, 0x000006AF },
{ 0x0213EA94DE105104, 0x00003EE1, 0xFFFFDFA7, 0x000006D4, 0x000027EA, 0xFFFFED2B, 0x000004DE, 0x000027EA, 0xFFFFED2B, 0x000004DE },
{ 0x0213EA94DE2C3864, 0x00003C62, 0xFFFFE285, 0x0000064A, 0x00002520, 0xFFFFF001, 0x0000045C, 0x00002520, 0xFFFFF001, 0x0000045C },
{ 0x0213EA94DE323964, 0x0000272E, 0xFFFFF17A, 0x000003FA, 0x0000150B, 0xFFFFFBD5, 0x00000284, 0x0000150B, 0xFFFFFBD5, 0x00000284 },
{ 0x0213EA94DE261924, 0x00004275, 0xFFFFDF69, 0x000006A5, 0x000025AA, 0xFFFFF05C, 0x0000042B, 0x000025AA, 0xFFFFF05C, 0x0000042B },
{ 0x0213EA94DE0E40E4, 0x00003CAA, 0xFFFFE392, 0x000005FF, 0x000023A8, 0xFFFFF20E, 0x000003E9, 0x000023A8, 0xFFFFF20E, 0x000003E9 },
{ 0x0213EA94DE2C50C4, 0x00003CF8, 0xFFFFE0FB, 0x000006A6, 0x00002CA7, 0xFFFFE9FF, 0x0000056E, 0x00002CA7, 0xFFFFE9FF, 0x0000056E },
{ 0x0213EA94DE282124, 0x00003D00, 0xFFFFE296, 0x00000633, 0x000021C1, 0xFFFFF2C8, 0x000003CF, 0x000021C1, 0xFFFFF2C8, 0x000003CF },
{ 0x0213EA94DE2838E4, 0x00003B46, 0xFFFFE301, 0x00000632, 0x0000204C, 0xFFFFF33B, 0x000003C8, 0x0000204C, 0xFFFFF33B, 0x000003C8 },
{ 0x0213EA94DE204164, 0x00002026, 0xFFFFF5CE, 0x00000368, 0x00001598, 0xFFFFFB29, 0x000002C3, 0x00001598, 0xFFFFFB29, 0x000002C3 },
{ 0x0213EA94DE283164, 0x00003DCA, 0xFFFFE178, 0x00000668, 0x00001FDB, 0xFFFFF39D, 0x000003AF, 0x00001FDB, 0xFFFFF39D, 0x000003AF },
{ 0x0213EA94DE2C48C4, 0x00003A59, 0xFFFFE327, 0x00000642, 0x000024B9, 0xFFFFEFC4, 0x00000471, 0x000024B9, 0xFFFFEFC4, 0x00000471 },
{ 0x0213EA94DE2C2944, 0x00003C26, 0xFFFFE440, 0x000005EB, 0x00002C0F, 0xFFFFEC88, 0x000004E0, 0x00002C0F, 0xFFFFEC88, 0x000004E0 },
{ 0x0213EA94DE083884, 0x00004149, 0xFFFFDEB8, 0x000006E7, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
{ 0x0213EA94DE0E4124, 0x00003EB4, 0xFFFFE1E5, 0x0000064D, 0x0000299F, 0xFFFFEDB3, 0x000004A9, 0x0000299F, 0xFFFFEDB3, 0x000004A9 },
{ 0x0213EA94DE2C39A4, 0x00003BBF, 0xFFFFE268, 0x0000065A, 0x00002504, 0xFFFFEFB0, 0x00000470, 0x00002504, 0xFFFFEFB0, 0x00000470 },
{ 0x0213EA94DE084904, 0x00004203, 0xFFFFDDC6, 0x00000720, 0x0000303B, 0xFFFFE78F, 0x000005D0, 0x0000303B, 0xFFFFE78F, 0x000005D0 },
{ 0x0213EA94DE0E3984, 0x00003DA3, 0xFFFFE244, 0x0000063E, 0x000021B4, 0xFFFFF2DA, 0x000003CD, 0x000021B4, 0xFFFFF2DA, 0x000003CD },
{ 0x0213EA94DE0A38E4, 0x00004035, 0xFFFFE065, 0x0000069B, 0x00003323, 0xFFFFE6D6, 0x000005D8, 0x00003323, 0xFFFFE6D6, 0x000005D8 },
{ 0x0213EA94DE2C1164, 0x00003944, 0xFFFFE4E5, 0x000005E2, 0x00001F3C, 0xFFFFF456, 0x0000039D, 0x00001F3C, 0xFFFFF456, 0x0000039D },
{ 0x0213EA94DE061904, 0x000032D8, 0xFFFFEAE8, 0x000004E6, 0x00001812, 0xFFFFFA1C, 0x000002BC, 0x00001812, 0xFFFFFA1C, 0x000002BC },
{ 0x0213F0FD42D22944, 0x000041F6, 0xFFFFE025, 0x0000069A, 0x0000241E, 0xFFFFF1B4, 0x00000402, 0x0000241E, 0xFFFFF1B4, 0x00000402 },
{ 0x0213F0FE990C30A4, 0x00003300, 0xFFFFEB60, 0x000004C1, 0x00001E15, 0xFFFFF6A6, 0x0000033B, 0x00001E15, 0xFFFFF6A6, 0x0000033B },
{ 0x0213EA94DE0408A4, 0x000037F0, 0xFFFFE68F, 0x0000059B, 0x00001F8A, 0xFFFFF467, 0x000003A3, 0x00001F8A, 0xFFFFF467, 0x000003A3 },
{ 0x0213F0FE99182984, 0x000025D8, 0xFFFFF2AA, 0x000003C3, 0x000018A8, 0xFFFFF9BE, 0x000002D2, 0x000018A8, 0xFFFFF9BE, 0x000002D2 },
{ 0x0213F0FE990620C4, 0x0000364F, 0xFFFFE988, 0x000004FC, 0x00001E51, 0xFFFFF633, 0x0000034F, 0x00001E51, 0xFFFFF633, 0x0000034F },
{ 0x0213EA94DE061144, 0x00002288, 0xFFFFF483, 0x0000036C, 0x0000280F, 0xFFFFEF39, 0x0000047B, 0x0000280F, 0xFFFFEF39, 0x0000047B },
{ 0x0213F0FE99082084, 0x00003322, 0xFFFFEA7E, 0x000004ED, 0x00001DAD, 0xFFFFF62B, 0x00000355, 0x00001DAD, 0xFFFFF62B, 0x00000355 },
{ 0x0213EA94DE0250E4, 0x00002B7B, 0xFFFFEE4F, 0x0000045B, 0x00001AA2, 0xFFFFF710, 0x0000033E, 0x00001AA2, 0xFFFFF710, 0x0000033E },
{ 0x0213F0FE990420C4, 0x000034CC, 0xFFFFEA79, 0x000004E4, 0x00001B05, 0xFFFFF8B3, 0x000002EC, 0x00001B05, 0xFFFFF8B3, 0x000002EC },
{ 0x0213F0FD42DC2864, 0x00003837, 0xFFFFE5ED, 0x000005C3, 0x00001ACB, 0xFFFFF7B2, 0x00000314, 0x00001ACB, 0xFFFFF7B2, 0x00000314 },
{ 0x0213F0FE99044164, 0x0000352D, 0xFFFFE88F, 0x00000548, 0x000021E6, 0xFFFFF3B5, 0x000003AA, 0x000021E6, 0xFFFFF3B5, 0x000003AA },
{ 0x0213F0FE990A4884, 0x00003300, 0xFFFFE835, 0x0000057B, 0x00001A85, 0xFFFFF715, 0x00000336, 0x00001A85, 0xFFFFF715, 0x00000336 },
{ 0x0213EA94DE0448A4, 0x000033FA, 0xFFFFE851, 0x00000565, 0x00001A8E, 0xFFFFF727, 0x0000033B, 0x00001A8E, 0xFFFFF727, 0x0000033B },
{ 0x0213F0FD42DA3924, 0x000039D3, 0xFFFFE5D3, 0x000005B0, 0x00001888, 0xFFFFF978, 0x000002C8, 0x00001888, 0xFFFFF978, 0x000002C8 },
{ 0x0213F0FE990E4864, 0x00002F6B, 0xFFFFEC53, 0x000004B9, 0x00001C15, 0xFFFFF71B, 0x00000337, 0x00001C15, 0xFFFFF71B, 0x00000337 },
{ 0x0213F0FE99064144, 0x0000384D, 0xFFFFE737, 0x00000569, 0x00001D2D, 0xFFFFF673, 0x00000343, 0x00001D2D, 0xFFFFF673, 0x00000343 },
{ 0x0213F0FE990620A4, 0x00003A49, 0xFFFFE70B, 0x0000055F, 0x00001A63, 0xFFFFF8CD, 0x000002E2, 0x00001A63, 0xFFFFF8CD, 0x000002E2 },
{ 0x0213F0FE99042984, 0x0000311E, 0xFFFFEB97, 0x000004C6, 0x00001EAE, 0xFFFFF5A9, 0x00000367, 0x00001EAE, 0xFFFFF5A9, 0x00000367 },
{ 0x0213F0FE990E1124, 0x000027D3, 0xFFFFF075, 0x00000417, 0x00002001, 0xFFFFF44A, 0x000003A2, 0x00002001, 0xFFFFF44A, 0x000003A2 },
{ 0x0213F0FE99064904, 0x00003B72, 0xFFFFE4BD, 0x000005DC, 0x00001D76, 0xFFFFF606, 0x0000035A, 0x00001D76, 0xFFFFF606, 0x0000035A },
{ 0x0213F0FE99101124, 0x00002E0F, 0xFFFFECA7, 0x000004AE, 0x00001DC6, 0xFFFFF5BF, 0x0000036A, 0x00001DC6, 0xFFFFF5BF, 0x0000036A },
{ 0x0213F0FE990238A4, 0x000032C7, 0xFFFFEA7A, 0x000004F0, 0x00001A7B, 0xFFFFF827, 0x00000301, 0x00001A7B, 0xFFFFF827, 0x00000301 },
{ 0x0213EA94DE044884, 0x0000312D, 0xFFFFEA39, 0x00000515, 0x00001948, 0xFFFFF800, 0x00000318, 0x00001948, 0xFFFFF800, 0x00000318 },
{ 0x0213EA94DE062084, 0x00003611, 0xFFFFE8D7, 0x00000533, 0x00001929, 0xFFFFF965, 0x000002D2, 0x00001929, 0xFFFFF965, 0x000002D2 },
{ 0x0213F0FE992C30E4, 0x00002FE2, 0xFFFFED89, 0x00000470, 0x00001A3C, 0xFFFFF955, 0x000002D5, 0x00001A3C, 0xFFFFF955, 0x000002D5 },
{ 0x0213EA94DE0208A4, 0x000035FF, 0xFFFFE884, 0x00000548, 0x0000182A, 0xFFFFF9AB, 0x000002CF, 0x0000182A, 0xFFFFF9AB, 0x000002CF },
{ 0x0213F0FE990220E4, 0x00003597, 0xFFFFE904, 0x00000528, 0x00001A94, 0xFFFFF840, 0x00000300, 0x00001A94, 0xFFFFF840, 0x00000300 },
{ 0x0213F0FE99181944, 0x000026CB, 0xFFFFF1FB, 0x000003E4, 0x000017CC, 0xFFFFFA25, 0x000002C8, 0x000017CC, 0xFFFFFA25, 0x000002C8 },
{ 0x0213EA94DE0608C4, 0x00003274, 0xFFFFEA39, 0x0000050C, 0x00001B20, 0xFFFFF7C1, 0x00000314, 0x00001B20, 0xFFFFF7C1, 0x00000314 },
{ 0x0213F0FD42D82924, 0x0000280B, 0xFFFFF283, 0x000003B5, 0x000018D0, 0xFFFFF992, 0x000002EC, 0x000018D0, 0xFFFFF992, 0x000002EC },
{ 0x0213F0FE99062104, 0x000033AB, 0xFFFFEB1B, 0x000004C4, 0x00001FEE, 0xFFFFF53A, 0x00000378, 0x00001FEE, 0xFFFFF53A, 0x00000378 },
{ 0x0213F0FE990A3964, 0x00002F79, 0xFFFFEB0C, 0x000004FA, 0x00001E57, 0xFFFFF4BF, 0x0000039B, 0x00001E57, 0xFFFFF4BF, 0x0000039B },
{ 0x0213F0FE990448E4, 0x00003487, 0xFFFFE8F2, 0x00000539, 0x0000185B, 0xFFFFF9AE, 0x000002BA, 0x0000185B, 0xFFFFF9AE, 0x000002BA },
{ 0x0213F0FE990A18A4, 0x00003500, 0xFFFFE793, 0x0000058A, 0x00001AA2, 0xFFFFF792, 0x0000031D, 0x00001AA2, 0xFFFFF792, 0x0000031D },
{ 0x0213F0FE99081164, 0x00003943, 0xFFFFE54D, 0x000005D9, 0x00001BC8, 0xFFFFF6E0, 0x00000339, 0x00001BC8, 0xFFFFF6E0, 0x00000339 },
{ 0x0213EA94DE0430A4, 0x0000306D, 0xFFFFEC5E, 0x000004A5, 0x00001A3A, 0xFFFFF85F, 0x00000304, 0x00001A3A, 0xFFFFF85F, 0x00000304 },
{ 0x0213F0FD42D83084, 0x00002BA4, 0xFFFFEE8D, 0x0000046A, 0x0000198C, 0xFFFFF88E, 0x00000307, 0x0000198C, 0xFFFFF88E, 0x00000307 },
{ 0x0213F0FD42D218E4, 0x00003D30, 0xFFFFE2F6, 0x0000062A, 0x000025DC, 0xFFFFF074, 0x00000435, 0x000025DC, 0xFFFFF074, 0x00000435 },
{ 0x0213F0FD42D83964, 0x00002CD6, 0xFFFFED79, 0x0000049B, 0x000016D0, 0xFFFFFA53, 0x000002BB, 0x000016D0, 0xFFFFFA53, 0x000002BB },
{ 0x0213F0FE99163164, 0x00002484, 0xFFFFF3BD, 0x000003A0, 0x000015B8, 0xFFFFFB6B, 0x000002A4, 0x000015B8, 0xFFFFFB6B, 0x000002A4 },
{ 0x0213F0FE990E3944, 0x000038AE, 0xFFFFE6D1, 0x00000587, 0x00001A2A, 0xFFFFF8F1, 0x000002D4, 0x00001A2A, 0xFFFFF8F1, 0x000002D4 },
{ 0x0213F0FE99044944, 0x000036FD, 0xFFFFE76C, 0x00000576, 0x00001EE4, 0xFFFFF58D, 0x00000361, 0x00001EE4, 0xFFFFF58D, 0x00000361 },
{ 0x0213F0FD42D830A4, 0x00002BCF, 0xFFFFEF28, 0x00000448, 0x00001B93, 0xFFFFF7BA, 0x00000327, 0x00001B93, 0xFFFFF7BA, 0x00000327 },
{ 0x0213F0FE99062884, 0x00003834, 0xFFFFE818, 0x0000053B, 0x00001AFE, 0xFFFFF85C, 0x000002F3, 0x00001AFE, 0xFFFFF85C, 0x000002F3 },
{ 0x0213F0FE993231A4, 0x00002EF7, 0xFFFFEBFC, 0x000004CE, 0x00001897, 0xFFFFF8EF, 0x000002EC, 0x00001897, 0xFFFFF8EF, 0x000002EC },
{ 0x0213F0FE992C18C4, 0x000035BD, 0xFFFFE8BB, 0x0000053B, 0x00001F22, 0xFFFFF561, 0x00000373, 0x00001F22, 0xFFFFF561, 0x00000373 },
{ 0x0213F0FE99183984, 0x00002D42, 0xFFFFEE1D, 0x00000478, 0x000016F0, 0xFFFFFAAE, 0x000002B3, 0x000016F0, 0xFFFFFAAE, 0x000002B3 },
{ 0x0213EA94DE045124, 0x00002F98, 0xFFFFEB3C, 0x000004F0, 0x00001903, 0xFFFFF818, 0x00000319, 0x00001903, 0xFFFFF818, 0x00000319 },
{ 0x0213F0FD42D42144, 0x00004081, 0xFFFFDF13, 0x000006F3, 0x00002A6D, 0xFFFFEC1B, 0x00000509, 0x00002A6D, 0xFFFFEC1B, 0x00000509 },
{ 0x0213EA94DE040904, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001FF6, 0xFFFFF427, 0x000003B0, 0x00001FF6, 0xFFFFF427, 0x000003B0 },
{ 0x0213F0FE99023884, 0x00003243, 0xFFFFEA5C, 0x000004FD, 0x000020FB, 0xFFFFF39E, 0x000003C0, 0x000020FB, 0xFFFFF39E, 0x000003C0 },
{ 0x0213F0FD42D848A4, 0x00002F20, 0xFFFFEC19, 0x000004C6, 0x00001748, 0xFFFFF99F, 0x000002DA, 0x00001748, 0xFFFFF99F, 0x000002DA },
{ 0x0213F0FE99103984, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001A43, 0xFFFFF843, 0x000002F9, 0x00001A43, 0xFFFFF843, 0x000002F9 },
{ 0x0213F0FE990220A4, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x00001A51, 0xFFFFF850, 0x000002FA, 0x00001A51, 0xFFFFF850, 0x000002FA },
{ 0x0213F0FE99043144, 0x0000305C, 0xFFFFED4B, 0x0000046C, 0x00001CF9, 0xFFFFF7BA, 0x00000304, 0x00001CF9, 0xFFFFF7BA, 0x00000304 },
{ 0x0213F0FD42DA4164, 0x0000343C, 0xFFFFE869, 0x00000559, 0x00001CE2, 0xFFFFF614, 0x00000359, 0x00001CE2, 0xFFFFF614, 0x00000359 },
{ 0x0213F0FE99183964, 0x00002782, 0xFFFFF1FE, 0x000003D9, 0x000015DC, 0xFFFFFB8B, 0x00000290, 0x000015DC, 0xFFFFFB8B, 0x00000290 },
{ 0x0213F0FE991818C4, 0x00002B9C, 0xFFFFEF63, 0x00000443, 0x00001369, 0xFFFFFD51, 0x00000244, 0x00001369, 0xFFFFFD51, 0x00000244 },
{ 0x0213F0FE990A2084, 0x000035F8, 0xFFFFE743, 0x00000592, 0x000018D8, 0xFFFFF8EE, 0x000002E4, 0x000018D8, 0xFFFFF8EE, 0x000002E4 },
{ 0x0213EA94DE062844, 0x00002B72, 0xFFFFEF1E, 0x0000043C, 0x00002647, 0xFFFFF092, 0x0000043E, 0x00002647, 0xFFFFF092, 0x0000043E },
{ 0x0213F0FE99102184, 0x00002EC9, 0xFFFFEC5F, 0x000004B8, 0x000018B6, 0xFFFFF936, 0x000002D8, 0x000018B6, 0xFFFFF936, 0x000002D8 },
{ 0x0213F0FE99064084, 0x000038A7, 0xFFFFE6AC, 0x00000589, 0x00001C42, 0xFFFFF70B, 0x00000329, 0x00001C42, 0xFFFFF70B, 0x00000329 },
{ 0x0213F0FE993008A4, 0x00002F6B, 0xFFFFEBF6, 0x000004CF, 0x000018AE, 0xFFFFF928, 0x000002E3, 0x000018AE, 0xFFFFF928, 0x000002E3 },
{ 0x0213F0FD42DA5104, 0x000029CD, 0xFFFFEEE1, 0x00000459, 0x00001AB5, 0xFFFFF76F, 0x00000324, 0x00001AB5, 0xFFFFF76F, 0x00000324 },
{ 0x0213EA94DE0638C4, 0x00003921, 0xFFFFE71D, 0x00000577, 0x00001646, 0xFFFFFB24, 0x00000293, 0x00001646, 0xFFFFFB24, 0x00000293 },
{ 0x0213EA94DE044164, 0x00003940, 0xFFFFE521, 0x000005E8, 0x00001947, 0xFFFFF839, 0x0000030D, 0x00001947, 0xFFFFF839, 0x0000030D },
{ 0x0213F0FD42D24164, 0x00003DCA, 0xFFFFE211, 0x00000659, 0x0000250E, 0xFFFFF072, 0x00000443, 0x0000250E, 0xFFFFF072, 0x00000443 },
{ 0x0213F0FE990C0904, 0x00002E95, 0xFFFFEC20, 0x000004C9, 0x000015B4, 0xFFFFFAD3, 0x0000029D, 0x000015B4, 0xFFFFFAD3, 0x0000029D },
{ 0x0213F0FE99041084, 0x00002C11, 0xFFFFEE6E, 0x00000468, 0x00001901, 0xFFFFF924, 0x000002E7, 0x00001901, 0xFFFFF924, 0x000002E7 },
{ 0x0213EA94DE062104, 0x0000293F, 0xFFFFF158, 0x000003E6, 0x0000183F, 0xFFFFF9F6, 0x000002D2, 0x0000183F, 0xFFFFF9F6, 0x000002D2 },
{ 0x0213F0FE990E1104, 0x00002A67, 0xFFFFEF34, 0x0000043E, 0x00001C6F, 0xFFFFF6F1, 0x0000032B, 0x00001C6F, 0xFFFFF6F1, 0x0000032B },
{ 0x0213EA94DE065124, 0x00002F8D, 0xFFFFEB77, 0x000004DA, 0x00001C0D, 0xFFFFF627, 0x00000365, 0x00001C0D, 0xFFFFF627, 0x00000365 },
{ 0x0213F0FE990C38C4, 0x00003476, 0xFFFFEA5B, 0x000004E7, 0x00001DBF, 0xFFFFF6C7, 0x00000333, 0x00001DBF, 0xFFFFF6C7, 0x00000333 },
{ 0x0213F0FE990E0944, 0x00003336, 0xFFFFE92F, 0x00000546, 0x00001614, 0xFFFFFAE0, 0x00000296, 0x00001614, 0xFFFFFAE0, 0x00000296 },
{ 0x0213F0FE99162164, 0x00002513, 0xFFFFF323, 0x000003BC, 0x000016DB, 0xFFFFFA79, 0x000002CD, 0x000016DB, 0xFFFFFA79, 0x000002CD },
{ 0x0213F0FE990A2944, 0x000035A7, 0xFFFFE78E, 0x00000584, 0x00001B0D, 0xFFFFF77D, 0x0000031F, 0x00001B0D, 0xFFFFF77D, 0x0000031F },
{ 0x0213F0FE993238E4, 0x00003171, 0xFFFFEB98, 0x000004C6, 0x00001C76, 0xFFFFF71F, 0x0000032F, 0x00001C76, 0xFFFFF71F, 0x0000032F },
{ 0x0213F0FD42DA1084, 0x00002C52, 0xFFFFED2E, 0x000004A7, 0x00002182, 0xFFFFF2F4, 0x000003E4, 0x00002182, 0xFFFFF2F4, 0x000003E4 },
{ 0x0213F0FE99102924, 0x000032E1, 0xFFFFEB39, 0x000004D0, 0x00001B55, 0xFFFFF859, 0x000002FA, 0x00001B55, 0xFFFFF859, 0x000002FA },
{ 0x0213F0FE991848A4, 0x000029B6, 0xFFFFEFF7, 0x00000430, 0x0000151B, 0xFFFFFBC6, 0x0000027F, 0x0000151B, 0xFFFFFBC6, 0x0000027F },
{ 0x0213F0FD42DA1964, 0x00002FF7, 0xFFFFEB67, 0x000004DA, 0x000020E9, 0xFFFFF363, 0x000003CE, 0x000020E9, 0xFFFFF363, 0x000003CE },
{ 0x0213F0FD42DA5124, 0x00003CDD, 0xFFFFE2B2, 0x00000649, 0x00001B18, 0xFFFFF739, 0x00000329, 0x00001B18, 0xFFFFF739, 0x00000329 },
{ 0x0213F0FE990628A4, 0x00003C82, 0xFFFFE5C6, 0x0000058E, 0x00001F3F, 0xFFFFF5AD, 0x00000361, 0x00001F3F, 0xFFFFF5AD, 0x00000361 },
{ 0x0213F0FD42DC4084, 0x0000319B, 0xFFFFEA15, 0x0000051B, 0x00001CC9, 0xFFFFF62E, 0x00000358, 0x00001CC9, 0xFFFFF62E, 0x00000358 },
{ 0x0213EA94DE0638E4, 0x000032B6, 0xFFFFEB2B, 0x000004D6, 0x000018E0, 0xFFFFF966, 0x000002DE, 0x000018E0, 0xFFFFF966, 0x000002DE },
{ 0x0213EA94DE023984, 0x0000300A, 0xFFFFEBA6, 0x000004D1, 0x00001CFD, 0xFFFFF5F6, 0x0000036D, 0x00001CFD, 0xFFFFF5F6, 0x0000036D },
{ 0x0213F0FD42D82984, 0x000026A9, 0xFFFFF15D, 0x00000400, 0x00001561, 0xFFFFFB1F, 0x000002A0, 0x00001561, 0xFFFFFB1F, 0x000002A0 },
{ 0x0213F0FE990E5124, 0x00003123, 0xFFFFEAD2, 0x000004FA, 0x000018CB, 0xFFFFF8F5, 0x000002EC, 0x000018CB, 0xFFFFF8F5, 0x000002EC },
{ 0x0213F0FE991840C4, 0x00003577, 0xFFFFE935, 0x00000533, 0x000016CD, 0xFFFFFB44, 0x00000289, 0x000016CD, 0xFFFFFB44, 0x00000289 },
{ 0x0213F0FE99282184, 0x00002875, 0xFFFFF170, 0x000003F3, 0x00001567, 0xFFFFFBD5, 0x00000289, 0x00001567, 0xFFFFFBD5, 0x00000289 },
{ 0x0213F0FE99084084, 0x00003AE2, 0xFFFFE538, 0x000005C1, 0x00001CB4, 0xFFFFF6A3, 0x0000033C, 0x00001CB4, 0xFFFFF6A3, 0x0000033C },
{ 0x0213F0FE990C38E4, 0x000031DF, 0xFFFFEC2A, 0x000004A3, 0x00001EF0, 0xFFFFF626, 0x00000352, 0x00001EF0, 0xFFFFF626, 0x00000352 },
{ 0x0213F0FD42D25144, 0x00004A6A, 0xFFFFDB15, 0x00000758, 0x000027F3, 0xFFFFEEEE, 0x00000479, 0x000027F3, 0xFFFFEEEE, 0x00000479 },
{ 0x0213EA94DE063904, 0x00002BB9, 0xFFFFEF5D, 0x00000433, 0x00001589, 0xFFFFFB57, 0x00000295, 0x00001589, 0xFFFFFB57, 0x00000295 },
{ 0x0213F0FE99042164, 0x000033A0, 0xFFFFE98F, 0x00000528, 0x00001CB4, 0xFFFFF706, 0x0000032D, 0x00001CB4, 0xFFFFF706, 0x0000032D },
{ 0x0213F0FE99163064, 0x0000248E, 0xFFFFF380, 0x000003AC, 0x000016EA, 0xFFFFFA6C, 0x000002CE, 0x000016EA, 0xFFFFFA6C, 0x000002CE },
{ 0x0213F0FE990221A4, 0x00002FE2, 0xFFFFEB2F, 0x000004E9, 0x00001D4E, 0xFFFFF56B, 0x00000380, 0x00001D4E, 0xFFFFF56B, 0x00000380 },
{ 0x0213F0FE990A2884, 0x00003283, 0xFFFFE9E7, 0x0000051D, 0x00000694, 0xFFFFFD32, 0x000003C3, 0x00000694, 0xFFFFFD32, 0x000003C3 },
{ 0x0213F0FD42D850C4, 0x00002EE4, 0xFFFFEBFD, 0x000004D3, 0x0000151A, 0xFFFFFAF6, 0x000002A4, 0x0000151A, 0xFFFFFAF6, 0x000002A4 },
{ 0x0213F0FD42DC18E4, 0x0000302D, 0xFFFFEB7F, 0x000004DA, 0x00001E6D, 0xFFFFF54B, 0x00000380, 0x00001E6D, 0xFFFFF54B, 0x00000380 },
{ 0x0213F0FD42DA50C4, 0x000033DA, 0xFFFFE7FB, 0x0000057F, 0x00001DED, 0xFFFFF50E, 0x0000038D, 0x00001DED, 0xFFFFF50E, 0x0000038D },
{ 0x0213F0FE992C4084, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001C3F, 0xFFFFF726, 0x0000032A, 0x00001C3F, 0xFFFFF726, 0x0000032A },
{ 0x0213F0FE990831C4, 0x00003BBD, 0xFFFFE55C, 0x000005B8, 0x000019DB, 0xFFFFF8BB, 0x000002EF, 0x000019DB, 0xFFFFF8BB, 0x000002EF },
{ 0x0213F0FE990E3884, 0x00002964, 0xFFFFF051, 0x0000040E, 0x000025CD, 0xFFFFF11B, 0x0000041F, 0x000025CD, 0xFFFFF11B, 0x0000041F },
{ 0x0213F0FD42DC4884, 0x000033F5, 0xFFFFE863, 0x00000560, 0x00001BCE, 0xFFFFF689, 0x0000034B, 0x00001BCE, 0xFFFFF689, 0x0000034B },
{ 0x0213F0FE990A2864, 0x00003294, 0xFFFFE924, 0x00000548, 0x00001D41, 0xFFFFF580, 0x0000037D, 0x00001D41, 0xFFFFF580, 0x0000037D },
{ 0x0213F0FD42DC39A4, 0x000034FB, 0xFFFFE7FE, 0x0000056D, 0x00001CB1, 0xFFFFF635, 0x00000357, 0x00001CB1, 0xFFFFF635, 0x00000357 },
{ 0x0213F0FE990A10A4, 0x00002E28, 0xFFFFEBB9, 0x000004E0, 0x00001B20, 0xFFFFF6E3, 0x0000033C, 0x00001B20, 0xFFFFF6E3, 0x0000033C },
{ 0x0213F0FD42DA1904, 0x00002799, 0xFFFFF0F4, 0x000003FC, 0x00001C9D, 0xFFFFF6A1, 0x00000345, 0x00001C9D, 0xFFFFF6A1, 0x00000345 },
{ 0x0213F0FE99064104, 0x00003AEA, 0xFFFFE5DB, 0x0000059D, 0x00001B61, 0xFFFFF7F0, 0x00000301, 0x00001B61, 0xFFFFF7F0, 0x00000301 },
{ 0x0213EA94DE041984, 0x000031F6, 0xFFFFEAB8, 0x000004F3, 0x00001D90, 0xFFFFF622, 0x00000359, 0x00001D90, 0xFFFFF622, 0x00000359 },
{ 0x0213F0FE990C4064, 0x000031B8, 0xFFFFEA61, 0x0000050F, 0x0000199D, 0xFFFFF87C, 0x000002FD, 0x0000199D, 0xFFFFF87C, 0x000002FD },
{ 0x0213F0FD42D23144, 0x00004514, 0xFFFFDDFF, 0x000006F6, 0x000022CD, 0xFFFFF29F, 0x000003D9, 0x000022CD, 0xFFFFF29F, 0x000003D9 },
{ 0x0213EA94DE043164, 0x00002F30, 0xFFFFECB8, 0x000004A0, 0x00001B07, 0xFFFFF7E2, 0x00000313, 0x00001B07, 0xFFFFF7E2, 0x00000313 },
{ 0x0213F0FD42DC30A4, 0x0000383B, 0xFFFFE702, 0x00000581, 0x00001A08, 0xFFFFF8CA, 0x000002E2, 0x00001A08, 0xFFFFF8CA, 0x000002E2 },
{ 0x0213F0FE99022164, 0x00002CC5, 0xFFFFEDF8, 0x00000465, 0x00001F47, 0xFFFFF4B2, 0x00000393, 0x00001F47, 0xFFFFF4B2, 0x00000393 },
{ 0x0213F0FE991621C4, 0x00002304, 0xFFFFF453, 0x00000384, 0x0000170A, 0xFFFFFA3F, 0x000002CE, 0x0000170A, 0xFFFFFA3F, 0x000002CE },
{ 0x0213F0FE990A5124, 0x0000337E, 0xFFFFE850, 0x0000056E, 0x00001BDD, 0xFFFFF668, 0x00000353, 0x00001BDD, 0xFFFFF668, 0x00000353 },
{ 0x0213F0FE990E4924, 0x00002E2F, 0xFFFFEC9B, 0x000004AE, 0x00001C4D, 0xFFFFF6D3, 0x00000338, 0x00001C4D, 0xFFFFF6D3, 0x00000338 },
{ 0x0213EA94DE061124, 0x00002DDD, 0xFFFFEDA4, 0x00000477, 0x00002010, 0xFFFFF4BB, 0x00000390, 0x00002010, 0xFFFFF4BB, 0x00000390 },
{ 0x0213F0FD42DA48E4, 0x0000290C, 0xFFFFEF61, 0x00000445, 0x00002133, 0xFFFFF324, 0x000003D8, 0x00002133, 0xFFFFF324, 0x000003D8 },
{ 0x0213F0FE99062924, 0x0000371E, 0xFFFFE8D5, 0x00000524, 0x00001C3A, 0xFFFFF7AE, 0x00000314, 0x00001C3A, 0xFFFFF7AE, 0x00000314 },
{ 0x0213F0FD42D838E4, 0x00002A58, 0xFFFFF007, 0x00000429, 0x000018A6, 0xFFFFF98F, 0x000002E1, 0x000018A6, 0xFFFFF98F, 0x000002E1 },
{ 0x0213F0FE99023084, 0x00002FED, 0xFFFFEC48, 0x000004AA, 0x00001E9D, 0xFFFFF584, 0x00000370, 0x00001E9D, 0xFFFFF584, 0x00000370 },
{ 0x0213F0FE99181884, 0x00002829, 0xFFFFF15F, 0x000003F7, 0x0000157E, 0xFFFFFBD4, 0x00000282, 0x0000157E, 0xFFFFFBD4, 0x00000282 },
{ 0x0213F0FE99101924, 0x000030CF, 0xFFFFEB8D, 0x000004CE, 0x00001A4C, 0xFFFFF868, 0x000002F7, 0x00001A4C, 0xFFFFF868, 0x000002F7 },
{ 0x0213F0FD42DA2084, 0x00002C8F, 0xFFFFEDD2, 0x0000047D, 0x00001CCE, 0xFFFFF6A1, 0x00000343, 0x00001CCE, 0xFFFFF6A1, 0x00000343 },
{ 0x0213F0FE99182164, 0x00002A84, 0xFFFFEFBA, 0x0000043E, 0x000015EF, 0xFFFFFB4B, 0x0000029E, 0x000015EF, 0xFFFFFB4B, 0x0000029E },
{ 0x0213F0FE990C28A4, 0x000034CA, 0xFFFFEA08, 0x000004FF, 0x00001C19, 0xFFFFF7ED, 0x00000309, 0x00001C19, 0xFFFFF7ED, 0x00000309 },
{ 0x0213F0FE991639A4, 0x00002187, 0xFFFFF4B0, 0x0000037E, 0x0000154A, 0xFFFFFB0C, 0x000002AE, 0x0000154A, 0xFFFFFB0C, 0x000002AE },
{ 0x0213F0FD42DA3844, 0x00002F4F, 0xFFFFEB3C, 0x000004F8, 0x0000181F, 0xFFFFF92D, 0x000002DF, 0x0000181F, 0xFFFFF92D, 0x000002DF },
{ 0x0213F0FE990410E4, 0x0000290C, 0xFFFFF0B1, 0x000003FC, 0x00001DB0, 0xFFFFF636, 0x00000355, 0x00001DB0, 0xFFFFF636, 0x00000355 },
{ 0x0213F0FE990A1064, 0x000034C1, 0xFFFFE888, 0x0000055A, 0x000019BF, 0xFFFFF881, 0x000002FB, 0x000019BF, 0xFFFFF881, 0x000002FB },
{ 0x0213F0FD42DC18C4, 0x00003139, 0xFFFFEA98, 0x00000504, 0x000019F2, 0xFFFFF820, 0x0000030B, 0x000019F2, 0xFFFFF820, 0x0000030B },
{ 0x0213F0FD42D83144, 0x00002CAC, 0xFFFFEEB2, 0x00000458, 0x0000152C, 0xFFFFFBEF, 0x0000027B, 0x0000152C, 0xFFFFFBEF, 0x0000027B },
{ 0x0213F0FE992C38E4, 0x00003577, 0xFFFFE99C, 0x0000050D, 0x00001E64, 0xFFFFF679, 0x0000033F, 0x00001E64, 0xFFFFF679, 0x0000033F },
{ 0x0213F0FD42DA4104, 0x0000263A, 0xFFFFF1E4, 0x000003D4, 0x00001F68, 0xFFFFF4ED, 0x00000386, 0x00001F68, 0xFFFFF4ED, 0x00000386 },
{ 0x0213F0FD42D81984, 0x00002CE9, 0xFFFFED63, 0x00000497, 0x00001810, 0xFFFFF94D, 0x000002E3, 0x00001810, 0xFFFFF94D, 0x000002E3 },
{ 0x0213EA94DE044104, 0x0000318A, 0xFFFFEAC8, 0x000004F5, 0x0000195C, 0xFFFFF896, 0x000002FB, 0x0000195C, 0xFFFFF896, 0x000002FB },
{ 0x0213F0FD42D83904, 0x00002C41, 0xFFFFEEC6, 0x0000045D, 0x000017DD, 0xFFFFFA16, 0x000002CB, 0x000017DD, 0xFFFFFA16, 0x000002CB },
{ 0x0213F0FE990231A4, 0x00002DD4, 0xFFFFEC98, 0x000004AD, 0x00001BD7, 0xFFFFF69F, 0x00000347, 0x00001BD7, 0xFFFFF69F, 0x00000347 },
{ 0x0213F0FD42DA3944, 0x00003351, 0xFFFFE9B2, 0x0000051A, 0x00001CA1, 0xFFFFF6A4, 0x00000341, 0x00001CA1, 0xFFFFF6A4, 0x00000341 },
{ 0x0213F0FE99021104, 0x0000322D, 0xFFFFE9BE, 0x00000527, 0x00001CF9, 0xFFFFF5EB, 0x00000366, 0x00001CF9, 0xFFFFF5EB, 0x00000366 },
{ 0x0213F0FE990C28C4, 0x00003678, 0xFFFFE9A8, 0x00000503, 0x00001AD4, 0xFFFFF8F6, 0x000002E3, 0x00001AD4, 0xFFFFF8F6, 0x000002E3 },
{ 0x0213F0FE99161924, 0x0000260E, 0xFFFFF2C1, 0x000003CA, 0x00001139, 0xFFFFFE48, 0x00000236, 0x00001139, 0xFFFFFE48, 0x00000236 },
{ 0x0213F0FE990A2164, 0x000033D3, 0xFFFFE872, 0x00000565, 0x00001B72, 0xFFFFF713, 0x00000332, 0x00001B72, 0xFFFFF713, 0x00000332 },
{ 0x0213F0FE99323844, 0x0000309B, 0xFFFFEB42, 0x000004E4, 0x00001918, 0xFFFFF8C8, 0x000002F2, 0x00001918, 0xFFFFF8C8, 0x000002F2 },
{ 0x0213F0FE99182864, 0x000028B8, 0xFFFFF105, 0x00000402, 0x000018BB, 0xFFFFF9BC, 0x000002D3, 0x000018BB, 0xFFFFF9BC, 0x000002D3 },
{ 0x0213F0FE990A1884, 0x00003123, 0xFFFFE9D1, 0x00000534, 0x00001B19, 0xFFFFF6FE, 0x0000033C, 0x00001B19, 0xFFFFF6FE, 0x0000033C },
{ 0x0213F0FE99022144, 0x00003216, 0xFFFFEA8E, 0x000004F6, 0x00001F72, 0xFFFFF4CE, 0x0000038B, 0x00001F72, 0xFFFFF4CE, 0x0000038B },
{ 0x0213F0FE99162964, 0x00002564, 0xFFFFF32D, 0x000003B6, 0x00001685, 0xFFFFFADB, 0x000002BB, 0x00001685, 0xFFFFFADB, 0x000002BB },
{ 0x0213F0FD42DA2924, 0x00002E60, 0xFFFFED13, 0x00000497, 0x00001CA5, 0xFFFFF6B9, 0x00000346, 0x00001CA5, 0xFFFFF6B9, 0x00000346 },
{ 0x0213F0FE990E39A4, 0x0000336D, 0xFFFFE934, 0x0000053B, 0x00001B3E, 0xFFFFF763, 0x00000327, 0x00001B3E, 0xFFFFF763, 0x00000327 },
{ 0x0213F0FE99101084, 0x0000274A, 0xFFFFF119, 0x000003FA, 0x00001D75, 0xFFFFF5CD, 0x0000036F, 0x00001D75, 0xFFFFF5CD, 0x0000036F },
{ 0x0213F0FD42DA2164, 0x0000366B, 0xFFFFE70A, 0x0000059A, 0x00001ED8, 0xFFFFF501, 0x00000389, 0x00001ED8, 0xFFFFF501, 0x00000389 },
{ 0x0213F0FE99223964, 0x00003164, 0xFFFFEAB4, 0x000004FA, 0x00001C52, 0xFFFFF6E0, 0x00000336, 0x00001C52, 0xFFFFF6E0, 0x00000336 },
{ 0x0213F0FD42D23064, 0x00004224, 0xFFFFDF7F, 0x000006C1, 0x00002A52, 0xFFFFED5E, 0x000004BB, 0x00002A52, 0xFFFFED5E, 0x000004BB },
{ 0x0213F0FE99102864, 0x000030E3, 0xFFFFEB07, 0x000004ED, 0x00001FD3, 0xFFFFF46D, 0x000003A1, 0x00001FD3, 0xFFFFF46D, 0x000003A1 },
{ 0x0213F0FD42D82884, 0x00002AEB, 0xFFFFEF1B, 0x00000454, 0x00001829, 0xFFFFF995, 0x000002DD, 0x00001829, 0xFFFFF995, 0x000002DD },
{ 0x0213F0FD42DC50E4, 0x0000346B, 0xFFFFE7A2, 0x0000058B, 0x000020C5, 0xFFFFF2E8, 0x000003EC, 0x000020C5, 0xFFFFF2E8, 0x000003EC },
{ 0x0213F0FD42DC4164, 0x000039CF, 0xFFFFE5D7, 0x000005A9, 0x00001D66, 0xFFFFF5D6, 0x00000366, 0x00001D66, 0xFFFFF5D6, 0x00000366 },
{ 0x0213F0FE990418E4, 0x000034AC, 0xFFFFE9AE, 0x00000515, 0x00001A28, 0xFFFFF904, 0x000002DC, 0x00001A28, 0xFFFFF904, 0x000002DC },
{ 0x0213F0FD42DC2084, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001C6F, 0xFFFFF686, 0x0000034C, 0x00001C6F, 0xFFFFF686, 0x0000034C },
{ 0x0213F0FE990820C4, 0x0000328B, 0xFFFFEBA1, 0x000004B4, 0x00001DA3, 0xFFFFF683, 0x00000349, 0x00001DA3, 0xFFFFF683, 0x00000349 },
{ 0x0213F0FE991828C4, 0x000027DC, 0xFFFFF295, 0x000003BF, 0x000019C1, 0xFFFFF98E, 0x000002E8, 0x000019C1, 0xFFFFF98E, 0x000002E8 },
{ 0x0213F0FE99184084, 0x00002756, 0xFFFFF1D7, 0x000003DF, 0x000015D9, 0xFFFFFB51, 0x00000298, 0x000015D9, 0xFFFFFB51, 0x00000298 },
{ 0x0213F0FE99083884, 0x00003526, 0xFFFFE907, 0x00000526, 0x000017AB, 0xFFFFFA12, 0x000002AB, 0x000017AB, 0xFFFFFA12, 0x000002AB },
{ 0x0213F0FD42DA18E4, 0x0000351B, 0xFFFFE8B7, 0x00000540, 0x00001A86, 0xFFFFF821, 0x00000303, 0x00001A86, 0xFFFFF821, 0x00000303 },
{ 0x0213F0FE99164144, 0x000024B2, 0xFFFFF34E, 0x000003B1, 0x000018E2, 0xFFFFF926, 0x000002FC, 0x000018E2, 0xFFFFF926, 0x000002FC },
{ 0x0213F0FD42D828A4, 0x00002F36, 0xFFFFED5D, 0x00000486, 0x0000157A, 0xFFFFFB85, 0x00000293, 0x0000157A, 0xFFFFFB85, 0x00000293 },
{ 0x0213F0FD42DC50C4, 0x00003A6E, 0xFFFFE456, 0x000005FD, 0x00001F68, 0xFFFFF3D1, 0x000003C3, 0x00001F68, 0xFFFFF3D1, 0x000003C3 },
{ 0x0213F0FE990A31A4, 0x00002BC3, 0xFFFFED2D, 0x000004A7, 0x00001C3F, 0xFFFFF609, 0x00000364, 0x00001C3F, 0xFFFFF609, 0x00000364 },
{ 0x0213F0FE990E2084, 0x000032E1, 0xFFFFEA83, 0x000004F6, 0x00001B37, 0xFFFFF842, 0x000002F5, 0x00001B37, 0xFFFFF842, 0x000002F5 },
{ 0x0213F0FD42D83184, 0x000028E3, 0xFFFFF07F, 0x00000412, 0x00001676, 0xFFFFFA68, 0x000002BE, 0x00001676, 0xFFFFFA68, 0x000002BE },
{ 0x0213F0FD42D21104, 0x0000444C, 0xFFFFDDAD, 0x00000712, 0x00002634, 0xFFFFEF89, 0x0000046C, 0x00002634, 0xFFFFEF89, 0x0000046C },
{ 0x0213F0FE990418C4, 0x00003121, 0xFFFFEBBB, 0x000004C6, 0x00001C98, 0xFFFFF72B, 0x0000032D, 0x00001C98, 0xFFFFF72B, 0x0000032D },
{ 0x0213F0FD42D840A4, 0x00002C31, 0xFFFFEDC4, 0x00000490, 0x0000162D, 0xFFFFFA8E, 0x000002B4, 0x0000162D, 0xFFFFFA8E, 0x000002B4 },
{ 0x0213F0FD42DA18C4, 0x00002749, 0xFFFFF112, 0x000003FC, 0x00001C85, 0xFFFFF6B8, 0x00000342, 0x00001C85, 0xFFFFF6B8, 0x00000342 },
{ 0x0213F0FE99044104, 0x00003159, 0xFFFFEB99, 0x000004C2, 0x00001BD0, 0xFFFFF7CA, 0x00000307, 0x00001BD0, 0xFFFFF7CA, 0x00000307 },
{ 0x0213F0FE99164164, 0x00002610, 0xFFFFF1FD, 0x000003EC, 0x000016BE, 0xFFFFFA53, 0x000002CB, 0x000016BE, 0xFFFFFA53, 0x000002CB },
{ 0x0213F0FE99023184, 0x000037B5, 0xFFFFE63D, 0x000005B5, 0x00002285, 0xFFFFF25D, 0x000003F7, 0x00002285, 0xFFFFF25D, 0x000003F7 },
{ 0x0213F0FE990A28A4, 0x00002FEE, 0xFFFFEB47, 0x000004EF, 0x00001CBE, 0xFFFFF64E, 0x00000358, 0x00001CBE, 0xFFFFF64E, 0x00000358 },
{ 0x0213F0FE99105104, 0x00002E90, 0xFFFFEC48, 0x000004C0, 0x00001A47, 0xFFFFF7D1, 0x0000031A, 0x00001A47, 0xFFFFF7D1, 0x0000031A },
{ 0x0213F0FD42DA4084, 0x000034AB, 0xFFFFE84A, 0x00000559, 0x00001A72, 0xFFFFF79A, 0x0000031C, 0x00001A72, 0xFFFFF79A, 0x0000031C },
{ 0x0213F0FE99183884, 0x00002F7B, 0xFFFFECFC, 0x0000049C, 0x00001814, 0xFFFFFA22, 0x000002C2, 0x00001814, 0xFFFFFA22, 0x000002C2 },
{ 0x0213F0FE99021964, 0x00003618, 0xFFFFE709, 0x00000596, 0x00001EBF, 0xFFFFF482, 0x000003A5, 0x00001EBF, 0xFFFFF482, 0x000003A5 },
{ 0x0213EA94DE024904, 0x0000341B, 0xFFFFE8B2, 0x0000054F, 0x00001D26, 0xFFFFF578, 0x00000388, 0x00001D26, 0xFFFFF578, 0x00000388 },
{ 0x0213F0FE99102144, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x000019C0, 0xFFFFF8CC, 0x000002E6, 0x000019C0, 0xFFFFF8CC, 0x000002E6 },
{ 0x0213F0FE992841A4, 0x00002B76, 0xFFFFEF6C, 0x00000444, 0x00001563, 0xFFFFFBBE, 0x0000028D, 0x00001563, 0xFFFFFBBE, 0x0000028D },
{ 0x0213F0FD42D81864, 0x00002BA2, 0xFFFFEE31, 0x0000047F, 0x00001A3D, 0xFFFFF7F3, 0x00000320, 0x00001A3D, 0xFFFFF7F3, 0x00000320 },
{ 0x0213F0FE992C48E4, 0x00003545, 0xFFFFE87A, 0x0000054A, 0x00001B5A, 0xFFFFF7B0, 0x0000030C, 0x00001B5A, 0xFFFFF7B0, 0x0000030C },
{ 0x0213EA94DE042944, 0x00003879, 0xFFFFE73F, 0x00000578, 0x00001649, 0xFFFFFB57, 0x00000283, 0x00001649, 0xFFFFFB57, 0x00000283 },
{ 0x0213F0FD42D840C4, 0x00002772, 0xFFFFF0F1, 0x00000410, 0x0000142F, 0xFFFFFBCF, 0x00000287, 0x0000142F, 0xFFFFFBCF, 0x00000287 },
{ 0x0213F0FD42DA3184, 0x00003228, 0xFFFFE98E, 0x00000535, 0x00001F48, 0xFFFFF495, 0x00000399, 0x00001F48, 0xFFFFF495, 0x00000399 },
{ 0x0213F0FE990E40E4, 0x00002887, 0xFFFFF119, 0x000003E8, 0x000021AA, 0xFFFFF3F5, 0x000003A5, 0x000021AA, 0xFFFFF3F5, 0x000003A5 },
{ 0x0213F0FD42DA28A4, 0x0000301F, 0xFFFFEBB2, 0x000004D2, 0x00001C02, 0xFFFFF736, 0x0000032B, 0x00001C02, 0xFFFFF736, 0x0000032B },
{ 0x0213F0FE991820A4, 0x00002E13, 0xFFFFEE3F, 0x00000468, 0x000016AC, 0xFFFFFB32, 0x0000029E, 0x000016AC, 0xFFFFFB32, 0x0000029E },
{ 0x0213F0FE99044924, 0x00003478, 0xFFFFE8F9, 0x00000538, 0x00001DAB, 0xFFFFF645, 0x00000345, 0x00001DAB, 0xFFFFF645, 0x00000345 },
{ 0x0213F0FE990608C4, 0x000030C6, 0xFFFFEB6C, 0x000004D4, 0x0000184A, 0xFFFFF934, 0x000002E1, 0x0000184A, 0xFFFFF934, 0x000002E1 },
{ 0x0213F0FE990A2044, 0x00002F1B, 0xFFFFEBD3, 0x000004D3, 0x000019E7, 0xFFFFF813, 0x0000030D, 0x000019E7, 0xFFFFF813, 0x0000030D },
{ 0x0213F0FE99023904, 0x00003214, 0xFFFFEAE9, 0x000004E0, 0x0000178F, 0xFFFFFA1C, 0x000002B1, 0x0000178F, 0xFFFFFA1C, 0x000002B1 },
{ 0x0213F0FD42DC3144, 0x0000399C, 0xFFFFE738, 0x0000055E, 0x00001EA1, 0xFFFFF5E7, 0x0000035A, 0x00001EA1, 0xFFFFF5E7, 0x0000035A },
{ 0x0213F0FE990650C4, 0x00003A01, 0xFFFFE5B2, 0x000005B6, 0x00001D95, 0xFFFFF5D2, 0x0000036A, 0x00001D95, 0xFFFFF5D2, 0x0000036A },
{ 0x0213F0FE99043884, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001C06, 0xFFFFF76E, 0x0000031A, 0x00001C06, 0xFFFFF76E, 0x0000031A },
{ 0x0213F0FE99063864, 0x00003CD1, 0xFFFFE42F, 0x000005EB, 0x00001933, 0xFFFFF91F, 0x000002D4, 0x00001933, 0xFFFFF91F, 0x000002D4 },
{ 0x0213F0FD42DA3164, 0x00003119, 0xFFFFEB1B, 0x000004E1, 0x00001FC7, 0xFFFFF46A, 0x000003A2, 0x00001FC7, 0xFFFFF46A, 0x000003A2 },
{ 0x0213EA94DE0648A4, 0x0000390D, 0xFFFFE566, 0x000005D8, 0x00001EC6, 0xFFFFF4DC, 0x00000391, 0x00001EC6, 0xFFFFF4DC, 0x00000391 },
{ 0x0213F0FD42DA10C4, 0x00003446, 0xFFFFE858, 0x00000561, 0x00001FDB, 0xFFFFF3FF, 0x000003B9, 0x00001FDB, 0xFFFFF3FF, 0x000003B9 },
{ 0x0213F0FE99044904, 0x000032BA, 0xFFFFEA07, 0x00000511, 0x00001B25, 0xFFFFF7C9, 0x0000030D, 0x00001B25, 0xFFFFF7C9, 0x0000030D },
{ 0x0213F0FE990E1864, 0x00002CCF, 0xFFFFEDE5, 0x00000478, 0x00001BC8, 0xFFFFF761, 0x00000326, 0x00001BC8, 0xFFFFF761, 0x00000326 },
{ 0x0213F0FE99062984, 0x0000400E, 0xFFFFE1CB, 0x00000652, 0x00001AF8, 0xFFFFF7B9, 0x00000312, 0x00001AF8, 0xFFFFF7B9, 0x00000312 },
{ 0x0213F0FE990408E4, 0x00002F24, 0xFFFFEC2A, 0x000004C7, 0x00001B94, 0xFFFFF748, 0x00000333, 0x00001B94, 0xFFFFF748, 0x00000333 },
{ 0x0213F0FD42D21924, 0x00003FDA, 0xFFFFE1C1, 0x0000064B, 0x00002427, 0xFFFFF180, 0x0000040C, 0x00002427, 0xFFFFF180, 0x0000040C },
{ 0x0213F0FE990A18C4, 0x00002F6B, 0xFFFFEBA7, 0x000004DD, 0x00001C25, 0xFFFFF6C1, 0x00000344, 0x00001C25, 0xFFFFF6C1, 0x00000344 },
{ 0x0213F0FE99182104, 0x00002A53, 0xFFFFF0EE, 0x00000402, 0x000017C6, 0xFFFFFAA0, 0x000002BF, 0x000017C6, 0xFFFFFAA0, 0x000002BF },
{ 0x0213F0FE99105144, 0x000031F4, 0xFFFFEA34, 0x00000517, 0x000016FF, 0xFFFFFA4E, 0x000002AC, 0x000016FF, 0xFFFFFA4E, 0x000002AC },
{ 0x0213F0FE99322144, 0x00002E24, 0xFFFFED46, 0x00000489, 0x00001712, 0xFFFFFA5D, 0x000002AC, 0x00001712, 0xFFFFFA5D, 0x000002AC },
{ 0x0213F0FE99182824, 0x000028CD, 0xFFFFF0E3, 0x0000040E, 0x00001606, 0xFFFFFB37, 0x000002A4, 0x00001606, 0xFFFFFB37, 0x000002A4 },
{ 0x0213F0FE990220C4, 0x00003184, 0xFFFFEB88, 0x000004C3, 0x000018DA, 0xFFFFF939, 0x000002DB, 0x000018DA, 0xFFFFF939, 0x000002DB },
{ 0x0213F0FE99162124, 0x0000239B, 0xFFFFF470, 0x00000386, 0x00001714, 0xFFFFFA9F, 0x000002C8, 0x00001714, 0xFFFFFA9F, 0x000002C8 },
{ 0x0213F0FD42DC38E4, 0x00003641, 0xFFFFE92B, 0x00000515, 0x00001BE2, 0xFFFFF795, 0x0000031B, 0x00001BE2, 0xFFFFF795, 0x0000031B },
{ 0x0213F0FE992C1144, 0x00003278, 0xFFFFEA17, 0x00000510, 0x00001B71, 0xFFFFF778, 0x0000031D, 0x00001B71, 0xFFFFF778, 0x0000031D },
{ 0x0213F0FE99062844, 0x000035B9, 0xFFFFE8DA, 0x0000052D, 0x00001A6A, 0xFFFFF83B, 0x000002FF, 0x00001A6A, 0xFFFFF83B, 0x000002FF },
{ 0x0213F0FE990E18C4, 0x00002E5E, 0xFFFFED32, 0x0000048B, 0x00001E7D, 0xFFFFF60E, 0x0000034E, 0x00001E7D, 0xFFFFF60E, 0x0000034E },
{ 0x0213F0FE991019A4, 0x00003178, 0xFFFFEA52, 0x00000513, 0x00001AD0, 0xFFFFF793, 0x0000031F, 0x00001AD0, 0xFFFFF793, 0x0000031F },
{ 0x0213F0FD42D44104, 0x00003A2C, 0xFFFFE346, 0x00000641, 0x000023D0, 0xFFFFF0CE, 0x00000433, 0x000023D0, 0xFFFFF0CE, 0x00000433 },
{ 0x0213F0FD42D818C4, 0x000028FD, 0xFFFFF02A, 0x0000042B, 0x0000152B, 0xFFFFFB90, 0x00000289, 0x0000152B, 0xFFFFFB90, 0x00000289 },
{ 0x0213F0FE990E3084, 0x000030DE, 0xFFFFEBDF, 0x000004BE, 0x00001CDC, 0xFFFFF747, 0x0000031C, 0x00001CDC, 0xFFFFF747, 0x0000031C },
{ 0x0213F0FE99021944, 0x000036CB, 0xFFFFE6EE, 0x00000596, 0x00002096, 0xFFFFF3C2, 0x000003BB, 0x00002096, 0xFFFFF3C2, 0x000003BB },
{ 0x0213F0FE990C48C4, 0x00003172, 0xFFFFEAC1, 0x000004F4, 0x00001C87, 0xFFFFF6CD, 0x00000337, 0x00001C87, 0xFFFFF6CD, 0x00000337 },
{ 0x0213F0FD42D24864, 0x00004A18, 0xFFFFDB34, 0x00000758, 0x0000213C, 0xFFFFF3A2, 0x000003AC, 0x0000213C, 0xFFFFF3A2, 0x000003AC },
{ 0x0213F0FE99022104, 0x000031F3, 0xFFFFEB73, 0x000004C6, 0x00001B23, 0xFFFFF7CB, 0x0000031A, 0x00001B23, 0xFFFFF7CB, 0x0000031A },
{ 0x0213F0FE990A2924, 0x000031C0, 0xFFFFEABA, 0x000004F7, 0x00001A5A, 0xFFFFF845, 0x000002FF, 0x00001A5A, 0xFFFFF845, 0x000002FF },
{ 0x0213F0FE99104944, 0x00003B77, 0xFFFFE3B3, 0x00000623, 0x00001BCA, 0xFFFFF6F8, 0x00000333, 0x00001BCA, 0xFFFFF6F8, 0x00000333 },
{ 0x0213F0FE990A3944, 0x000035AF, 0xFFFFE76D, 0x00000588, 0x00001C16, 0xFFFFF6AB, 0x00000341, 0x00001C16, 0xFFFFF6AB, 0x00000341 },
{ 0x0213EA94DE0438C4, 0x000032AD, 0xFFFFEA8E, 0x000004F8, 0x00001A3A, 0xFFFFF832, 0x0000030E, 0x00001A3A, 0xFFFFF832, 0x0000030E },
{ 0x0213F0FE99104884, 0x00002E92, 0xFFFFEBD2, 0x000004DA, 0x00001E04, 0xFFFFF51E, 0x0000038A, 0x00001E04, 0xFFFFF51E, 0x0000038A },
{ 0x0213F0FD42D440A4, 0x00003E57, 0xFFFFE0F7, 0x0000068F, 0x000021F1, 0xFFFFF1C6, 0x00000411, 0x000021F1, 0xFFFFF1C6, 0x00000411 },
{ 0x0213F0FE990821A4, 0x00003598, 0xFFFFE8BB, 0x00000535, 0x00001B62, 0xFFFFF764, 0x00000326, 0x00001B62, 0xFFFFF764, 0x00000326 },
{ 0x0213F0FE990A3884, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x00001E8B, 0xFFFFF4AB, 0x0000039F, 0x00001E8B, 0xFFFFF4AB, 0x0000039F },
{ 0x0213EA94DE060904, 0x0000267E, 0xFFFFF1A7, 0x000003E1, 0x000021C1, 0xFFFFF2E9, 0x000003EA, 0x000021C1, 0xFFFFF2E9, 0x000003EA },
{ 0x0213EA94DE0239A4, 0x00002ED7, 0xFFFFEC88, 0x000004A6, 0x00001DEC, 0xFFFFF57C, 0x00000378, 0x00001DEC, 0xFFFFF57C, 0x00000378 },
{ 0x0213EA94DE0441A4, 0x00003365, 0xFFFFE946, 0x00000536, 0x000019E9, 0xFFFFF7E0, 0x0000031D, 0x000019E9, 0xFFFFF7E0, 0x0000031D },
{ 0x0213F0FE991818E4, 0x000029A4, 0xFFFFF0FD, 0x000003FE, 0x0000163F, 0xFFFFFB68, 0x00000299, 0x0000163F, 0xFFFFFB68, 0x00000299 },
{ 0x0213EA94DE021904, 0x0000348D, 0xFFFFE9F7, 0x00000509, 0x000017A0, 0xFFFFFA59, 0x000002B6, 0x000017A0, 0xFFFFFA59, 0x000002B6 },
{ 0x0213F0FE990610C4, 0x00003144, 0xFFFFEB23, 0x000004D9, 0x00001C9B, 0xFFFFF664, 0x00000351, 0x00001C9B, 0xFFFFF664, 0x00000351 },
{ 0x0213EA94DE0620E4, 0x00002E95, 0xFFFFEE1A, 0x00000463, 0x00001707, 0xFFFFFAB7, 0x000002B3, 0x00001707, 0xFFFFFAB7, 0x000002B3 },
{ 0x0213F0FD42D41864, 0x0000489C, 0xFFFFDA43, 0x000007AC, 0x00002866, 0xFFFFED6B, 0x000004D0, 0x00002866, 0xFFFFED6B, 0x000004D0 },
{ 0x0213F0FE99161844, 0x00002895, 0xFFFFF10A, 0x0000040A, 0x000013E9, 0xFFFFFC9F, 0x0000026E, 0x000013E9, 0xFFFFFC9F, 0x0000026E },
{ 0x0213F0FE99061964, 0x000033A0, 0xFFFFE9B1, 0x00000510, 0x00001D96, 0xFFFFF5AE, 0x0000036F, 0x00001D96, 0xFFFFF5AE, 0x0000036F },
{ 0x0213F0FE99083984, 0x0000327C, 0xFFFFEAEA, 0x000004DD, 0x00001D45, 0xFFFFF649, 0x00000356, 0x00001D45, 0xFFFFF649, 0x00000356 },
{ 0x0213EA94DE0248A4, 0x000031DF, 0xFFFFE9AB, 0x0000052F, 0x000019C8, 0xFFFFF7B7, 0x00000321, 0x000019C8, 0xFFFFF7B7, 0x00000321 },
{ 0x0213F0FE991640A4, 0x00002BCC, 0xFFFFEEF4, 0x0000045C, 0x000015CD, 0xFFFFFB58, 0x0000029E, 0x000015CD, 0xFFFFFB58, 0x0000029E },
{ 0x0213F0FE990638E4, 0x00003534, 0xFFFFEA10, 0x000004EB, 0x00001BB6, 0xFFFFF7B9, 0x00000314, 0x00001BB6, 0xFFFFF7B9, 0x00000314 },
{ 0x0213F0FE99041984, 0x00002F4F, 0xFFFFEC35, 0x000004B9, 0x0000205D, 0xFFFFF47F, 0x00000392, 0x0000205D, 0xFFFFF47F, 0x00000392 },
{ 0x0213F0FE990C20A4, 0x00003295, 0xFFFFEB1C, 0x000004D6, 0x000019C1, 0xFFFFF931, 0x000002D5, 0x000019C1, 0xFFFFF931, 0x000002D5 },
{ 0x0213F0FE99024144, 0x00003557, 0xFFFFE7F7, 0x00000568, 0x00002342, 0xFFFFF1F9, 0x00000405, 0x00002342, 0xFFFFF1F9, 0x00000405 },
{ 0x0213F0FE990450C4, 0x00003487, 0xFFFFE872, 0x0000055D, 0x000019D7, 0xFFFFF823, 0x0000030C, 0x000019D7, 0xFFFFF823, 0x0000030C },
{ 0x0213F0FE992C3944, 0x0000378F, 0xFFFFE7A6, 0x00000566, 0x00001875, 0xFFFFFA04, 0x000002AF, 0x00001875, 0xFFFFFA04, 0x000002AF },
{ 0x0213EA94DE0230E4, 0x00002A67, 0xFFFFF157, 0x000003DD, 0x000017BD, 0xFFFFFA53, 0x000002D1, 0x000017BD, 0xFFFFFA53, 0x000002D1 },
{ 0x0213F0FD42D220E4, 0x000030B5, 0xFFFFEB32, 0x000004D9, 0x00002129, 0xFFFFF38A, 0x000003BB, 0x00002129, 0xFFFFF38A, 0x000003BB },
{ 0x0213F0FE990610A4, 0x00003786, 0xFFFFE703, 0x00000584, 0x00001D63, 0xFFFFF5DC, 0x00000367, 0x00001D63, 0xFFFFF5DC, 0x00000367 },
{ 0x0213F0FD42DA20C4, 0x0000346A, 0xFFFFE93E, 0x0000052C, 0x00001B27, 0xFFFFF79D, 0x0000031F, 0x00001B27, 0xFFFFF79D, 0x0000031F },
{ 0x0213F0FE990E3024, 0x0000294E, 0xFFFFF0A5, 0x00000409, 0x00001928, 0xFFFFF93B, 0x000002E6, 0x00001928, 0xFFFFF93B, 0x000002E6 },
{ 0x0213F0FD42D410C4, 0x00003E09, 0xFFFFE0FF, 0x00000694, 0x000025A0, 0xFFFFEF0F, 0x0000048F, 0x000025A0, 0xFFFFEF0F, 0x0000048F },
{ 0x0213F0FE990A2964, 0x00003197, 0xFFFFEA06, 0x00000520, 0x00001B42, 0xFFFFF73B, 0x0000032A, 0x00001B42, 0xFFFFF73B, 0x0000032A },
{ 0x0213F0FE99161864, 0x000022CB, 0xFFFFF3FC, 0x000003A3, 0x00001449, 0xFFFFFBD0, 0x00000297, 0x00001449, 0xFFFFFBD0, 0x00000297 },
{ 0x0213F0FD42D82944, 0x00002A79, 0xFFFFEFD2, 0x00000433, 0x00001585, 0xFFFFFB92, 0x0000028E, 0x00001585, 0xFFFFFB92, 0x0000028E },
{ 0x0213F0FE990C4184, 0x00003249, 0xFFFFEA92, 0x000004F4, 0x000019CB, 0xFFFFF8CF, 0x000002E1, 0x000019CB, 0xFFFFF8CF, 0x000002E1 },
{ 0x0213EA94DE0218A4, 0x00002CEA, 0xFFFFEE46, 0x00000463, 0x00001A5E, 0xFFFFF83C, 0x0000030D, 0x00001A5E, 0xFFFFF83C, 0x0000030D },
{ 0x0213F0FD42DC5144, 0x00003AE2, 0xFFFFE422, 0x00000600, 0x00001C65, 0xFFFFF62F, 0x0000034B, 0x00001C65, 0xFFFFF62F, 0x0000034B },
{ 0x0213F0FE99181184, 0x000026A0, 0xFFFFF1C2, 0x000003F8, 0x000010E5, 0xFFFFFE56, 0x0000022A, 0x000010E5, 0xFFFFFE56, 0x0000022A },
{ 0x0213F0FE992829A4, 0x00002A7B, 0xFFFFF063, 0x00000417, 0x000016FC, 0xFFFFFAD7, 0x000002B1, 0x000016FC, 0xFFFFFAD7, 0x000002B1 },
{ 0x0213F0FE993210C4, 0x00003092, 0xFFFFEAB9, 0x00000507, 0x00001AE3, 0xFFFFF783, 0x00000323, 0x00001AE3, 0xFFFFF783, 0x00000323 },
{ 0x0213F0FE990438E4, 0x00003265, 0xFFFFEBE8, 0x000004AA, 0x00001D65, 0xFFFFF73F, 0x00000321, 0x00001D65, 0xFFFFF73F, 0x00000321 },
{ 0x0213EA94DE023084, 0x00002F14, 0xFFFFECC2, 0x000004A4, 0x00001A8D, 0xFFFFF7F3, 0x0000031D, 0x00001A8D, 0xFFFFF7F3, 0x0000031D },
{ 0x0213F0FD42DC10E4, 0x000035FB, 0xFFFFE6D3, 0x000005AC, 0x00001B19, 0xFFFFF712, 0x00000338, 0x00001B19, 0xFFFFF712, 0x00000338 },
{ 0x0213F0FD42DA2124, 0x00003519, 0xFFFFE8CC, 0x0000053A, 0x00001A0F, 0xFFFFF86E, 0x000002F5, 0x00001A0F, 0xFFFFF86E, 0x000002F5 },
{ 0x0213F0FE992C2144, 0x0000364C, 0xFFFFE879, 0x00000541, 0x00001A42, 0xFFFFF8BA, 0x000002E2, 0x00001A42, 0xFFFFF8BA, 0x000002E2 },
{ 0x0213EA94DE0218C4, 0x000029BA, 0xFFFFF09A, 0x00000408, 0x00001986, 0xFFFFF8D9, 0x000002FE, 0x00001986, 0xFFFFF8D9, 0x000002FE },
{ 0x0213F0FD42DA38E4, 0x00003507, 0xFFFFE961, 0x00000518, 0x00001B79, 0xFFFFF775, 0x00000325, 0x00001B79, 0xFFFFF775, 0x00000325 },
{ 0x0213F0FD42DC3184, 0x00003AD5, 0xFFFFE415, 0x00000613, 0x00001CB4, 0xFFFFF66D, 0x00000348, 0x00001CB4, 0xFFFFF66D, 0x00000348 },
{ 0x0213F0FE991640E4, 0x000023D1, 0xFFFFF42B, 0x0000038F, 0x00001546, 0xFFFFFBA0, 0x0000029F, 0x00001546, 0xFFFFFBA0, 0x0000029F },
{ 0x0213F0FE990A1924, 0x0000399E, 0xFFFFE518, 0x000005E7, 0x00001990, 0xFFFFF871, 0x000002FB, 0x00001990, 0xFFFFF871, 0x000002FB },
{ 0x0213F0FD42D82964, 0x00002EDE, 0xFFFFEC93, 0x000004B8, 0x0000152C, 0xFFFFFBB3, 0x0000027E, 0x0000152C, 0xFFFFFBB3, 0x0000027E },
{ 0x0213EA94DE042964, 0x00003140, 0xFFFFEBC9, 0x000004BB, 0x000016BE, 0xFFFFFB0A, 0x00000288, 0x000016BE, 0xFFFFFB0A, 0x00000288 },
{ 0x0213F0FE99064064, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x0000185D, 0xFFFFF95A, 0x000002D9, 0x0000185D, 0xFFFFF95A, 0x000002D9 },
{ 0x0213F0FE99023844, 0x0000389C, 0xFFFFE65A, 0x000005A2, 0x0000195D, 0xFFFFF8C8, 0x000002E8, 0x0000195D, 0xFFFFF8C8, 0x000002E8 },
{ 0x0213F0FE99042104, 0x0000362B, 0xFFFFE9EC, 0x000004F6, 0x00001605, 0xFFFFFC1C, 0x00000263, 0x00001605, 0xFFFFFC1C, 0x00000263 },
{ 0x0213F0FE992A1964, 0x00002946, 0xFFFFF04F, 0x00000426, 0x000015BA, 0xFFFFFB2F, 0x000002A3, 0x000015BA, 0xFFFFFB2F, 0x000002A3 },
{ 0x0213F0FE99082184, 0x0000368E, 0xFFFFE837, 0x0000054A, 0x000017D7, 0xFFFFF9EB, 0x000002BA, 0x000017D7, 0xFFFFF9EB, 0x000002BA },
{ 0x0213F0FD42DA2844, 0x00002E74, 0xFFFFEBE8, 0x000004DA, 0x00001DD6, 0xFFFFF57E, 0x00000379, 0x00001DD6, 0xFFFFF57E, 0x00000379 },
{ 0x0213F0FE99041944, 0x0000322D, 0xFFFFEAA8, 0x000004F5, 0x00001B55, 0xFFFFF7DD, 0x0000030B, 0x00001B55, 0xFFFFF7DD, 0x0000030B },
{ 0x0213F0FE99181904, 0x00002A29, 0xFFFFF07B, 0x00000416, 0x00001671, 0xFFFFFB3E, 0x0000029F, 0x00001671, 0xFFFFFB3E, 0x0000029F },
{ 0x0213F0FD42DA2104, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x00001815, 0xFFFFF9AE, 0x000002C9, 0x00001815, 0xFFFFF9AE, 0x000002C9 },
{ 0x0213F0FE990E10E4, 0x0000265F, 0xFFFFF1CB, 0x000003D5, 0x00001ED2, 0xFFFFF539, 0x0000037A, 0x00001ED2, 0xFFFFF539, 0x0000037A },
{ 0x0213F0FE99162184, 0x000027A8, 0xFFFFF10D, 0x00000413, 0x000014B5, 0xFFFFFBA1, 0x00000299, 0x000014B5, 0xFFFFFBA1, 0x00000299 },
{ 0x0213F0FE99043064, 0x00002CEE, 0xFFFFEDF6, 0x00000476, 0x00001A99, 0xFFFFF83E, 0x00000305, 0x00001A99, 0xFFFFF83E, 0x00000305 },
{ 0x0213F0FE990640C4, 0x0000346C, 0xFFFFEA17, 0x000004EF, 0x00001D38, 0xFFFFF69F, 0x0000033D, 0x00001D38, 0xFFFFF69F, 0x0000033D },
{ 0x0213F0FD42DA2944, 0x00002DBB, 0xFFFFED35, 0x00000490, 0x000018C1, 0xFFFFF930, 0x000002DA, 0x000018C1, 0xFFFFF930, 0x000002DA },
{ 0x0213F0FE99042924, 0x000038DF, 0xFFFFE8A7, 0x0000051E, 0x00001B59, 0xFFFFF915, 0x000002D3, 0x00001B59, 0xFFFFF915, 0x000002D3 },
{ 0x0213F0FE99080944, 0x00003384, 0xFFFFE979, 0x00000524, 0x00001AF3, 0xFFFFF74C, 0x0000032F, 0x00001AF3, 0xFFFFF74C, 0x0000032F },
{ 0x0213F0FE99181864, 0x0000258B, 0xFFFFF2AE, 0x000003CB, 0x0000190C, 0xFFFFF93E, 0x000002EF, 0x0000190C, 0xFFFFF93E, 0x000002EF },
{ 0x0213F0FE99103884, 0x000034F1, 0xFFFFE84B, 0x0000055E, 0x00001CB8, 0xFFFFF670, 0x0000034A, 0x00001CB8, 0xFFFFF670, 0x0000034A },
{ 0x0213F0FE990C2104, 0x000030FB, 0xFFFFECD2, 0x00000488, 0x00001BF4, 0xFFFFF821, 0x00000302, 0x00001BF4, 0xFFFFF821, 0x00000302 },
{ 0x0213F0FE99063044, 0x000036A6, 0xFFFFE815, 0x00000556, 0x000018FD, 0xFFFFF925, 0x000002DF, 0x000018FD, 0xFFFFF925, 0x000002DF },
{ 0x0213EA94DE023044, 0x0000302A, 0xFFFFEB79, 0x000004E0, 0x00001C11, 0xFFFFF694, 0x00000358, 0x00001C11, 0xFFFFF694, 0x00000358 },
{ 0x0213F0FE99181124, 0x00002555, 0xFFFFF2C4, 0x000003CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB },
{ 0x0213F0FE990A3164, 0x000032A3, 0xFFFFE933, 0x00000544, 0x000019D3, 0xFFFFF81A, 0x00000306, 0x000019D3, 0xFFFFF81A, 0x00000306 },
{ 0x0213F0FD42D85104, 0x00002B91, 0xFFFFED81, 0x000004A9, 0x0000158B, 0xFFFFFAB9, 0x000002AC, 0x0000158B, 0xFFFFFAB9, 0x000002AC },
{ 0x0213F0FE990E20C4, 0x00003537, 0xFFFFE912, 0x0000052C, 0x00001C8A, 0xFFFFF754, 0x0000031B, 0x00001C8A, 0xFFFFF754, 0x0000031B },
{ 0x0213EA94DE063184, 0x000032E1, 0xFFFFEA5A, 0x000004F9, 0x000017B4, 0xFFFFF9D9, 0x000002C2, 0x000017B4, 0xFFFFF9D9, 0x000002C2 },
{ 0x0213F0FD42D210C4, 0x00003B76, 0xFFFFE330, 0x00000636, 0x000026FB, 0xFFFFEF06, 0x00000481, 0x000026FB, 0xFFFFEF06, 0x00000481 },
{ 0x0213F0FE99042144, 0x0000320C, 0xFFFFEB84, 0x000004C3, 0x00001A3A, 0xFFFFF8E9, 0x000002DF, 0x00001A3A, 0xFFFFF8E9, 0x000002DF },
{ 0x0213F0FE99023984, 0x0000317D, 0xFFFFEA1F, 0x00000515, 0x00002100, 0xFFFFF31B, 0x000003DD, 0x00002100, 0xFFFFF31B, 0x000003DD },
{ 0x0213F0FD42D43164, 0x00003DCB, 0xFFFFE0B4, 0x000006B4, 0x00002160, 0xFFFFF269, 0x000003F0, 0x00002160, 0xFFFFF269, 0x000003F0 },
{ 0x0213F0FE991618C4, 0x00002737, 0xFFFFF218, 0x000003E1, 0x000015B5, 0xFFFFFB8F, 0x0000029C, 0x000015B5, 0xFFFFFB8F, 0x0000029C },
{ 0x0213EA94DE023184, 0x0000318F, 0xFFFFEB3F, 0x000004D8, 0x00001938, 0xFFFFF8E9, 0x000002EB, 0x00001938, 0xFFFFF8E9, 0x000002EB },
{ 0x0213F0FE991048C4, 0x000031BD, 0xFFFFE9DE, 0x00000527, 0x000018A7, 0xFFFFF8CA, 0x000002ED, 0x000018A7, 0xFFFFF8CA, 0x000002ED },
{ 0x0213F0FD42DA3884, 0x00002F77, 0xFFFFEC2F, 0x000004B4, 0x00001D25, 0xFFFFF61B, 0x0000035D, 0x00001D25, 0xFFFFF61B, 0x0000035D },
{ 0x0213F0FE990E4904, 0x00002CCA, 0xFFFFEDB3, 0x0000047C, 0x00001FBD, 0xFFFFF4A7, 0x00000391, 0x00001FBD, 0xFFFFF4A7, 0x00000391 },
{ 0x0213F0FD42D438A4, 0x00003FF6, 0xFFFFE058, 0x000006A2, 0x000024CD, 0xFFFFF026, 0x00000452, 0x000024CD, 0xFFFFF026, 0x00000452 },
{ 0x0213F0FE990A38E4, 0x00003161, 0xFFFFEAC8, 0x000004F3, 0x00001BB6, 0xFFFFF72A, 0x0000032B, 0x00001BB6, 0xFFFFF72A, 0x0000032B },
{ 0x0213F0FD42D838A4, 0x00002EA0, 0xFFFFECA6, 0x000004B7, 0x000018C2, 0xFFFFF94E, 0x000002E1, 0x000018C2, 0xFFFFF94E, 0x000002E1 },
{ 0x0213F0FE99182184, 0x00002F62, 0xFFFFEC9E, 0x000004B8, 0x00001531, 0xFFFFFBCD, 0x00000285, 0x00001531, 0xFFFFFBCD, 0x00000285 },
{ 0x0213F0FE990440A4, 0x00003013, 0xFFFFEBD6, 0x000004C2, 0x00001B01, 0xFFFFF802, 0x000002FF, 0x00001B01, 0xFFFFF802, 0x000002FF },
{ 0x0213F0FE99183064, 0x00002972, 0xFFFFF08D, 0x00000417, 0x00001A32, 0xFFFFF8A4, 0x00000305, 0x00001A32, 0xFFFFF8A4, 0x00000305 },
{ 0x0213F0FD42D820E4, 0x00002E95, 0xFFFFED94, 0x00000487, 0x00001529, 0xFFFFFC26, 0x00000271, 0x00001529, 0xFFFFFC26, 0x00000271 },
{ 0x0213F0FE990A1084, 0x00002D6A, 0xFFFFEC79, 0x000004C1, 0x00001AE2, 0xFFFFF725, 0x00000337, 0x00001AE2, 0xFFFFF725, 0x00000337 },
{ 0x0213F0FE99021884, 0x000036B4, 0xFFFFE704, 0x00000591, 0x00001E7E, 0xFFFFF51C, 0x00000383, 0x00001E7E, 0xFFFFF51C, 0x00000383 },
{ 0x0213F0FE99041844, 0x00002A6F, 0xFFFFEF70, 0x00000443, 0x00001BAA, 0xFFFFF752, 0x00000336, 0x00001BAA, 0xFFFFF752, 0x00000336 },
{ 0x0213F0FE99183944, 0x00002C66, 0xFFFFEF5F, 0x0000043A, 0x000019F7, 0xFFFFF931, 0x000002EC, 0x000019F7, 0xFFFFF931, 0x000002EC },
{ 0x0213EA94DE0631C4, 0x00003852, 0xFFFFE6AB, 0x00000590, 0x000019C1, 0xFFFFF8B1, 0x000002E5, 0x000019C1, 0xFFFFF8B1, 0x000002E5 },
{ 0x0213F0FD42DA3124, 0x00003521, 0xFFFFE932, 0x00000523, 0x000018A9, 0xFFFFF96B, 0x000002D0, 0x000018A9, 0xFFFFF96B, 0x000002D0 },
{ 0x0213F0FE99062164, 0x000031B9, 0xFFFFEB36, 0x000004D0, 0x00001D65, 0xFFFFF612, 0x0000035D, 0x00001D65, 0xFFFFF612, 0x0000035D },
{ 0x0213F0FD42D41064, 0x00003ED0, 0xFFFFE135, 0x00000679, 0x00002351, 0xFFFFF0FE, 0x00000433, 0x00002351, 0xFFFFF0FE, 0x00000433 },
{ 0x0213F0FE990A20E4, 0x000033ED, 0xFFFFE91A, 0x00000541, 0x00001C93, 0xFFFFF6A0, 0x0000034A, 0x00001C93, 0xFFFFF6A0, 0x0000034A },
{ 0x0213EA94DE021844, 0x0000356F, 0xFFFFE8F7, 0x00000530, 0x000016BF, 0xFFFFFA85, 0x000002AB, 0x000016BF, 0xFFFFFA85, 0x000002AB },
{ 0x0213F0FE991840E4, 0x00002304, 0xFFFFF4F3, 0x00000364, 0x000017CC, 0xFFFFFA41, 0x000002CA, 0x000017CC, 0xFFFFFA41, 0x000002CA },
{ 0x0213F0FE99161164, 0x00002887, 0xFFFFEFD7, 0x00000450, 0x00001474, 0xFFFFFB94, 0x00000299, 0x00001474, 0xFFFFFB94, 0x00000299 },
{ 0x0213F0FE99063064, 0x00003D0B, 0xFFFFE416, 0x000005EF, 0x00001C7E, 0xFFFFF71D, 0x00000325, 0x00001C7E, 0xFFFFF71D, 0x00000325 },
{ 0x0213F0FE990810E4, 0x00003185, 0xFFFFEAFA, 0x000004E4, 0x00001A12, 0xFFFFF83C, 0x00000303, 0x00001A12, 0xFFFFF83C, 0x00000303 },
{ 0x0213F0FE990A1944, 0x00003032, 0xFFFFEAE6, 0x000004FC, 0x00001B2A, 0xFFFFF73F, 0x0000032B, 0x00001B2A, 0xFFFFF73F, 0x0000032B },
{ 0x0213F0FD42D838C4, 0x00002691, 0xFFFFF22D, 0x000003D6, 0x00001700, 0xFFFFFA6E, 0x000002C0, 0x00001700, 0xFFFFFA6E, 0x000002C0 },
{ 0x0213F0FE990218A4, 0x00002B2F, 0xFFFFEEC4, 0x0000044B, 0x0000215F, 0xFFFFF33F, 0x000003D2, 0x0000215F, 0xFFFFF33F, 0x000003D2 },
{ 0x0213F0FE990A4184, 0x000034AA, 0xFFFFE706, 0x000005B1, 0x00001B28, 0xFFFFF6B5, 0x00000349, 0x00001B28, 0xFFFFF6B5, 0x00000349 },
{ 0x0213F0FD42DA2964, 0x0000307E, 0xFFFFEB38, 0x000004E6, 0x00001A22, 0xFFFFF83F, 0x00000300, 0x00001A22, 0xFFFFF83F, 0x00000300 },
{ 0x0213F0FE990618A4, 0x000038D6, 0xFFFFE6D8, 0x0000057C, 0x00001B24, 0xFFFFF7E4, 0x00000307, 0x00001B24, 0xFFFFF7E4, 0x00000307 },
{ 0x0213F0FE99183044, 0x00002757, 0xFFFFF1E8, 0x000003DD, 0x000017F5, 0xFFFFFA15, 0x000002C8, 0x000017F5, 0xFFFFFA15, 0x000002C8 },
{ 0x0213F0FE99083184, 0x000031FC, 0xFFFFEB3E, 0x000004CE, 0x00001B4C, 0xFFFFF7AD, 0x00000319, 0x00001B4C, 0xFFFFF7AD, 0x00000319 },
{ 0x0213F0FE99301864, 0x00002933, 0xFFFFF073, 0x0000040E, 0x00001C3C, 0xFFFFF701, 0x0000033C, 0x00001C3C, 0xFFFFF701, 0x0000033C },
{ 0x0213F0FD42D218A4, 0x000040BB, 0xFFFFE066, 0x0000069A, 0x0000257F, 0xFFFFF08A, 0x00000435, 0x0000257F, 0xFFFFF08A, 0x00000435 },
{ 0x0213F0FE991010A4, 0x0000305B, 0xFFFFEB9B, 0x000004CB, 0x00001996, 0xFFFFF846, 0x00000308, 0x00001996, 0xFFFFF846, 0x00000308 },
{ 0x0213F0FE99064884, 0x000039C0, 0xFFFFE5D3, 0x000005B0, 0x00001A8D, 0xFFFFF7DA, 0x00000313, 0x00001A8D, 0xFFFFF7DA, 0x00000313 },
{ 0x0213EA94DE0210A4, 0x00002E23, 0xFFFFED3F, 0x0000048F, 0x0000189D, 0xFFFFF94C, 0x000002DE, 0x0000189D, 0xFFFFF94C, 0x000002DE },
{ 0x0213EA94DE021984, 0x0000332B, 0xFFFFE9F1, 0x00000516, 0x000018E6, 0xFFFFF8FE, 0x000002EC, 0x000018E6, 0xFFFFF8FE, 0x000002EC },
{ 0x0213F0FE990838C4, 0x000034A0, 0xFFFFEA44, 0x000004E4, 0x00001ECD, 0xFFFFF5B4, 0x00000364, 0x00001ECD, 0xFFFFF5B4, 0x00000364 },
{ 0x0213F0FD42D24104, 0x0000448C, 0xFFFFDF34, 0x000006A8, 0x0000231C, 0xFFFFF286, 0x000003D9, 0x0000231C, 0xFFFFF286, 0x000003D9 },
{ 0x0213EA94DE062144, 0x00002D8C, 0xFFFFEE65, 0x00000456, 0x000018B1, 0xFFFFF9C8, 0x000002C8, 0x000018B1, 0xFFFFF9C8, 0x000002C8 },
{ 0x0213F0FE99061904, 0x00003527, 0xFFFFE9BF, 0x000004FD, 0x00001D23, 0xFFFFF69F, 0x00000342, 0x00001D23, 0xFFFFF69F, 0x00000342 },
{ 0x0213F0FD42DC38A4, 0x00002C51, 0xFFFFEDC3, 0x00000483, 0x00001BE0, 0xFFFFF720, 0x0000032D, 0x00001BE0, 0xFFFFF720, 0x0000032D },
{ 0x0213F0FE990A3044, 0x00002C6C, 0xFFFFECEB, 0x000004B7, 0x00001C86, 0xFFFFF5E7, 0x00000371, 0x00001C86, 0xFFFFF5E7, 0x00000371 },
{ 0x0213F0FE99045144, 0x000037CF, 0xFFFFE6BE, 0x00000599, 0x000018CD, 0xFFFFF967, 0x000002C7, 0x000018CD, 0xFFFFF967, 0x000002C7 },
{ 0x0213F0FE99103164, 0x00002E6F, 0xFFFFED1D, 0x0000048E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E },
{ 0x0213F0FD42D42984, 0x00003FF3, 0xFFFFDF13, 0x000006F9, 0x000025BF, 0xFFFFEEEE, 0x00000497, 0x000025BF, 0xFFFFEEEE, 0x00000497 },
{ 0x0213F0FD42DC5104, 0x00004135, 0xFFFFDF97, 0x000006CC, 0x00001D52, 0xFFFFF541, 0x00000383, 0x00001D52, 0xFFFFF541, 0x00000383 },
{ 0x0213F0FD42DC20E4, 0x00002EA9, 0xFFFFEDDB, 0x0000045F, 0x0000197C, 0xFFFFF8E1, 0x000002F0, 0x0000197C, 0xFFFFF8E1, 0x000002F0 },
{ 0x0213EA94DE043084, 0x0000345C, 0xFFFFE922, 0x00000532, 0x00001922, 0xFFFFF8C7, 0x000002F1, 0x00001922, 0xFFFFF8C7, 0x000002F1 },
{ 0x0213F0FE99064124, 0x000035C4, 0xFFFFE8FE, 0x00000521, 0x00001C87, 0xFFFFF6F3, 0x00000330, 0x00001C87, 0xFFFFF6F3, 0x00000330 },
{ 0x0213F0FD42D83164, 0x00002888, 0xFFFFF08A, 0x0000041E, 0x0000150F, 0xFFFFFB87, 0x00000291, 0x0000150F, 0xFFFFFB87, 0x00000291 },
{ 0x0213F0FE990A1124, 0x000035E9, 0xFFFFE657, 0x000005CC, 0x00001BD6, 0xFFFFF664, 0x00000355, 0x00001BD6, 0xFFFFF664, 0x00000355 },
{ 0x0213F0FE991648E4, 0x00002F94, 0xFFFFEBD0, 0x000004E5, 0x00001333, 0xFFFFFCA7, 0x00000266, 0x00001333, 0xFFFFFCA7, 0x00000266 },
{ 0x0213F0FE99181964, 0x000029E7, 0xFFFFF009, 0x00000433, 0x0000144A, 0xFFFFFC37, 0x0000027D, 0x0000144A, 0xFFFFFC37, 0x0000027D },
{ 0x0213F0FE992C1944, 0x00003418, 0xFFFFE979, 0x00000521, 0x00001D33, 0xFFFFF66B, 0x0000034A, 0x00001D33, 0xFFFFF66B, 0x0000034A },
{ 0x0213EA94DE0440E4, 0x00003656, 0xFFFFE79D, 0x0000057A, 0x000017C2, 0xFFFFF992, 0x000002D4, 0x000017C2, 0xFFFFF992, 0x000002D4 },
{ 0x0213F0FE990C40C4, 0x00002EB2, 0xFFFFECFE, 0x00000493, 0x00001F2A, 0xFFFFF543, 0x0000037B, 0x00001F2A, 0xFFFFF543, 0x0000037B },
{ 0x0213F0FE99021124, 0x00002FC1, 0xFFFFEB3F, 0x000004E8, 0x00001CD0, 0xFFFFF5F7, 0x00000364, 0x00001CD0, 0xFFFFF5F7, 0x00000364 },
{ 0x0213F0FE990C1124, 0x0000307B, 0xFFFFEB66, 0x000004DE, 0x00001953, 0xFFFFF8ED, 0x000002E4, 0x00001953, 0xFFFFF8ED, 0x000002E4 },
{ 0x0213F0FD42DA1884, 0x00002CAA, 0xFFFFED07, 0x000004AC, 0x0000251C, 0xFFFFF086, 0x0000044D, 0x0000251C, 0xFFFFF086, 0x0000044D },
{ 0x0213EA94DE043944, 0x00002C94, 0xFFFFEE5F, 0x0000045B, 0x000018D7, 0xFFFFF900, 0x000002EB, 0x000018D7, 0xFFFFF900, 0x000002EB },
{ 0x0213F0FE99021864, 0x000031F1, 0xFFFFE9BE, 0x0000052E, 0x00001DDF, 0xFFFFF558, 0x00000380, 0x00001DDF, 0xFFFFF558, 0x00000380 },
{ 0x0213F0FE990E50C4, 0x00002603, 0xFFFFF1E9, 0x000003DA, 0x00001B37, 0xFFFFF75A, 0x0000032F, 0x00001B37, 0xFFFFF75A, 0x0000032F },
{ 0x0213F0FD42DA3044, 0x00003992, 0xFFFFE4F9, 0x000005EB, 0x00001775, 0xFFFFF9B8, 0x000002C2, 0x00001775, 0xFFFFF9B8, 0x000002C2 },
{ 0x0213F0FE99184964, 0x000029DA, 0xFFFFF052, 0x0000041F, 0x000016E2, 0xFFFFFA99, 0x000002BB, 0x000016E2, 0xFFFFFA99, 0x000002BB },
{ 0x0213F0FE99101064, 0x00002FF2, 0xFFFFEB8F, 0x000004DF, 0x00001AF6, 0xFFFFF7A1, 0x00000321, 0x00001AF6, 0xFFFFF7A1, 0x00000321 },
{ 0x0213F0FE991608E4, 0x00002590, 0xFFFFF222, 0x000003EE, 0x0000130B, 0xFFFFFCC9, 0x00000268, 0x0000130B, 0xFFFFFCC9, 0x00000268 },
{ 0x0213F0FE99024064, 0x000038A2, 0xFFFFE65F, 0x000005A2, 0x000018B1, 0xFFFFF917, 0x000002E1, 0x000018B1, 0xFFFFF917, 0x000002E1 },
{ 0x0213F0FD42DC48E4, 0x000035FD, 0xFFFFE73C, 0x0000058D, 0x00001BB3, 0xFFFFF6E1, 0x00000337, 0x00001BB3, 0xFFFFF6E1, 0x00000337 },
{ 0x0213F0FE991038C4, 0x00002AB7, 0xFFFFEF98, 0x00000429, 0x00001F35, 0xFFFFF539, 0x0000037C, 0x00001F35, 0xFFFFF539, 0x0000037C },
{ 0x0213F0FE990A0944, 0x000034BA, 0xFFFFE73D, 0x000005A6, 0x000018A6, 0xFFFFF888, 0x000002FB, 0x000018A6, 0xFFFFF888, 0x000002FB },
{ 0x0213F0FE99063844, 0x000032EA, 0xFFFFEA78, 0x000004F4, 0x00001AB6, 0xFFFFF812, 0x00000308, 0x00001AB6, 0xFFFFF812, 0x00000308 },
{ 0x0213F0FE990C3044, 0x00002BE9, 0xFFFFEE9A, 0x00000457, 0x00001942, 0xFFFFF8D2, 0x000002F2, 0x00001942, 0xFFFFF8D2, 0x000002F2 },
{ 0x0213F0FE99105124, 0x00002FAB, 0xFFFFEB76, 0x000004E1, 0x00001DCA, 0xFFFFF57D, 0x00000378, 0x00001DCA, 0xFFFFF57D, 0x00000378 },
{ 0x0213F0FE992E2844, 0x0000330A, 0xFFFFE9E1, 0x0000051B, 0x00001CC4, 0xFFFFF6DF, 0x00000335, 0x00001CC4, 0xFFFFF6DF, 0x00000335 },
{ 0x0213F0FE991828A4, 0x000027D8, 0xFFFFF276, 0x000003BF, 0x0000178A, 0xFFFFFABF, 0x000002B5, 0x0000178A, 0xFFFFFABF, 0x000002B5 },
{ 0x0213F0FD42DC3864, 0x0000340A, 0xFFFFE86D, 0x00000562, 0x00001B85, 0xFFFFF719, 0x0000032F, 0x00001B85, 0xFFFFF719, 0x0000032F },
{ 0x0213EA94DE063084, 0x00003879, 0xFFFFE73F, 0x00000578, 0x0000161C, 0xFFFFFB6B, 0x00000281, 0x0000161C, 0xFFFFFB6B, 0x00000281 },
{ 0x0213F0FE99184064, 0x00002879, 0xFFFFF0F8, 0x0000040A, 0x00001749, 0xFFFFFA37, 0x000002CC, 0x00001749, 0xFFFFFA37, 0x000002CC },
{ 0x0213F0FE99043964, 0x00002C3A, 0xFFFFEEA0, 0x0000044F, 0x00001D57, 0xFFFFF6C2, 0x00000332, 0x00001D57, 0xFFFFF6C2, 0x00000332 },
{ 0x0213EA94DE021964, 0x000035BB, 0xFFFFE90D, 0x0000052A, 0x000017D9, 0xFFFFF9F5, 0x000002C3, 0x000017D9, 0xFFFFF9F5, 0x000002C3 },
{ 0x0213EA94DE041124, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x00001F10, 0xFFFFF539, 0x0000037D, 0x00001F10, 0xFFFFF539, 0x0000037D },
{ 0x0213F0FE99102824, 0x00002A1A, 0xFFFFEFAD, 0x00000430, 0x00001D47, 0xFFFFF62F, 0x0000035E, 0x00001D47, 0xFFFFF62F, 0x0000035E },
{ 0x0213F0FE99164924, 0x00002AF0, 0xFFFFEEDC, 0x00000465, 0x0000145F, 0xFFFFFBEB, 0x00000281, 0x0000145F, 0xFFFFFBEB, 0x00000281 },
{ 0x0213F0FE99183164, 0x00002657, 0xFFFFF2E0, 0x000003B6, 0x00001664, 0xFFFFFB37, 0x000002A2, 0x00001664, 0xFFFFFB37, 0x000002A2 },
{ 0x0213F0FD42D03864, 0x00003183, 0xFFFFE9F1, 0x0000052B, 0x00002020, 0xFFFFF3CE, 0x000003C1, 0x00002020, 0xFFFFF3CE, 0x000003C1 },
{ 0x0213F0FD42C628E4, 0x00003240, 0xFFFFEB65, 0x000004C7, 0x00002425, 0xFFFFF245, 0x000003F3, 0x00002425, 0xFFFFF245, 0x000003F3 },
{ 0x0213EA94DE321104, 0x000023D0, 0xFFFFF400, 0x00000397, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
{ 0x0213F0FD42CE38A4, 0x00003440, 0xFFFFE872, 0x0000055B, 0x00002247, 0xFFFFF296, 0x000003E8, 0x00002247, 0xFFFFF296, 0x000003E8 },
{ 0x0213F0FD42D04904, 0x00003275, 0xFFFFE970, 0x00000538, 0x00001F94, 0xFFFFF429, 0x000003AD, 0x00001F94, 0xFFFFF429, 0x000003AD },
{ 0x0213F0FD42C640A4, 0x00003918, 0xFFFFE5DA, 0x000005B6, 0x000024FC, 0xFFFFF106, 0x00000426, 0x000024FC, 0xFFFFF106, 0x00000426 },
{ 0x0213EA94DE062044, 0x0000334B, 0xFFFFEA39, 0x000004FD, 0x00001983, 0xFFFFF8F6, 0x000002E2, 0x00001983, 0xFFFFF8F6, 0x000002E2 },
{ 0x0213F0FD42C64984, 0x00003B59, 0xFFFFE4D0, 0x000005DA, 0x00002605, 0xFFFFF090, 0x00000439, 0x00002605, 0xFFFFF090, 0x00000439 },
{ 0x0213F0FD42D03124, 0x00003251, 0xFFFFEA46, 0x00000511, 0x00002781, 0xFFFFEF84, 0x00000470, 0x00002781, 0xFFFFEF84, 0x00000470 },
{ 0x0213F0FD42CA3164, 0x00003304, 0xFFFFE926, 0x00000542, 0x00001EE9, 0xFFFFF4E4, 0x0000038B, 0x00001EE9, 0xFFFFF4E4, 0x0000038B },
{ 0x0213F0FD42CC38C4, 0x00002F4C, 0xFFFFEC0C, 0x000004C4, 0x00001E49, 0xFFFFF578, 0x00000374, 0x00001E49, 0xFFFFF578, 0x00000374 },
{ 0x0213EA94DE1C2164, 0x00002034, 0xFFFFF692, 0x0000034C, 0x000014B8, 0xFFFFFC5B, 0x00000294, 0x000014B8, 0xFFFFFC5B, 0x00000294 },
{ 0x0213F0FD42CE4924, 0x0000385F, 0xFFFFE513, 0x000005F3, 0x000024E7, 0xFFFFF053, 0x00000450, 0x000024E7, 0xFFFFF053, 0x00000450 },
{ 0x0213EA94DE1C40E4, 0x00001D70, 0xFFFFF821, 0x0000030F, 0x00001541, 0xFFFFFBB4, 0x000002B0, 0x00001541, 0xFFFFFBB4, 0x000002B0 },
{ 0x0213F0FD42D02084, 0x000034EB, 0xFFFFE7FF, 0x00000575, 0x000019B4, 0xFFFFF836, 0x00000308, 0x000019B4, 0xFFFFF836, 0x00000308 },
{ 0x0213F0FD42D050E4, 0x000037C9, 0xFFFFE5D4, 0x000005CD, 0x000026A1, 0xFFFFEF0C, 0x00000491, 0x000026A1, 0xFFFFEF0C, 0x00000491 },
{ 0x0213EA94DE121944, 0x00002918, 0xFFFFF148, 0x000003E9, 0x00001A49, 0xFFFFF94C, 0x000002CF, 0x00001A49, 0xFFFFF94C, 0x000002CF },
{ 0x0213F0FD42CA4064, 0x00002F90, 0xFFFFEAB5, 0x00000514, 0x00001707, 0xFFFFF9C7, 0x000002C4, 0x00001707, 0xFFFFF9C7, 0x000002C4 },
{ 0x0213EA94DE062064, 0x0000327E, 0xFFFFEA99, 0x000004F4, 0x0000194F, 0xFFFFF929, 0x000002DC, 0x0000194F, 0xFFFFF929, 0x000002DC },
{ 0x0213F0FD42C64084, 0x0000326F, 0xFFFFE9CF, 0x00000519, 0x00002240, 0xFFFFF299, 0x000003E7, 0x00002240, 0xFFFFF299, 0x000003E7 },
{ 0x0213EA94DE321124, 0x000022FB, 0xFFFFF4C6, 0x00000371, 0x00001506, 0xFFFFFC73, 0x00000265, 0x00001506, 0xFFFFFC73, 0x00000265 },
{ 0x0213F0FD42CA3924, 0x00003AD6, 0xFFFFE470, 0x000005FE, 0x00001F03, 0xFFFFF4F3, 0x00000387, 0x00001F03, 0xFFFFF4F3, 0x00000387 },
{ 0x0213EA94DE201124, 0x00001F11, 0xFFFFF756, 0x00000332, 0x00001666, 0xFFFFFB8A, 0x000002B2, 0x00001666, 0xFFFFFB8A, 0x000002B2 },
{ 0x0213EA94DE0238A4, 0x00002A5F, 0xFFFFEFA7, 0x00000430, 0x00001943, 0xFFFFF8C6, 0x000002F7, 0x00001943, 0xFFFFF8C6, 0x000002F7 },
{ 0x0213EA94DE1650E4, 0x0000235E, 0xFFFFF3B4, 0x000003B3, 0x00001489, 0xFFFFFBCF, 0x0000029B, 0x00001489, 0xFFFFFBCF, 0x0000029B },
{ 0x0213F0FD42CC38A4, 0x00003570, 0xFFFFE780, 0x0000058D, 0x00001B1D, 0xFFFFF767, 0x00000325, 0x00001B1D, 0xFFFFF767, 0x00000325 },
{ 0x0213EA94DE042064, 0x00003678, 0xFFFFE7C3, 0x00000569, 0x00001831, 0xFFFFF98E, 0x000002C8, 0x00001831, 0xFFFFF98E, 0x000002C8 },
{ 0x0213EA94DE201864, 0x000020B9, 0xFFFFF625, 0x0000035A, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
{ 0x0213F0FD42C63184, 0x00003985, 0xFFFFE529, 0x000005DD, 0x00002165, 0xFFFFF351, 0x000003C5, 0x00002165, 0xFFFFF351, 0x000003C5 },
{ 0x0213F0FD42D02064, 0x0000322A, 0xFFFFE99D, 0x00000535, 0x000019A1, 0xFFFFF844, 0x00000305, 0x000019A1, 0xFFFFF844, 0x00000305 },
{ 0x0213F0FD42D05104, 0x000033ED, 0xFFFFE834, 0x00000571, 0x00002094, 0xFFFFF33A, 0x000003DB, 0x00002094, 0xFFFFF33A, 0x000003DB },
{ 0x0213EA94DE2040C4, 0x00001D10, 0xFFFFF84D, 0x0000030B, 0x00001659, 0xFFFFFB0A, 0x000002CB, 0x00001659, 0xFFFFFB0A, 0x000002CB },
{ 0x0213EA94DE1C1124, 0x0000210F, 0xFFFFF644, 0x00000355, 0x00001A4A, 0xFFFFF90F, 0x00000310, 0x00001A4A, 0xFFFFF90F, 0x00000310 },
{ 0x0213EA94DE164164, 0x00001CA8, 0xFFFFF813, 0x00000316, 0x00001440, 0xFFFFFC1C, 0x0000029D, 0x00001440, 0xFFFFFC1C, 0x0000029D },
{ 0x0213EA94DE3210C4, 0x00002864, 0xFFFFF15A, 0x000003FA, 0x0000137F, 0xFFFFFD43, 0x00000248, 0x0000137F, 0xFFFFFD43, 0x00000248 },
{ 0x0213F0FD42D04184, 0x00002CDB, 0xFFFFECFD, 0x000004A7, 0x00002472, 0xFFFFF0E1, 0x00000437, 0x00002472, 0xFFFFF0E1, 0x00000437 },
{ 0x0213F0FD42CC5104, 0x00003348, 0xFFFFE8CA, 0x00000554, 0x00001E91, 0xFFFFF4D4, 0x00000392, 0x00001E91, 0xFFFFF4D4, 0x00000392 },
{ 0x0213F0FD42C64944, 0x00003989, 0xFFFFE4BB, 0x000005F8, 0x00001ACB, 0xFFFFF780, 0x00000319, 0x00001ACB, 0xFFFFF780, 0x00000319 },
{ 0x0213F0FD42CA2104, 0x00003238, 0xFFFFEA09, 0x0000051E, 0x00001F08, 0xFFFFF4F4, 0x0000038C, 0x00001F08, 0xFFFFF4F4, 0x0000038C },
{ 0x0213EA94DE120904, 0x00002453, 0xFFFFF3B0, 0x0000038D, 0x00001AED, 0xFFFFF8A2, 0x000002EA, 0x00001AED, 0xFFFFF8A2, 0x000002EA },
{ 0x0213EA94DE1C3024, 0x00002459, 0xFFFFF409, 0x000003A8, 0x000017B5, 0xFFFFFA53, 0x000002E1, 0x000017B5, 0xFFFFFA53, 0x000002E1 },
{ 0x0213EA94DE021184, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001DC9, 0xFFFFF5D5, 0x00000368, 0x00001DC9, 0xFFFFF5D5, 0x00000368 },
{ 0x0213EA94DE023104, 0x000031BF, 0xFFFFECA3, 0x00000498, 0x00001DC9, 0xFFFFF717, 0x00000336, 0x00001DC9, 0xFFFFF717, 0x00000336 },
{ 0x0213F0FD42CE2104, 0x00003896, 0xFFFFE5DD, 0x000005C5, 0x000023E2, 0xFFFFF1A1, 0x00000416, 0x000023E2, 0xFFFFF1A1, 0x00000416 },
{ 0x0213EA94DE323904, 0x000023CB, 0xFFFFF4C8, 0x00000372, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
{ 0x0213F0FD42D020C4, 0x00002F6B, 0xFFFFEBF0, 0x000004CE, 0x00001C89, 0xFFFFF689, 0x0000034D, 0x00001C89, 0xFFFFF689, 0x0000034D },
{ 0x0213F0FD42CE3904, 0x00003E72, 0xFFFFE211, 0x0000065D, 0x0000218D, 0xFFFFF309, 0x000003DC, 0x0000218D, 0xFFFFF309, 0x000003DC },
{ 0x0213EA94DE022084, 0x00002612, 0xFFFFF2C3, 0x000003AD, 0x000019F7, 0xFFFFF891, 0x000002FE, 0x000019F7, 0xFFFFF891, 0x000002FE },
{ 0x0213EA94DE164184, 0x0000205D, 0xFFFFF59F, 0x00000372, 0x000012E6, 0xFFFFFD0A, 0x00000270, 0x000012E6, 0xFFFFFD0A, 0x00000270 },
{ 0x0213F0FD42CA2124, 0x00002ECB, 0xFFFFEC47, 0x000004BD, 0x00001936, 0xFFFFF8D9, 0x000002E4, 0x00001936, 0xFFFFF8D9, 0x000002E4 },
{ 0x0213EA94DE064904, 0x00002BDB, 0xFFFFEE6D, 0x00000458, 0x00001852, 0xFFFFF943, 0x000002D9, 0x00001852, 0xFFFFF943, 0x000002D9 },
{ 0x0213EA94DE124904, 0x00003387, 0xFFFFE958, 0x00000534, 0x00001932, 0xFFFFF8FA, 0x000002E4, 0x00001932, 0xFFFFF8FA, 0x000002E4 },
{ 0x0213EA94DE0208C4, 0x00002E3C, 0xFFFFED26, 0x00000495, 0x00001858, 0xFFFFF990, 0x000002D1, 0x00001858, 0xFFFFF990, 0x000002D1 },
{ 0x0213EA94DE022964, 0x000033B8, 0xFFFFEA5C, 0x000004F9, 0x00001BD1, 0xFFFFF76A, 0x0000032E, 0x00001BD1, 0xFFFFF76A, 0x0000032E },
{ 0x0213EA94DE062984, 0x00002BCE, 0xFFFFEEE9, 0x00000443, 0x00001982, 0xFFFFF90D, 0x000002DF, 0x00001982, 0xFFFFF90D, 0x000002DF },
{ 0x0213F0FD42D048E4, 0x00003495, 0xFFFFE7D9, 0x0000057B, 0x00001D2A, 0xFFFFF5A5, 0x00000372, 0x00001D2A, 0xFFFFF5A5, 0x00000372 },
{ 0x0213F0FD42CA38E4, 0x000034B1, 0xFFFFE88D, 0x00000556, 0x00002014, 0xFFFFF43A, 0x000003AA, 0x00002014, 0xFFFFF43A, 0x000003AA },
{ 0x0213F0FD42CC3124, 0x00002F96, 0xFFFFEC84, 0x000004AD, 0x000024A2, 0xFFFFF1CE, 0x0000040A, 0x000024A2, 0xFFFFF1CE, 0x0000040A },
{ 0x0213EA94DE161064, 0x0000203B, 0xFFFFF640, 0x00000359, 0x000014EC, 0xFFFFFC14, 0x0000029C, 0x000014EC, 0xFFFFFC14, 0x0000029C },
{ 0x0213F0FD42D02984, 0x000034E2, 0xFFFFE7B8, 0x00000582, 0x00001938, 0xFFFFF872, 0x000002FA, 0x00001938, 0xFFFFF872, 0x000002FA },
{ 0x0213EA94DE063124, 0x00002AC7, 0xFFFFF0C1, 0x000003F5, 0x00002268, 0xFFFFF39C, 0x000003C9, 0x00002268, 0xFFFFF39C, 0x000003C9 },
{ 0x0213F0FD42C63144, 0x000036F6, 0xFFFFE77F, 0x00000571, 0x000027D9, 0xFFFFEF6F, 0x00000461, 0x000027D9, 0xFFFFEF6F, 0x00000461 },
{ 0x0213EA94DE123124, 0x00002BAB, 0xFFFFF018, 0x00000419, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
{ 0x0213EA94DE323924, 0x000028C4, 0xFFFFF161, 0x000003F8, 0x0000180C, 0xFFFFFA4B, 0x000002C8, 0x0000180C, 0xFFFFFA4B, 0x000002C8 },
{ 0x0213F0FD42CA2864, 0x00002F48, 0xFFFFEB62, 0x000004EE, 0x00001912, 0xFFFFF8C8, 0x000002EA, 0x00001912, 0xFFFFF8C8, 0x000002EA },
{ 0x0213F0FD42CE2864, 0x000032DF, 0xFFFFE911, 0x00000545, 0x00001F06, 0xFFFFF485, 0x0000039C, 0x00001F06, 0xFFFFF485, 0x0000039C },
{ 0x0213F0FD42D04144, 0x000035B8, 0xFFFFE74F, 0x00000590, 0x00001FD7, 0xFFFFF410, 0x000003AF, 0x00001FD7, 0xFFFFF410, 0x000003AF },
{ 0x0213F0FD42D050C4, 0x00003608, 0xFFFFE6D7, 0x000005A9, 0x000024A6, 0xFFFFF075, 0x00000450, 0x000024A6, 0xFFFFF075, 0x00000450 },
{ 0x0213F0FD42CA3884, 0x000030AB, 0xFFFFEAED, 0x000004F5, 0x000019EE, 0xFFFFF84E, 0x000002FC, 0x000019EE, 0xFFFFF84E, 0x000002FC },
{ 0x0213EA94DE0620C4, 0x000030C6, 0xFFFFEC92, 0x0000049E, 0x000019BB, 0xFFFFF8F1, 0x000002F3, 0x000019BB, 0xFFFFF8F1, 0x000002F3 },
{ 0x0213F0FD42C630A4, 0x00003B27, 0xFFFFE544, 0x000005C1, 0x00002697, 0xFFFFF072, 0x00000438, 0x00002697, 0xFFFFF072, 0x00000438 },
{ 0x0213EA94DE1248E4, 0x00002F23, 0xFFFFEC48, 0x000004B9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
{ 0x0213EA94DE0629A4, 0x00002BD7, 0xFFFFEEAC, 0x00000450, 0x00001991, 0xFFFFF8F4, 0x000002E2, 0x00001991, 0xFFFFF8F4, 0x000002E2 },
{ 0x0213EA94DE022024, 0x00003210, 0xFFFFEB24, 0x000004DE, 0x00001BDF, 0xFFFFF744, 0x00000333, 0x00001BDF, 0xFFFFF744, 0x00000333 },
{ 0x0213EA94DE244144, 0x00002DDC, 0xFFFFED0D, 0x000004AC, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
{ 0x0213EA94DE203964, 0x000023E6, 0xFFFFF40C, 0x000003A9, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
{ 0x0213F0FD42CA29A4, 0x000030CE, 0xFFFFE9A5, 0x0000053C, 0x00001C45, 0xFFFFF60E, 0x0000035D, 0x00001C45, 0xFFFFF60E, 0x0000035D },
{ 0x0213EA94DE161084, 0x00001E89, 0xFFFFF73A, 0x00000337, 0x0000157C, 0xFFFFFBC0, 0x000002AA, 0x0000157C, 0xFFFFFBC0, 0x000002AA },
{ 0x0213F0FD42D04124, 0x000036C6, 0xFFFFE6CF, 0x000005A1, 0x00002457, 0xFFFFF11D, 0x0000042D, 0x00002457, 0xFFFFF11D, 0x0000042D },
{ 0x0213EA94DE321944, 0x00002815, 0xFFFFF19A, 0x000003F2, 0x000016D2, 0xFFFFFB40, 0x00000299, 0x000016D2, 0xFFFFFB40, 0x00000299 },
{ 0x0213EA94DE1C19A4, 0x00001FE2, 0xFFFFF660, 0x00000354, 0x000015A7, 0xFFFFFB47, 0x000002C1, 0x000015A7, 0xFFFFFB47, 0x000002C1 },
{ 0x0213EA94DE161964, 0x00002114, 0xFFFFF634, 0x00000356, 0x000016C1, 0xFFFFFB43, 0x000002B8, 0x000016C1, 0xFFFFFB43, 0x000002B8 },
{ 0x0213F0FD42CC28C4, 0x000028E3, 0xFFFFF075, 0x00000414, 0x0000203C, 0xFFFFF438, 0x000003B3, 0x0000203C, 0xFFFFF438, 0x000003B3 },
{ 0x0213EA94DE1C3924, 0x00001EEB, 0xFFFFF7BB, 0x0000031A, 0x00001580, 0xFFFFFBD7, 0x000002AD, 0x00001580, 0xFFFFFBD7, 0x000002AD },
{ 0x0213EA94DE2408C4, 0x00002BB2, 0xFFFFEE72, 0x00000470, 0x0000192C, 0xFFFFF91E, 0x000002E7, 0x0000192C, 0xFFFFF91E, 0x000002E7 },
{ 0x0213EA94DE0650E4, 0x00003A3D, 0xFFFFE49D, 0x000005F5, 0x00001A3B, 0xFFFFF7B1, 0x00000320, 0x00001A3B, 0xFFFFF7B1, 0x00000320 },
{ 0x0213F0FD42CE3164, 0x00002E93, 0xFFFFEC5A, 0x000004B4, 0x000025EB, 0xFFFFF03C, 0x0000044A, 0x000025EB, 0xFFFFF03C, 0x0000044A },
{ 0x0213F0FD42CA20C4, 0x0000331F, 0xFFFFE97A, 0x00000531, 0x00001A06, 0xFFFFF850, 0x000002FD, 0x00001A06, 0xFFFFF850, 0x000002FD },
{ 0x0213F0FD42C63964, 0x00003937, 0xFFFFE5A0, 0x000005C7, 0x0000235E, 0xFFFFF234, 0x000003F2, 0x0000235E, 0xFFFFF234, 0x000003F2 },
{ 0x0213EA94DE1E3924, 0x00001DD0, 0xFFFFF80E, 0x00000319, 0x000015C7, 0xFFFFFB91, 0x000002BC, 0x000015C7, 0xFFFFFB91, 0x000002BC },
{ 0x0213F0FD42D03964, 0x00003328, 0xFFFFE905, 0x0000054A, 0x00002054, 0xFFFFF3BF, 0x000003C0, 0x00002054, 0xFFFFF3BF, 0x000003C0 },
{ 0x0213F0FD42CC1104, 0x00002FE5, 0xFFFFEA65, 0x00000520, 0x0000188B, 0xFFFFF8A7, 0x000002F5, 0x0000188B, 0xFFFFF8A7, 0x000002F5 },
{ 0x0213F0FD42CA38A4, 0x00002ED3, 0xFFFFEC51, 0x000004B9, 0x00001888, 0xFFFFF96A, 0x000002CA, 0x00001888, 0xFFFFF96A, 0x000002CA },
{ 0x0213F0FD42D03084, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001F8D, 0xFFFFF436, 0x000003B4, 0x00001F8D, 0xFFFFF436, 0x000003B4 },
{ 0x0213F0FD42CE4084, 0x0000329F, 0xFFFFE8F7, 0x0000054F, 0x000023DB, 0xFFFFF0EE, 0x0000043A, 0x000023DB, 0xFFFFF0EE, 0x0000043A },
{ 0x0213EA94DE0438A4, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001AFD, 0xFFFFF781, 0x00000329, 0x00001AFD, 0xFFFFF781, 0x00000329 },
{ 0x0213EA94DE1E19A4, 0x00001BBF, 0xFFFFF8E2, 0x000002F7, 0x00001722, 0xFFFFFA85, 0x000002DB, 0x00001722, 0xFFFFFA85, 0x000002DB },
{ 0x0213EA94DE022044, 0x000030E4, 0xFFFFEBE6, 0x000004BB, 0x00001C80, 0xFFFFF6E1, 0x0000033E, 0x00001C80, 0xFFFFF6E1, 0x0000033E },
{ 0x0213EA94DE122944, 0x000030E2, 0xFFFFECD0, 0x00000492, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
{ 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
{ 0x0213EA94DE1610A4, 0x00002147, 0xFFFFF585, 0x0000037A, 0x000014CC, 0xFFFFFC3B, 0x00000296, 0x000014CC, 0xFFFFFC3B, 0x00000296 },
{ 0x0213EA94DE322124, 0x00002507, 0xFFFFF432, 0x0000038A, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
{ 0x0213EA94DE0638A4, 0x0000339B, 0xFFFFEA7D, 0x000004F0, 0x0000191E, 0xFFFFF944, 0x000002DF, 0x0000191E, 0xFFFFF944, 0x000002DF },
{ 0x0213F0FD42CC28A4, 0x00002842, 0xFFFFF043, 0x00000427, 0x00001988, 0xFFFFF892, 0x000002F7, 0x00001988, 0xFFFFF892, 0x000002F7 },
{ 0x0213F0FD42C618A4, 0x0000389D, 0xFFFFE5D8, 0x000005BF, 0x00001EE1, 0xFFFFF4EF, 0x00000387, 0x00001EE1, 0xFFFFF4EF, 0x00000387 },
{ 0x0213F0FD42CE3184, 0x0000396D, 0xFFFFE4D7, 0x000005F2, 0x000020DA, 0xFFFFF34E, 0x000003CD, 0x000020DA, 0xFFFFF34E, 0x000003CD },
{ 0x0213F0FD42CA3104, 0x0000355F, 0xFFFFE85A, 0x0000055F, 0x0000281F, 0xFFFFEF28, 0x0000047D, 0x0000281F, 0xFFFFEF28, 0x0000047D },
{ 0x0213EA94DE1C50E4, 0x00002284, 0xFFFFF46E, 0x00000399, 0x00001498, 0xFFFFFBE3, 0x0000029C, 0x00001498, 0xFFFFFBE3, 0x0000029C },
{ 0x0213EA94DE023944, 0x000031B6, 0xFFFFEB42, 0x000004D9, 0x00001F54, 0xFFFFF4D2, 0x00000399, 0x00001F54, 0xFFFFF4D2, 0x00000399 },
{ 0x0213F0FD42C63064, 0x000035CE, 0xFFFFE79D, 0x00000578, 0x00001C78, 0xFFFFF68C, 0x00000344, 0x00001C78, 0xFFFFF68C, 0x00000344 },
{ 0x0213EA94DE1E4964, 0x00001C0A, 0xFFFFF81B, 0x00000318, 0x00001492, 0xFFFFFBCC, 0x000002A5, 0x00001492, 0xFFFFFBCC, 0x000002A5 },
{ 0x0213EA94DE022184, 0x00003492, 0xFFFFE95C, 0x00000526, 0x00001A97, 0xFFFFF81B, 0x0000030B, 0x00001A97, 0xFFFFF81B, 0x0000030B },
{ 0x0213EA94DE163164, 0x00001E89, 0xFFFFF7D0, 0x0000031A, 0x000017A5, 0xFFFFFA99, 0x000002D9, 0x000017A5, 0xFFFFFA99, 0x000002D9 },
{ 0x0213F0FD42CA48C4, 0x00002DCC, 0xFFFFEBE0, 0x000004DE, 0x000019BA, 0xFFFFF7F5, 0x0000030D, 0x000019BA, 0xFFFFF7F5, 0x0000030D },
{ 0x0213EA94DE042984, 0x000030EF, 0xFFFFEBC1, 0x000004C0, 0x00001AA9, 0xFFFFF814, 0x0000030A, 0x00001AA9, 0xFFFFF814, 0x0000030A },
{ 0x0213EA94DE245124, 0x00002EA3, 0xFFFFEBF6, 0x000004D8, 0x00001DCF, 0xFFFFF521, 0x00000399, 0x00001DCF, 0xFFFFF521, 0x00000399 },
{ 0x0213EA94DE324164, 0x00002B5F, 0xFFFFEEA1, 0x0000046C, 0x000017EB, 0xFFFFF9C9, 0x000002D4, 0x000017EB, 0xFFFFF9C9, 0x000002D4 },
{ 0x0213EA94DE024104, 0x00002C63, 0xFFFFEE82, 0x00000455, 0x00002268, 0xFFFFF29D, 0x000003F6, 0x00002268, 0xFFFFF29D, 0x000003F6 },
{ 0x0213EA94DE121904, 0x00002B1A, 0xFFFFF016, 0x0000041C, 0x000019AA, 0xFFFFF988, 0x000002D2, 0x000019AA, 0xFFFFF988, 0x000002D2 },
{ 0x0213F0FD42CA2964, 0x0000332F, 0xFFFFE934, 0x0000053B, 0x00001E47, 0xFFFFF566, 0x00000374, 0x00001E47, 0xFFFFF566, 0x00000374 },
{ 0x0213F0FD42CA48E4, 0x00002995, 0xFFFFEEC1, 0x00000465, 0x0000178F, 0xFFFFF995, 0x000002C5, 0x0000178F, 0xFFFFF995, 0x000002C5 },
{ 0x0213EA94DE201884, 0x00001C2E, 0xFFFFF932, 0x000002E9, 0x000015C2, 0xFFFFFBC5, 0x000002AD, 0x000015C2, 0xFFFFFBC5, 0x000002AD },
{ 0x0213F0FD42C640E4, 0x00003B08, 0xFFFFE4E8, 0x000005D8, 0x0000209D, 0xFFFFF444, 0x00000398, 0x0000209D, 0xFFFFF444, 0x00000398 },
{ 0x0213EA94DE0450E4, 0x00002F1F, 0xFFFFEB74, 0x000004EB, 0x00001F4C, 0xFFFFF3D4, 0x000003CE, 0x00001F4C, 0xFFFFF3D4, 0x000003CE },
{ 0x0213EA94DE043884, 0x00003415, 0xFFFFE89F, 0x00000553, 0x0000186B, 0xFFFFF8E1, 0x000002EF, 0x0000186B, 0xFFFFF8E1, 0x000002EF },
{ 0x0213F0FD42CC10C4, 0x00003441, 0xFFFFE779, 0x0000059D, 0x000019EA, 0xFFFFF7B2, 0x0000031F, 0x000019EA, 0xFFFFF7B2, 0x0000031F },
{ 0x0213EA94DE164064, 0x00002174, 0xFFFFF546, 0x00000378, 0x00001456, 0xFFFFFC5F, 0x00000284, 0x00001456, 0xFFFFFC5F, 0x00000284 },
{ 0x0213F0FD42CE40C4, 0x00003788, 0xFFFFE61E, 0x000005BF, 0x00001DF4, 0xFFFFF562, 0x00000374, 0x00001DF4, 0xFFFFF562, 0x00000374 },
{ 0x0213EA94DE1E1844, 0x00001C41, 0xFFFFF8C1, 0x000002FC, 0x0000171E, 0xFFFFFA93, 0x000002DE, 0x0000171E, 0xFFFFFA93, 0x000002DE },
{ 0x0213F0FD42CA3864, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x000017E4, 0xFFFFF934, 0x000002DF, 0x000017E4, 0xFFFFF934, 0x000002DF },
{ 0x0213F0FD42CC3144, 0x0000327A, 0xFFFFEA71, 0x000004FF, 0x00001D96, 0xFFFFF63B, 0x00000351, 0x00001D96, 0xFFFFF63B, 0x00000351 },
{ 0x0213EA94DE1E4064, 0x000023C6, 0xFFFFF3E5, 0x000003B6, 0x000014DE, 0xFFFFFC29, 0x00000294, 0x000014DE, 0xFFFFFC29, 0x00000294 },
{ 0x0213EA94DE164944, 0x00001F96, 0xFFFFF5FA, 0x00000364, 0x00001397, 0xFFFFFC9D, 0x0000027D, 0x00001397, 0xFFFFFC9D, 0x0000027D },
{ 0x0213EA94DE063144, 0x00002B51, 0xFFFFEFB5, 0x00000420, 0x00001ACA, 0xFFFFF824, 0x0000030D, 0x00001ACA, 0xFFFFF824, 0x0000030D },
{ 0x0213EA94DE1E4944, 0x000020DB, 0xFFFFF55B, 0x0000037C, 0x0000153D, 0xFFFFFB5F, 0x000002BA, 0x0000153D, 0xFFFFFB5F, 0x000002BA },
{ 0x0213EA94DE0221A4, 0x000030BB, 0xFFFFEBDA, 0x000004BC, 0x00001B0E, 0xFFFFF7A8, 0x0000031E, 0x00001B0E, 0xFFFFF7A8, 0x0000031E },
{ 0x0213F0FD42C62904, 0x000033C4, 0xFFFFEA41, 0x000004FA, 0x000022C6, 0xFFFFF363, 0x000003BC, 0x000022C6, 0xFFFFF363, 0x000003BC },
{ 0x0213EA94DE240924, 0x00002D47, 0xFFFFEE01, 0x00000477, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
{ 0x0213EA94DE1E31A4, 0x00001E7B, 0xFFFFF733, 0x00000339, 0x00001668, 0xFFFFFB29, 0x000002BF, 0x00001668, 0xFFFFFB29, 0x000002BF },
{ 0x0213F0FD42CA2984, 0x00002F7E, 0xFFFFEAFF, 0x000004FC, 0x000018D4, 0xFFFFF8BE, 0x000002E8, 0x000018D4, 0xFFFFF8BE, 0x000002E8 },
{ 0x0213EA94DE3238A4, 0x00002635, 0xFFFFF2E1, 0x000003BC, 0x000017A4, 0xFFFFFA67, 0x000002C3, 0x000017A4, 0xFFFFFA67, 0x000002C3 },
{ 0x0213EA94DE1230A4, 0x000026CA, 0xFFFFF2C1, 0x000003B2, 0x00001C3E, 0xFFFFF7AE, 0x0000031F, 0x00001C3E, 0xFFFFF7AE, 0x0000031F },
{ 0x0213EA94DE1C1064, 0x00002550, 0xFFFFF380, 0x000003B5, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
{ 0x0213F0FD42CA4904, 0x00002FBC, 0xFFFFEAF8, 0x000004FA, 0x000018CC, 0xFFFFF8C6, 0x000002E8, 0x000018CC, 0xFFFFF8C6, 0x000002E8 },
{ 0x0213F0FD42D018E4, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001EFF, 0xFFFFF4DA, 0x0000038F, 0x00001EFF, 0xFFFFF4DA, 0x0000038F },
{ 0x0213EA94DE164084, 0x000023E6, 0xFFFFF413, 0x000003A1, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
{ 0x0213F0FD42CE3024, 0x00003251, 0xFFFFEAA2, 0x000004F5, 0x000025B0, 0xFFFFF0DF, 0x00000431, 0x000025B0, 0xFFFFF0DF, 0x00000431 },
{ 0x0213F0FD42D03984, 0x00002F6F, 0xFFFFEB67, 0x000004E6, 0x00002275, 0xFFFFF249, 0x000003FB, 0x00002275, 0xFFFFF249, 0x000003FB },
{ 0x0213EA94DE322964, 0x00002597, 0xFFFFF34A, 0x000003B1, 0x00001BCC, 0xFFFFF822, 0x0000031A, 0x00001BCC, 0xFFFFF822, 0x0000031A },
{ 0x0213F0FD42C63864, 0x00003B1D, 0xFFFFE40E, 0x0000060D, 0x00001F61, 0xFFFFF470, 0x0000039F, 0x00001F61, 0xFFFFF470, 0x0000039F },
{ 0x0213F0FD42C64144, 0x0000379F, 0xFFFFE6DB, 0x0000058C, 0x00002460, 0xFFFFF170, 0x00000415, 0x00002460, 0xFFFFF170, 0x00000415 },
{ 0x0213EA94DE165144, 0x00002442, 0xFFFFF2FB, 0x000003D9, 0x00001414, 0xFFFFFBDC, 0x000002A2, 0x00001414, 0xFFFFFBDC, 0x000002A2 },
{ 0x0213EA94DE0240C4, 0x00003270, 0xFFFFEA0D, 0x0000051C, 0x00001AFD, 0xFFFFF783, 0x00000328, 0x00001AFD, 0xFFFFF783, 0x00000328 },
{ 0x0213EA94DE161104, 0x00001B23, 0xFFFFF94B, 0x000002EB, 0x000015F1, 0xFFFFFB82, 0x000002B4, 0x000015F1, 0xFFFFFB82, 0x000002B4 },
{ 0x0213EA94DE323844, 0x000026AE, 0xFFFFF21A, 0x000003DB, 0x00001827, 0xFFFFFA10, 0x000002C8, 0x00001827, 0xFFFFFA10, 0x000002C8 },
{ 0x0213F0FD42CA4884, 0x00002DCF, 0xFFFFEBD8, 0x000004DB, 0x00001A75, 0xFFFFF719, 0x0000033A, 0x00001A75, 0xFFFFF719, 0x0000033A },
{ 0x0213F0FD42CE40E4, 0x00003983, 0xFFFFE500, 0x000005EA, 0x000022A6, 0xFFFFF25F, 0x000003F1, 0x000022A6, 0xFFFFF25F, 0x000003F1 },
{ 0x0213EA94DE1218C4, 0x00002AD5, 0xFFFFF07A, 0x00000406, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
{ 0x0213F0FD42CA39A4, 0x00002A43, 0xFFFFEE43, 0x00000474, 0x00001D65, 0xFFFFF538, 0x00000387, 0x00001D65, 0xFFFFF538, 0x00000387 },
{ 0x0213F0FD42C62084, 0x0000311E, 0xFFFFEAF8, 0x000004E8, 0x00001959, 0xFFFFF8E4, 0x000002DC, 0x00001959, 0xFFFFF8E4, 0x000002DC },
{ 0x0213F0FD42D031A4, 0x0000339A, 0xFFFFE8A7, 0x00000559, 0x00001A04, 0xFFFFF7E5, 0x00000311, 0x00001A04, 0xFFFFF7E5, 0x00000311 },
{ 0x0213EA94DE204144, 0x000021B3, 0xFFFFF50F, 0x00000389, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
{ 0x0213EA94DE021884, 0x00003417, 0xFFFFE9A6, 0x0000051D, 0x000018A4, 0xFFFFF984, 0x000002CF, 0x000018A4, 0xFFFFF984, 0x000002CF },
{ 0x0213EA94DE202984, 0x00001FED, 0xFFFFF6A2, 0x00000347, 0x00001639, 0xFFFFFB59, 0x000002BB, 0x00001639, 0xFFFFFB59, 0x000002BB },
{ 0x0213EA94DE1218A4, 0x000032D2, 0xFFFFEB18, 0x000004DC, 0x00001A01, 0xFFFFF95E, 0x000002CF, 0x00001A01, 0xFFFFF95E, 0x000002CF },
{ 0x0213F0FD42D04084, 0x00003147, 0xFFFFEA3B, 0x00000518, 0x0000241D, 0xFFFFF11C, 0x00000431, 0x0000241D, 0xFFFFF11C, 0x00000431 },
{ 0x0213EA94DE1C0904, 0x00001D44, 0xFFFFF7E7, 0x0000031A, 0x0000153F, 0xFFFFFBBC, 0x000002A9, 0x0000153F, 0xFFFFFBBC, 0x000002A9 },
{ 0x0213F0FD42CC4104, 0x00003690, 0xFFFFE6E3, 0x000005A4, 0x000018DE, 0xFFFFF908, 0x000002DD, 0x000018DE, 0xFFFFF908, 0x000002DD },
{ 0x0213F0FD42CC2184, 0x00003561, 0xFFFFE6F8, 0x000005AB, 0x000018B5, 0xFFFFF8A0, 0x000002F3, 0x000018B5, 0xFFFFF8A0, 0x000002F3 },
{ 0x0213EA94DE323124, 0x000028F4, 0xFFFFF23A, 0x000003CE, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
{ 0x0213F0FD42D03184, 0x000035D7, 0xFFFFE71C, 0x0000059B, 0x00001D49, 0xFFFFF5C8, 0x00000368, 0x00001D49, 0xFFFFF5C8, 0x00000368 },
{ 0x0213F0FD42CE18A4, 0x0000397E, 0xFFFFE4CB, 0x000005F4, 0x00001989, 0xFFFFF844, 0x000002FD, 0x00001989, 0xFFFFF844, 0x000002FD },
{ 0x0213F0FD42C62064, 0x00003BAB, 0xFFFFE332, 0x0000063F, 0x00001A69, 0xFFFFF7B9, 0x00000312, 0x00001A69, 0xFFFFF7B9, 0x00000312 },
{ 0x0213F0FD42D03064, 0x00002F26, 0xFFFFEB82, 0x000004E8, 0x00001D7D, 0xFFFFF590, 0x00000379, 0x00001D7D, 0xFFFFF590, 0x00000379 },
{ 0x0213EA94DE0631A4, 0x00002FDC, 0xFFFFEBE0, 0x000004C3, 0x00001940, 0xFFFFF8CC, 0x000002EE, 0x00001940, 0xFFFFF8CC, 0x000002EE },
{ 0x0213EA94DE1C08E4, 0x000021B2, 0xFFFFF558, 0x00000379, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
{ 0x0213EA94DE321904, 0x00002897, 0xFFFFF181, 0x000003F7, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
{ 0x0213EA94DE1E0924, 0x00001D19, 0xFFFFF829, 0x0000031A, 0x00001558, 0xFFFFFBCA, 0x000002AF, 0x00001558, 0xFFFFFBCA, 0x000002AF },
{ 0x0213EA94DE043144, 0x00003311, 0xFFFFEAD9, 0x000004E1, 0x00001BDC, 0xFFFFF79E, 0x0000031D, 0x00001BDC, 0xFFFFF79E, 0x0000031D },
{ 0x0213EA94DE1E29C4, 0x00001E54, 0xFFFFF740, 0x00000333, 0x000016A1, 0xFFFFFAF0, 0x000002C4, 0x000016A1, 0xFFFFFAF0, 0x000002C4 },
{ 0x0213F0FD42CE3964, 0x00003266, 0xFFFFE9A8, 0x00000527, 0x00002307, 0xFFFFF219, 0x000003FC, 0x00002307, 0xFFFFF219, 0x000003FC },
{ 0x0213EA94DE321144, 0x00001D1F, 0xFFFFF82B, 0x000002F0, 0x000013F0, 0xFFFFFD0B, 0x0000024E, 0x000013F0, 0xFFFFFD0B, 0x0000024E },
{ 0x0213F0FD42C648A4, 0x0000312E, 0xFFFFEA67, 0x00000502, 0x0000222A, 0xFFFFF253, 0x000003F9, 0x0000222A, 0xFFFFF253, 0x000003F9 },
{ 0x0213F0FD42CA4124, 0x000032B2, 0xFFFFE9AD, 0x00000523, 0x00001E97, 0xFFFFF527, 0x0000037F, 0x00001E97, 0xFFFFF527, 0x0000037F },
{ 0x0213EA94DE1640E4, 0x00001F6A, 0xFFFFF6FC, 0x00000338, 0x0000164B, 0xFFFFFB2C, 0x000002C2, 0x0000164B, 0xFFFFFB2C, 0x000002C2 },
{ 0x0213EA94DE0228C4, 0x00002603, 0xFFFFF386, 0x00000392, 0x00001EE0, 0xFFFFF601, 0x00000369, 0x00001EE0, 0xFFFFF601, 0x00000369 },
{ 0x0213EA94DE201164, 0x00001D0C, 0xFFFFF803, 0x00000317, 0x00001345, 0xFFFFFD52, 0x00000260, 0x00001345, 0xFFFFFD52, 0x00000260 },
{ 0x0213F0FD42CC1884, 0x0000327A, 0xFFFFE8E5, 0x0000055C, 0x00001680, 0xFFFFFA2D, 0x000002B2, 0x00001680, 0xFFFFFA2D, 0x000002B2 },
{ 0x0213F0FD42CA3964, 0x000032B8, 0xFFFFE91A, 0x0000054A, 0x00001BAB, 0xFFFFF6EC, 0x00000338, 0x00001BAB, 0xFFFFF6EC, 0x00000338 },
{ 0x0213F0FD42CC3044, 0x00002F79, 0xFFFFEB63, 0x000004EF, 0x000017BB, 0xFFFFF9B1, 0x000002CA, 0x000017BB, 0xFFFFF9B1, 0x000002CA },
{ 0x0213EA94DE0438E4, 0x00002AE5, 0xFFFFEFCB, 0x0000041D, 0x0000214A, 0xFFFFF3A7, 0x000003C7, 0x0000214A, 0xFFFFF3A7, 0x000003C7 },
{ 0x0213EA94DE322064, 0x0000212C, 0xFFFFF5BC, 0x0000034F, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
{ 0x0213EA94DE121124, 0x00002BE7, 0xFFFFEF40, 0x0000043C, 0x00001AE2, 0xFFFFF8CF, 0x000002E3, 0x00001AE2, 0xFFFFF8CF, 0x000002E3 },
{ 0x0213F0FD42D05144, 0x000032DC, 0xFFFFE90F, 0x00000549, 0x00002A2D, 0xFFFFECC9, 0x000004ED, 0x00002A2D, 0xFFFFECC9, 0x000004ED },
{ 0x0213EA94DE1618A4, 0x00001DE3, 0xFFFFF80D, 0x00000319, 0x000016FA, 0xFFFFFB42, 0x000002BC, 0x000016FA, 0xFFFFFB42, 0x000002BC },
{ 0x0213EA94DE1E2844, 0x00001F1B, 0xFFFFF6DE, 0x00000346, 0x00001502, 0xFFFFFC23, 0x00000298, 0x00001502, 0xFFFFFC23, 0x00000298 },
{ 0x0213EA94DE061864, 0x00003203, 0xFFFFEA87, 0x000004FE, 0x0000194E, 0xFFFFF8E3, 0x000002EC, 0x0000194E, 0xFFFFF8E3, 0x000002EC },
{ 0x0213F0FD42D02144, 0x0000337A, 0xFFFFE8DD, 0x00000551, 0x00001E3C, 0xFFFFF534, 0x00000385, 0x00001E3C, 0xFFFFF534, 0x00000385 },
{ 0x0213F0FD42CA4864, 0x000036F6, 0xFFFFE62A, 0x000005C5, 0x000023C0, 0xFFFFF117, 0x00000435, 0x000023C0, 0xFFFFF117, 0x00000435 },
{ 0x0213F0FD42CC2144, 0x00003125, 0xFFFFEA4E, 0x0000051A, 0x00001E6C, 0xFFFFF503, 0x0000038E, 0x00001E6C, 0xFFFFF503, 0x0000038E },
{ 0x0213EA94DE1C08A4, 0x00001CD4, 0xFFFFF82D, 0x0000030E, 0x0000156D, 0xFFFFFB64, 0x000002B8, 0x0000156D, 0xFFFFFB64, 0x000002B8 },
{ 0x0213EA94DE0240A4, 0x00002F14, 0xFFFFEC46, 0x000004B8, 0x000017F1, 0xFFFFF977, 0x000002D2, 0x000017F1, 0xFFFFF977, 0x000002D2 },
{ 0x0213EA94DE0640A4, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x0000184C, 0xFFFFF983, 0x000002D4, 0x0000184C, 0xFFFFF983, 0x000002D4 },
{ 0x0213F0FD42D04984, 0x00002EA9, 0xFFFFEBD7, 0x000004D5, 0x0000288D, 0xFFFFEDDB, 0x000004C0, 0x0000288D, 0xFFFFEDDB, 0x000004C0 },
{ 0x0213F0FD42CA3984, 0x0000335F, 0xFFFFE82C, 0x00000579, 0x00001DBF, 0xFFFFF512, 0x0000038C, 0x00001DBF, 0xFFFFF512, 0x0000038C },
{ 0x0213EA94DE201184, 0x0000224F, 0xFFFFF4B5, 0x00000391, 0x0000138C, 0xFFFFFCC3, 0x0000027A, 0x0000138C, 0xFFFFFCC3, 0x0000027A },
{ 0x0213EA94DE1240A4, 0x0000320D, 0xFFFFEACD, 0x000004F5, 0x00001976, 0xFFFFF913, 0x000002E2, 0x00001976, 0xFFFFF913, 0x000002E2 },
{ 0x0213EA94DE202104, 0x00001BEB, 0xFFFFF99C, 0x000002E4, 0x000016A4, 0xFFFFFB77, 0x000002C3, 0x000016A4, 0xFFFFFB77, 0x000002C3 },
{ 0x0213EA94DE063044, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x000018F4, 0xFFFFF91A, 0x000002E3, 0x000018F4, 0xFFFFF91A, 0x000002E3 },
{ 0x0213EA94DE022864, 0x00003251, 0xFFFFEA8E, 0x000004FA, 0x000018EF, 0xFFFFF910, 0x000002E4, 0x000018EF, 0xFFFFF910, 0x000002E4 },
{ 0x0213EA94DE1C1924, 0x00001DAF, 0xFFFFF857, 0x0000030D, 0x00001915, 0xFFFFF9D8, 0x000002F7, 0x00001915, 0xFFFFF9D8, 0x000002F7 },
{ 0x0213EA94DE2041A4, 0x000025B6, 0xFFFFF26B, 0x000003E5, 0x00001531, 0xFFFFFB68, 0x000002AF, 0x00001531, 0xFFFFFB68, 0x000002AF },
{ 0x0213EA94DE061884, 0x00002B2E, 0xFFFFEF2E, 0x00000440, 0x00001968, 0xFFFFF91A, 0x000002DF, 0x00001968, 0xFFFFF91A, 0x000002DF },
{ 0x0213EA94DE1C2064, 0x00002305, 0xFFFFF528, 0x00000377, 0x000018A4, 0xFFFFF9EB, 0x000002F0, 0x000018A4, 0xFFFFF9EB, 0x000002F0 },
{ 0x0213F0FD42CA40C4, 0x000032A1, 0xFFFFE992, 0x0000052E, 0x00001A55, 0xFFFFF826, 0x000002FE, 0x00001A55, 0xFFFFF826, 0x000002FE },
{ 0x0213EA94DE042184, 0x00002CCD, 0xFFFFEE35, 0x00000462, 0x00001B09, 0xFFFFF7E6, 0x0000030F, 0x00001B09, 0xFFFFF7E6, 0x0000030F },
{ 0x0213EA94DE323084, 0x00002602, 0xFFFFF2CF, 0x000003C5, 0x000016EE, 0xFFFFFAD4, 0x000002B4, 0x000016EE, 0xFFFFFAD4, 0x000002B4 },
{ 0x0213F0FD42D01964, 0x00003370, 0xFFFFE891, 0x00000560, 0x000017F0, 0xFFFFF930, 0x000002DF, 0x000017F0, 0xFFFFF930, 0x000002DF },
{ 0x0213F0FD42CA1884, 0x00002EDC, 0xFFFFEB6D, 0x000004EC, 0x000016E6, 0xFFFFF9ED, 0x000002BC, 0x000016E6, 0xFFFFF9ED, 0x000002BC },
{ 0x0213EA94DE1228C4, 0x00002A05, 0xFFFFF13D, 0x000003F0, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
{ 0x0213F0FD42CE2044, 0x00002F8A, 0xFFFFEB6E, 0x000004E4, 0x00001E3E, 0xFFFFF50E, 0x0000038D, 0x00001E3E, 0xFFFFF50E, 0x0000038D },
{ 0x0213F0FD42CA3044, 0x00002BB5, 0xFFFFED6A, 0x000004A1, 0x000017BF, 0xFFFFF937, 0x000002E5, 0x000017BF, 0xFFFFF937, 0x000002E5 },
{ 0x0213EA94DE201964, 0x0000202C, 0xFFFFF6CE, 0x0000033F, 0x000015EE, 0xFFFFFB83, 0x000002B9, 0x000015EE, 0xFFFFFB83, 0x000002B9 },
{ 0x0213EA94DE022884, 0x00002C0C, 0xFFFFEF10, 0x0000043F, 0x00001A73, 0xFFFFF83E, 0x0000030C, 0x00001A73, 0xFFFFF83E, 0x0000030C },
{ 0x0213EA94DE324104, 0x0000234F, 0xFFFFF460, 0x00000385, 0x000018C3, 0xFFFFF9A5, 0x000002DD, 0x000018C3, 0xFFFFF9A5, 0x000002DD },
{ 0x0213F0FD42CE1904, 0x00003679, 0xFFFFE704, 0x00000595, 0x00002177, 0xFFFFF31A, 0x000003D7, 0x00002177, 0xFFFFF31A, 0x000003D7 },
{ 0x0213F0FD42CA2924, 0x00003008, 0xFFFFEBB8, 0x000004D5, 0x000024FF, 0xFFFFF112, 0x00000430, 0x000024FF, 0xFFFFF112, 0x00000430 },
{ 0x0213F0FD42C641A4, 0x00003848, 0xFFFFE6A3, 0x00000594, 0x00002958, 0xFFFFEE37, 0x000004A0, 0x00002958, 0xFFFFEE37, 0x000004A0 },
{ 0x0213F0FD42CC1924, 0x00002FDF, 0xFFFFEB08, 0x000004FD, 0x00001D77, 0xFFFFF58B, 0x0000037A, 0x00001D77, 0xFFFFF58B, 0x0000037A },
{ 0x0213EA94DE063064, 0x00002EC8, 0xFFFFED41, 0x00000481, 0x00001949, 0xFFFFF91C, 0x000002DF, 0x00001949, 0xFFFFF91C, 0x000002DF },
{ 0x0213F0FD42D041A4, 0x000037C1, 0xFFFFE5BA, 0x000005D7, 0x0000252C, 0xFFFFF023, 0x00000460, 0x0000252C, 0xFFFFF023, 0x00000460 },
{ 0x0213F0FD42CE2944, 0x00003716, 0xFFFFE70C, 0x0000058A, 0x000028CC, 0xFFFFEE57, 0x0000049D, 0x000028CC, 0xFFFFEE57, 0x0000049D },
{ 0x0213F0FD42CA40E4, 0x000033D1, 0xFFFFE8E8, 0x00000547, 0x00001AB1, 0xFFFFF7E5, 0x00000309, 0x00001AB1, 0xFFFFF7E5, 0x00000309 },
{ 0x0213F0FD42CC2944, 0x00002D72, 0xFFFFED65, 0x0000048E, 0x00001E0D, 0xFFFFF5A7, 0x00000370, 0x00001E0D, 0xFFFFF5A7, 0x00000370 },
{ 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
{ 0x0213EA94DE243044, 0x000026EE, 0xFFFFF18C, 0x000003F7, 0x000018A7, 0xFFFFF95A, 0x000002E5, 0x000018A7, 0xFFFFF95A, 0x000002E5 },
{ 0x0213EA94DE042164, 0x00002F62, 0xFFFFEC9B, 0x000004A4, 0x0000194E, 0xFFFFF932, 0x000002D9, 0x0000194E, 0xFFFFF932, 0x000002D9 },
{ 0x0213EA94DE1E3984, 0x00001CE8, 0xFFFFF7FA, 0x0000031C, 0x000014CE, 0xFFFFFBD4, 0x000002AB, 0x000014CE, 0xFFFFFBD4, 0x000002AB },
{ 0x0213EA94DE1210E4, 0x00002E5A, 0xFFFFEDAB, 0x0000047C, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
{ 0x0213F0FD42CC30E4, 0x00003057, 0xFFFFEC34, 0x000004B9, 0x00002296, 0xFFFFF342, 0x000003D0, 0x00002296, 0xFFFFF342, 0x000003D0 },
{ 0x0213EA94DE0418A4, 0x00002B0F, 0xFFFFEF58, 0x00000434, 0x00001BFD, 0xFFFFF721, 0x00000330, 0x00001BFD, 0xFFFFF721, 0x00000330 },
{ 0x0213EA94DE2010A4, 0x00001F01, 0xFFFFF751, 0x0000032F, 0x00001502, 0xFFFFFC3E, 0x00000296, 0x00001502, 0xFFFFFC3E, 0x00000296 },
{ 0x0213F0FD42CA3064, 0x00002FF4, 0xFFFFEAE2, 0x00000503, 0x00001B36, 0xFFFFF736, 0x00000330, 0x00001B36, 0xFFFFF736, 0x00000330 },
{ 0x0213F0FD42CE2064, 0x00003762, 0xFFFFE5AB, 0x000005DE, 0x000018CB, 0xFFFFF896, 0x000002F4, 0x000018CB, 0xFFFFF896, 0x000002F4 },
{ 0x0213F0FD42CC2064, 0x00002890, 0xFFFFEF92, 0x00000445, 0x0000191D, 0xFFFFF86F, 0x00000302, 0x0000191D, 0xFFFFF86F, 0x00000302 },
{ 0x0213EA94DE043064, 0x00002F76, 0xFFFFEC0E, 0x000004BF, 0x00001F7D, 0xFFFFF41A, 0x000003C0, 0x00001F7D, 0xFFFFF41A, 0x000003C0 },
{ 0x0213EA94DE1E08A4, 0x00001D55, 0xFFFFF7F8, 0x0000031E, 0x000015DF, 0xFFFFFB79, 0x000002B7, 0x000015DF, 0xFFFFFB79, 0x000002B7 },
{ 0x0213EA94DE204924, 0x00001FE9, 0xFFFFF64A, 0x00000353, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
{ 0x0213EA94DE063964, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001857, 0xFFFFF968, 0x000002D8, 0x00001857, 0xFFFFF968, 0x000002D8 },
{ 0x0213F0FD42CA28C4, 0x00003398, 0xFFFFE9A3, 0x00000524, 0x00001FF9, 0xFFFFF458, 0x000003AD, 0x00001FF9, 0xFFFFF458, 0x000003AD },
{ 0x0213F0FD42CE2964, 0x00003897, 0xFFFFE5BD, 0x000005C8, 0x00002519, 0xFFFFF0BA, 0x00000438, 0x00002519, 0xFFFFF0BA, 0x00000438 },
{ 0x0213F0FD42D04064, 0x00003234, 0xFFFFE9B1, 0x00000530, 0x000022CC, 0xFFFFF20E, 0x00000409, 0x000022CC, 0xFFFFF20E, 0x00000409 },
{ 0x0213EA94DE205104, 0x00001FD2, 0xFFFFF641, 0x00000354, 0x000017C9, 0xFFFFF9C0, 0x000002FB, 0x000017C9, 0xFFFFF9C0, 0x000002FB },
{ 0x0213F0FD42CE48E4, 0x00003234, 0xFFFFE946, 0x0000053D, 0x00002267, 0xFFFFF1F5, 0x0000040D, 0x00002267, 0xFFFFF1F5, 0x0000040D },
{ 0x0213EA94DE2029A4, 0x00002330, 0xFFFFF474, 0x00000399, 0x00001490, 0xFFFFFC67, 0x00000288, 0x00001490, 0xFFFFFC67, 0x00000288 },
{ 0x0213F0FD42D03924, 0x000032A3, 0xFFFFE9EB, 0x0000051B, 0x0000234D, 0xFFFFF23C, 0x000003F7, 0x0000234D, 0xFFFFF23C, 0x000003F7 },
{ 0x0213EA94DE200904, 0x0000217E, 0xFFFFF53A, 0x00000384, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
{ 0x0213F0FD42CE50E4, 0x0000384F, 0xFFFFE562, 0x000005E2, 0x0000295A, 0xFFFFED53, 0x000004D3, 0x0000295A, 0xFFFFED53, 0x000004D3 },
{ 0x0213F0FD42D05124, 0x00003315, 0xFFFFE8D1, 0x00000552, 0x000025D1, 0xFFFFEFAF, 0x00000471, 0x000025D1, 0xFFFFEFAF, 0x00000471 },
{ 0x0213F0FD42C64924, 0x00004183, 0xFFFFDF61, 0x000006DA, 0x0000193C, 0xFFFFF88F, 0x000002EC, 0x0000193C, 0xFFFFF88F, 0x000002EC },
{ 0x0213EA94DE242164, 0x00002DFC, 0xFFFFEDF2, 0x0000047A, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
{ 0x0213F0FD42CA31A4, 0x000033FE, 0xFFFFE774, 0x0000059F, 0x00001E70, 0xFFFFF492, 0x000003A0, 0x00001E70, 0xFFFFF492, 0x000003A0 },
{ 0x0213F0FD42C629A4, 0x000040D7, 0xFFFFDFB8, 0x000006CE, 0x00001AC8, 0xFFFFF773, 0x0000031D, 0x00001AC8, 0xFFFFF773, 0x0000031D },
{ 0x0213EA94DE1E1164, 0x00001D02, 0xFFFFF803, 0x00000322, 0x000015FE, 0xFFFFFB71, 0x000002BB, 0x000015FE, 0xFFFFFB71, 0x000002BB },
{ 0x0213F0FD42D02884, 0x00002EB0, 0xFFFFEC31, 0x000004C4, 0x00001B3C, 0xFFFFF73B, 0x00000330, 0x00001B3C, 0xFFFFF73B, 0x00000330 },
{ 0x0213F0FD42CA4984, 0x00002D9F, 0xFFFFECBF, 0x000004A8, 0x000022B0, 0xFFFFF23C, 0x000003F9, 0x000022B0, 0xFFFFF23C, 0x000003F9 },
{ 0x0213F0FD42CC18E4, 0x00002C6A, 0xFFFFEDAC, 0x00000488, 0x00002419, 0xFFFFF159, 0x00000427, 0x00002419, 0xFFFFF159, 0x00000427 },
{ 0x0213EA94DE1210A4, 0x00002991, 0xFFFFF06C, 0x0000040E, 0x00001AA9, 0xFFFFF8D0, 0x000002E1, 0x00001AA9, 0xFFFFF8D0, 0x000002E1 },
{ 0x0213EA94DE123904, 0x00002F8E, 0xFFFFED1B, 0x00000493, 0x00001DE4, 0xFFFFF69C, 0x00000347, 0x00001DE4, 0xFFFFF69C, 0x00000347 },
{ 0x0213EA94DE204184, 0x00002136, 0xFFFFF540, 0x0000037C, 0x000014FF, 0xFFFFFB83, 0x000002B2, 0x000014FF, 0xFFFFFB83, 0x000002B2 },
{ 0x0213EA94DE0618E4, 0x0000354C, 0xFFFFE97D, 0x0000051A, 0x00001906, 0xFFFFF965, 0x000002DD, 0x00001906, 0xFFFFF965, 0x000002DD },
{ 0x0213F0FD42C620C4, 0x0000348B, 0xFFFFE94D, 0x0000051F, 0x0000285B, 0xFFFFEF1A, 0x00000473, 0x0000285B, 0xFFFFEF1A, 0x00000473 },
{ 0x0213EA94DE3218A4, 0x000026E6, 0xFFFFF24E, 0x000003D6, 0x0000141F, 0xFFFFFCCE, 0x00000260, 0x0000141F, 0xFFFFFCCE, 0x00000260 },
{ 0x0213F0FD42C64164, 0x00003CED, 0xFFFFE2A5, 0x0000064E, 0x00002060, 0xFFFFF3E0, 0x000003B0, 0x00002060, 0xFFFFF3E0, 0x000003B0 },
{ 0x0213EA94DE021084, 0x000029D4, 0xFFFFEFF7, 0x00000426, 0x00001976, 0xFFFFF8E1, 0x000002EE, 0x00001976, 0xFFFFF8E1, 0x000002EE },
{ 0x0213F0FD42CA40A4, 0x00003767, 0xFFFFE601, 0x000005CC, 0x00001D22, 0xFFFFF5F4, 0x00000361, 0x00001D22, 0xFFFFF5F4, 0x00000361 },
{ 0x0213F0FD42C650C4, 0x00003CE8, 0xFFFFE2E8, 0x00000637, 0x0000232C, 0xFFFFF1E7, 0x00000405, 0x0000232C, 0xFFFFF1E7, 0x00000405 },
{ 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
{ 0x0213F0FD42CC30A4, 0x00003451, 0xFFFFE8B9, 0x00000551, 0x00001AD7, 0xFFFFF7BF, 0x00000318, 0x00001AD7, 0xFFFFF7BF, 0x00000318 },
{ 0x0213F0FD42CE2984, 0x0000381B, 0xFFFFE5A0, 0x000005D0, 0x00001E0F, 0xFFFFF521, 0x00000382, 0x00001E0F, 0xFFFFF521, 0x00000382 },
{ 0x0213EA94DE2038C4, 0x000023A4, 0xFFFFF4A6, 0x00000394, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
{ 0x0213F0FD42C620A4, 0x00003C2B, 0xFFFFE447, 0x000005F0, 0x0000207F, 0xFFFFF44E, 0x0000039A, 0x0000207F, 0xFFFFF44E, 0x0000039A },
{ 0x0213F0FD42CC3984, 0x00002F07, 0xFFFFEB70, 0x000004E9, 0x00001765, 0xFFFFF9A5, 0x000002C6, 0x00001765, 0xFFFFF9A5, 0x000002C6 },
{ 0x0213F0FD42C62984, 0x00003A01, 0xFFFFE4E0, 0x000005E7, 0x0000227A, 0xFFFFF292, 0x000003E5, 0x0000227A, 0xFFFFF292, 0x000003E5 },
{ 0x0213F0FD42CE20A4, 0x0000376E, 0xFFFFE686, 0x000005A6, 0x00001FCF, 0xFFFFF43B, 0x000003A8, 0x00001FCF, 0xFFFFF43B, 0x000003A8 },
{ 0x0213F0FFEF5A4984, 0x0000485F, 0xFFFFDCC1, 0x00000713, 0x00002CF8, 0xFFFFEC45, 0x000004DA, 0x00002CF8, 0xFFFFEC45, 0x000004DA },
{ 0x0213F0FFEF5C3184, 0x0000331C, 0xFFFFE8FF, 0x00000541, 0x00002366, 0xFFFFF19D, 0x00000411, 0x00002366, 0xFFFFF19D, 0x00000411 },
{ 0x0213F0FFEF643864, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002FB3, 0xFFFFE827, 0x000005B9, 0x00002FB3, 0xFFFFE827, 0x000005B9 },
{ 0x0213EA94DE321104, 0x000023F3, 0xFFFFF3EA, 0x0000039A, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
{ 0x0213F0FFEF5C28A4, 0x000038C0, 0xFFFFE58A, 0x000005CC, 0x000023CA, 0xFFFFF1AA, 0x00000408, 0x000023CA, 0xFFFFF1AA, 0x00000408 },
{ 0x0213F0FFEF662944, 0x00004976, 0xFFFFDD6A, 0x000006D7, 0x000033C6, 0xFFFFE8EB, 0x0000054D, 0x000033C6, 0xFFFFE8EB, 0x0000054D },
{ 0x0213F0FFEF644904, 0x00004049, 0xFFFFDF6D, 0x000006D8, 0x00003129, 0xFFFFE716, 0x000005E9, 0x00003129, 0xFFFFE716, 0x000005E9 },
{ 0x0213F0FFEF661164, 0x000046C2, 0xFFFFDCEB, 0x0000071C, 0x00002E6D, 0xFFFFEA8F, 0x0000052E, 0x00002E6D, 0xFFFFEA8F, 0x0000052E },
{ 0x0213F0FFEF6238A4, 0x00004080, 0xFFFFE1E1, 0x0000063A, 0x0000396D, 0xFFFFE40A, 0x0000062C, 0x0000396D, 0xFFFFE40A, 0x0000062C },
{ 0x0213F0FFEF5E2124, 0x00003DE0, 0xFFFFE358, 0x0000060C, 0x00002AA2, 0xFFFFEDBF, 0x000004A0, 0x00002AA2, 0xFFFFEDBF, 0x000004A0 },
{ 0x0213F0FFEF5E3144, 0x00003FC0, 0xFFFFE2A1, 0x0000061A, 0x000027D8, 0xFFFFEFEC, 0x0000043A, 0x000027D8, 0xFFFFEFEC, 0x0000043A },
{ 0x0213F0FFEF661924, 0x00003FBF, 0xFFFFE2F5, 0x00000603, 0x000032D7, 0xFFFFE900, 0x00000552, 0x000032D7, 0xFFFFE900, 0x00000552 },
{ 0x0213F0FFEF5C10E4, 0x000035EE, 0xFFFFE6CA, 0x000005A2, 0x0000247C, 0xFFFFF088, 0x00000446, 0x0000247C, 0xFFFFF088, 0x00000446 },
{ 0x0213F0FFEF643884, 0x000039C8, 0xFFFFE3AE, 0x0000062A, 0x000028AF, 0xFFFFED24, 0x000004DF, 0x000028AF, 0xFFFFED24, 0x000004DF },
{ 0x0213F0FFEF5C2884, 0x00003BDE, 0xFFFFE33B, 0x00000632, 0x00001B6C, 0xFFFFF720, 0x00000326, 0x00001B6C, 0xFFFFF720, 0x00000326 },
{ 0x0213F0FFEF7210A4, 0x00003818, 0xFFFFE57D, 0x000005D4, 0x000020EF, 0xFFFFF327, 0x000003CE, 0x000020EF, 0xFFFFF327, 0x000003CE },
{ 0x0213F0FFEF5E19A4, 0x000038DA, 0xFFFFE561, 0x000005D3, 0x0000297D, 0xFFFFED6D, 0x000004C5, 0x0000297D, 0xFFFFED6D, 0x000004C5 },
{ 0x0213F0FFEF684884, 0x000027AC, 0xFFFFF0CE, 0x00000417, 0x00001F5F, 0xFFFFF484, 0x000003B2, 0x00001F5F, 0xFFFFF484, 0x000003B2 },
{ 0x0213F0FFEF6648A4, 0x00003F02, 0xFFFFE222, 0x00000643, 0x000026D4, 0xFFFFF000, 0x00000443, 0x000026D4, 0xFFFFF000, 0x00000443 },
{ 0x0213F0FFEF624164, 0x00004303, 0xFFFFDFE3, 0x00000690, 0x0000312C, 0xFFFFE912, 0x00000561, 0x0000312C, 0xFFFFE912, 0x00000561 },
{ 0x0213F0FFEF600904, 0x000039E5, 0xFFFFE31F, 0x00000657, 0x00001D23, 0xFFFFF51F, 0x00000386, 0x00001D23, 0xFFFFF51F, 0x00000386 },
{ 0x0213F0FFEF661144, 0x000041FA, 0xFFFFE01B, 0x00000697, 0x00002767, 0xFFFFEF90, 0x00000455, 0x00002767, 0xFFFFEF90, 0x00000455 },
{ 0x0213F0FFEF6830A4, 0x00002888, 0xFFFFF11C, 0x00000403, 0x00001864, 0xFFFFF9D8, 0x000002D3, 0x00001864, 0xFFFFF9D8, 0x000002D3 },
{ 0x0213EA94DE201864, 0x0000215C, 0xFFFFF5B6, 0x0000036D, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
{ 0x0213F0FFEF683984, 0x00002FAF, 0xFFFFEC27, 0x000004CA, 0x00002184, 0xFFFFF39C, 0x000003CD, 0x00002184, 0xFFFFF39C, 0x000003CD },
{ 0x0213F0FFEF5E10C4, 0x00004ACE, 0xFFFFD9A3, 0x000007BC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC },
{ 0x0213F0FFEF5A3044, 0x00003763, 0xFFFFE797, 0x0000055F, 0x000029B5, 0xFFFFEEA1, 0x00000474, 0x000029B5, 0xFFFFEEA1, 0x00000474 },
{ 0x0213F0FFEF5E3164, 0x00003832, 0xFFFFE6F9, 0x00000575, 0x00002C99, 0xFFFFEC42, 0x000004E3, 0x00002C99, 0xFFFFEC42, 0x000004E3 },
{ 0x0213F0FFEF604164, 0x000041C9, 0xFFFFDE33, 0x0000071E, 0x0000199D, 0xFFFFF808, 0x000002F9, 0x0000199D, 0xFFFFF808, 0x000002F9 },
{ 0x0213F0FFEF641164, 0x0000474A, 0xFFFFD96E, 0x00000802, 0x00002A30, 0xFFFFEB57, 0x0000053F, 0x00002A30, 0xFFFFEB57, 0x0000053F },
{ 0x0213F0FFEF5C31C4, 0x0000312F, 0xFFFFEA6A, 0x00000508, 0x000029D3, 0xFFFFED38, 0x000004D3, 0x000029D3, 0xFFFFED38, 0x000004D3 },
{ 0x0213F0FFEF7210C4, 0x00003BD6, 0xFFFFE2E7, 0x00000644, 0x00002093, 0xFFFFF37B, 0x000003BD, 0x00002093, 0xFFFFF37B, 0x000003BD },
{ 0x0213F0FFEF6840E4, 0x00002F94, 0xFFFFECD4, 0x000004A3, 0x00002196, 0xFFFFF40B, 0x000003B5, 0x00002196, 0xFFFFF40B, 0x000003B5 },
{ 0x0213F0FFEF5E1944, 0x0000369B, 0xFFFFE762, 0x00000571, 0x00002726, 0xFFFFEF99, 0x00000459, 0x00002726, 0xFFFFEF99, 0x00000459 },
{ 0x0213F0FFEF642064, 0x00003F57, 0xFFFFDF47, 0x000006F4, 0x00002E5F, 0xFFFFE8AE, 0x000005AB, 0x00002E5F, 0xFFFFE8AE, 0x000005AB },
{ 0x0213EA94DE0A40C4, 0x00004313, 0xFFFFDD81, 0x0000072D, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
{ 0x0213F0FFEF683044, 0x00002A35, 0xFFFFEFA8, 0x00000441, 0x00001F3F, 0xFFFFF4F3, 0x000003A0, 0x00001F3F, 0xFFFFF4F3, 0x000003A0 },
{ 0x0213F0FFEF6630A4, 0x00003E33, 0xFFFFE4B0, 0x000005AF, 0x00002802, 0xFFFFF092, 0x00000412, 0x00002802, 0xFFFFF092, 0x00000412 },
{ 0x0213EA94DE323904, 0x00002815, 0xFFFFF20E, 0x000003DD, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
{ 0x0213F0FFEF5A2184, 0x00003CC2, 0xFFFFE43E, 0x000005DE, 0x00002C16, 0xFFFFECED, 0x000004BA, 0x00002C16, 0xFFFFECED, 0x000004BA },
{ 0x0213F0FFEF5C4084, 0x00003CFA, 0xFFFFE1EE, 0x00000673, 0x00001F7D, 0xFFFFF402, 0x000003AE, 0x00001F7D, 0xFFFFF402, 0x000003AE },
{ 0x0213F0FFEF622104, 0x0000486E, 0xFFFFDD43, 0x000006EE, 0x000036F0, 0xFFFFE609, 0x000005D5, 0x000036F0, 0xFFFFE609, 0x000005D5 },
{ 0x0213F0FFEF5C4964, 0x000039FE, 0xFFFFE41F, 0x00000613, 0x0000266C, 0xFFFFEF35, 0x0000047D, 0x0000266C, 0xFFFFEF35, 0x0000047D },
{ 0x0213EA94DE123124, 0x00002EA4, 0xFFFFEE3B, 0x00000462, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
{ 0x0213F0FFEF683944, 0x00002D2E, 0xFFFFEE7B, 0x00000462, 0x0000229D, 0xFFFFF363, 0x000003D4, 0x0000229D, 0xFFFFF363, 0x000003D4 },
{ 0x0213F0FFEF5E2844, 0x0000375C, 0xFFFFE695, 0x0000059D, 0x00002319, 0xFFFFF237, 0x000003EE, 0x00002319, 0xFFFFF237, 0x000003EE },
{ 0x0213F0FFEF7250C4, 0x00004522, 0xFFFFDC71, 0x0000075E, 0x0000247E, 0xFFFFF0A0, 0x0000043C, 0x0000247E, 0xFFFFF0A0, 0x0000043C },
{ 0x0213EA94DE1248E4, 0x00002E58, 0xFFFFECB9, 0x000004A9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
{ 0x0213F0FFEF6438E4, 0x00003791, 0xFFFFE5FE, 0x000005B6, 0x000029F5, 0xFFFFED0D, 0x000004CD, 0x000029F5, 0xFFFFED0D, 0x000004CD },
{ 0x0213EA94DE244144, 0x00002E9E, 0xFFFFEC8D, 0x000004C1, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
{ 0x0213EA94DE203964, 0x0000237C, 0xFFFFF435, 0x000003A6, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
{ 0x0213F0FFEF662924, 0x00003FE5, 0xFFFFE4A2, 0x000005A0, 0x00003416, 0xFFFFE995, 0x00000523, 0x00003416, 0xFFFFE995, 0x00000523 },
{ 0x0213F0FFEF5C0924, 0x00002B27, 0xFFFFED51, 0x000004A5, 0x000025D1, 0xFFFFEF18, 0x00000492, 0x000025D1, 0xFFFFEF18, 0x00000492 },
{ 0x0213F0FFEF684904, 0x00002D77, 0xFFFFED79, 0x00000494, 0x00002196, 0xFFFFF352, 0x000003DE, 0x00002196, 0xFFFFF352, 0x000003DE },
{ 0x0213F0FFEF5C20C4, 0x00003750, 0xFFFFE6AC, 0x00000596, 0x00002524, 0xFFFFF0B5, 0x00000431, 0x00002524, 0xFFFFF0B5, 0x00000431 },
{ 0x0213EA94DE122944, 0x00002896, 0xFFFFF1BB, 0x000003D9, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
{ 0x0213F0FFEF641984, 0x00003CA7, 0xFFFFE0F7, 0x000006B1, 0x00002CB8, 0xFFFFE9AB, 0x00000587, 0x00002CB8, 0xFFFFE9AB, 0x00000587 },
{ 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
{ 0x0213F0FFEF662164, 0x00003914, 0xFFFFE683, 0x00000586, 0x00003120, 0xFFFFE9A6, 0x00000543, 0x00003120, 0xFFFFE9A6, 0x00000543 },
{ 0x0213F0FFEF643904, 0x000040D0, 0xFFFFE007, 0x000006AC, 0x00002B9E, 0xFFFFEBF5, 0x000004FB, 0x00002B9E, 0xFFFFEBF5, 0x000004FB },
{ 0x0213F0FFEF5A4884, 0x00004412, 0xFFFFDF5F, 0x000006A9, 0x00002A9E, 0xFFFFEDCE, 0x00000498, 0x00002A9E, 0xFFFFEDCE, 0x00000498 },
{ 0x0213F0FFEF624884, 0x000042A6, 0xFFFFDFEF, 0x00000696, 0x00002E65, 0xFFFFEAAE, 0x00000529, 0x00002E65, 0xFFFFEAAE, 0x00000529 },
{ 0x0213EA94DE322124, 0x000022E8, 0xFFFFF565, 0x0000035F, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
{ 0x0213F0FFEF6239A4, 0x00004637, 0xFFFFDDD8, 0x000006E9, 0x0000349D, 0xFFFFE6C8, 0x000005C7, 0x0000349D, 0xFFFFE6C8, 0x000005C7 },
{ 0x0213EA94DE263904, 0x00004686, 0xFFFFDC58, 0x0000073D, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
{ 0x0213F0FFEF6808E4, 0x00002B35, 0xFFFFEE9C, 0x0000046C, 0x00001F5B, 0xFFFFF4A3, 0x000003A9, 0x00001F5B, 0xFFFFF4A3, 0x000003A9 },
{ 0x0213F0FFEF724144, 0x00003AC9, 0xFFFFE3B2, 0x0000061B, 0x000023A1, 0xFFFFF170, 0x0000040F, 0x000023A1, 0xFFFFF170, 0x0000040F },
{ 0x0213F0FFEF5E1884, 0x00003C50, 0xFFFFE37E, 0x00000617, 0x0000218F, 0xFFFFF339, 0x000003C4, 0x0000218F, 0xFFFFF339, 0x000003C4 },
{ 0x0213F0FFEF663044, 0x00003793, 0xFFFFE761, 0x0000055D, 0x000029C7, 0xFFFFEE03, 0x00000496, 0x000029C7, 0xFFFFEE03, 0x00000496 },
{ 0x0213F0FFEF6438A4, 0x000040B5, 0xFFFFDF78, 0x000006DA, 0x00002DED, 0xFFFFEA20, 0x00000551, 0x00002DED, 0xFFFFEA20, 0x00000551 },
{ 0x0213F0FFEF601144, 0x000039D6, 0xFFFFE37D, 0x0000063C, 0x00001AED, 0xFFFFF6E2, 0x00000331, 0x00001AED, 0xFFFFF6E2, 0x00000331 },
{ 0x0213F0FFEF662144, 0x0000431F, 0xFFFFE09B, 0x0000066A, 0x00002BDF, 0xFFFFED93, 0x00000496, 0x00002BDF, 0xFFFFED93, 0x00000496 },
{ 0x0213F0FFEF623864, 0x00004887, 0xFFFFDC65, 0x00000721, 0x00003669, 0xFFFFE5C4, 0x000005E9, 0x00003669, 0xFFFFE5C4, 0x000005E9 },
{ 0x0213F0FFEF640924, 0x00004120, 0xFFFFDDAE, 0x00000748, 0x0000303B, 0xFFFFE70D, 0x000005FC, 0x0000303B, 0xFFFFE70D, 0x000005FC },
{ 0x0213F0FFEF5E28A4, 0x0000415D, 0xFFFFE0BE, 0x0000067B, 0x00002FA7, 0xFFFFEA28, 0x00000538, 0x00002FA7, 0xFFFFEA28, 0x00000538 },
{ 0x0213F0FFEF681904, 0x00002B12, 0xFFFFEFF9, 0x00000428, 0x00001DDA, 0xFFFFF693, 0x00000356, 0x00001DDA, 0xFFFFF693, 0x00000356 },
{ 0x0213F0FFEF5E3184, 0x00003ED3, 0xFFFFE28D, 0x0000062D, 0x00002B00, 0xFFFFED4E, 0x000004B3, 0x00002B00, 0xFFFFED4E, 0x000004B3 },
{ 0x0213F0FFEF6250A4, 0x00004218, 0xFFFFE039, 0x0000068F, 0x00002F84, 0xFFFFEA0C, 0x00000541, 0x00002F84, 0xFFFFEA0C, 0x00000541 },
{ 0x0213F0FFEF5A3844, 0x00003FF5, 0xFFFFE2A3, 0x00000617, 0x00003017, 0xFFFFEA7A, 0x00000520, 0x00003017, 0xFFFFEA7A, 0x00000520 },
{ 0x0213F0FFEF5A08A4, 0x00004304, 0xFFFFDFCC, 0x0000069E, 0x00002E0C, 0xFFFFEB51, 0x00000505, 0x00002E0C, 0xFFFFEB51, 0x00000505 },
{ 0x0213F0FFEF641944, 0x00003D3A, 0xFFFFE17F, 0x00000687, 0x0000284C, 0xFFFFED83, 0x000004CD, 0x0000284C, 0xFFFFED83, 0x000004CD },
{ 0x0213F0FFEF5E40A4, 0x000042F5, 0xFFFFDF76, 0x000006B2, 0x000027B6, 0xFFFFEF72, 0x00000455, 0x000027B6, 0xFFFFEF72, 0x00000455 },
{ 0x0213F0FFEF5C38C4, 0x00004267, 0xFFFFDF29, 0x000006D5, 0x0000298F, 0xFFFFEDBD, 0x000004AC, 0x0000298F, 0xFFFFEDBD, 0x000004AC },
{ 0x0213EA94DE240924, 0x0000303E, 0xFFFFEC00, 0x000004CB, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
{ 0x0213F0FFEF5E28C4, 0x00003127, 0xFFFFEBDB, 0x000004A6, 0x00002E95, 0xFFFFEB78, 0x000004F3, 0x00002E95, 0xFFFFEB78, 0x000004F3 },
{ 0x0213EA94DE1C1064, 0x00002655, 0xFFFFF2D9, 0x000003CF, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
{ 0x0213EA94DE164084, 0x00002372, 0xFFFFF449, 0x0000039B, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
{ 0x0213F0FFEF6628C4, 0x0000348E, 0xFFFFEB20, 0x000004B2, 0x00002BE8, 0xFFFFEE80, 0x00000467, 0x00002BE8, 0xFFFFEE80, 0x00000467 },
{ 0x0213F0FFEF5E1104, 0x00004092, 0xFFFFE073, 0x0000069B, 0x00002061, 0xFFFFF403, 0x000003A0, 0x00002061, 0xFFFFF403, 0x000003A0 },
{ 0x0213F0FFEF7220E4, 0x000039D1, 0xFFFFE55D, 0x000005CC, 0x000025CB, 0xFFFFF0C0, 0x00000428, 0x000025CB, 0xFFFFF0C0, 0x00000428 },
{ 0x0213F0FFEF5E4884, 0x000042AA, 0xFFFFDF68, 0x000006C2, 0x0000290B, 0xFFFFEE78, 0x00000485, 0x0000290B, 0xFFFFEE78, 0x00000485 },
{ 0x0213F0FFEF7218C4, 0x0000356F, 0xFFFFE7AC, 0x0000056E, 0x00001BE8, 0xFFFFF6E3, 0x0000032A, 0x00001BE8, 0xFFFFF6E3, 0x0000032A },
{ 0x0213F0FFEF5E1144, 0x00003525, 0xFFFFE7FF, 0x0000055D, 0x0000242C, 0xFFFFF12E, 0x0000041D, 0x0000242C, 0xFFFFF12E, 0x0000041D },
{ 0x0213F0FFEF5C48C4, 0x00003360, 0xFFFFE895, 0x00000550, 0x00002175, 0xFFFFF29E, 0x000003E9, 0x00002175, 0xFFFFF29E, 0x000003E9 },
{ 0x0213F0FFEF6440A4, 0x00003C94, 0xFFFFE1C4, 0x0000067E, 0x00002E28, 0xFFFFE964, 0x0000057F, 0x00002E28, 0xFFFFE964, 0x0000057F },
{ 0x0213F0FFEF724124, 0x0000431C, 0xFFFFDE4B, 0x000006FF, 0x00002270, 0xFFFFF268, 0x000003E5, 0x00002270, 0xFFFFF268, 0x000003E5 },
{ 0x0213EA94DE1218C4, 0x00002B67, 0xFFFFF01D, 0x00000414, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
{ 0x0213F0FFEF5E3984, 0x0000400B, 0xFFFFE13D, 0x0000066F, 0x000024F3, 0xFFFFF125, 0x00000417, 0x000024F3, 0xFFFFF125, 0x00000417 },
{ 0x0213F0FFEF5A20A4, 0x00004460, 0xFFFFE00E, 0x0000067B, 0x000023DF, 0xFFFFF2E6, 0x000003BB, 0x000023DF, 0xFFFFF2E6, 0x000003BB },
{ 0x0213F0FFEF641864, 0x00003AFB, 0xFFFFE2C5, 0x00000650, 0x00002D46, 0xFFFFE9C4, 0x00000571, 0x00002D46, 0xFFFFE9C4, 0x00000571 },
{ 0x0213F0FFEF622924, 0x00005482, 0xFFFFD5BC, 0x0000081A, 0x00003250, 0xFFFFE961, 0x00000541, 0x00003250, 0xFFFFE961, 0x00000541 },
{ 0x0213F0FFEF5C2944, 0x00003D27, 0xFFFFE2FA, 0x00000632, 0x00002A4D, 0xFFFFED6A, 0x000004BB, 0x00002A4D, 0xFFFFED6A, 0x000004BB },
{ 0x0213F0FFEF6018A4, 0x00003E03, 0xFFFFE142, 0x00000690, 0x00001E08, 0xFFFFF555, 0x0000036C, 0x00001E08, 0xFFFFF555, 0x0000036C },
{ 0x0213F0FFEF5C2064, 0x000031B5, 0xFFFFE97D, 0x00000535, 0x0000232E, 0xFFFFF166, 0x00000422, 0x0000232E, 0xFFFFF166, 0x00000422 },
{ 0x0213F0FFEF5E18E4, 0x00003753, 0xFFFFE724, 0x00000575, 0x0000281A, 0xFFFFEF1A, 0x0000046B, 0x0000281A, 0xFFFFEF1A, 0x0000046B },
{ 0x0213EA94DE204144, 0x00002071, 0xFFFFF5C9, 0x0000036F, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
{ 0x0213F0FFEF683144, 0x00002799, 0xFFFFF223, 0x000003CF, 0x00001CD3, 0xFFFFF74A, 0x00000333, 0x00001CD3, 0xFFFFF74A, 0x00000333 },
{ 0x0213F0FFEF6610C4, 0x000040DF, 0xFFFFE11C, 0x00000664, 0x000031D4, 0xFFFFE8BC, 0x0000056F, 0x000031D4, 0xFFFFE8BC, 0x0000056F },
{ 0x0213F0FFEF6440C4, 0x00003A4D, 0xFFFFE3A6, 0x00000627, 0x00002871, 0xFFFFEDA0, 0x000004C0, 0x00002871, 0xFFFFEDA0, 0x000004C0 },
{ 0x0213F0FFEF681984, 0x00002AF9, 0xFFFFEED7, 0x00000464, 0x0000219B, 0xFFFFF368, 0x000003D6, 0x0000219B, 0xFFFFF368, 0x000003D6 },
{ 0x0213EA94DE323124, 0x000026D5, 0xFFFFF36C, 0x000003A3, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
{ 0x0213F0FFEF5E2044, 0x0000325D, 0xFFFFEA07, 0x0000050B, 0x000026D1, 0xFFFFEFB3, 0x0000045A, 0x000026D1, 0xFFFFEFB3, 0x0000045A },
{ 0x0213F0FFEF682864, 0x00002F75, 0xFFFFEC64, 0x000004BE, 0x00001EEB, 0xFFFFF559, 0x00000386, 0x00001EEB, 0xFFFFF559, 0x00000386 },
{ 0x0213F0FFEF5A38A4, 0x00003C2F, 0xFFFFE541, 0x000005A3, 0x000025B6, 0xFFFFF16F, 0x000003FA, 0x000025B6, 0xFFFFF16F, 0x000003FA },
{ 0x0213F0FFEF684924, 0x00002BC2, 0xFFFFEE89, 0x0000046A, 0x00001D04, 0xFFFFF651, 0x00000361, 0x00001D04, 0xFFFFF651, 0x00000361 },
{ 0x0213F0FFEF6829A4, 0x00002DD0, 0xFFFFED40, 0x0000049F, 0x00001C8C, 0xFFFFF6B3, 0x00000353, 0x00001C8C, 0xFFFFF6B3, 0x00000353 },
{ 0x0213EA94DE1C08E4, 0x000021ED, 0xFFFFF530, 0x00000380, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
{ 0x0213EA94DE321904, 0x000028C7, 0xFFFFF160, 0x000003FD, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
{ 0x0213F0FFEF6610A4, 0x0000431C, 0xFFFFDF9D, 0x000006A3, 0x000034A6, 0xFFFFE6B0, 0x000005C9, 0x000034A6, 0xFFFFE6B0, 0x000005C9 },
{ 0x0213EA94DE2630A4, 0x00004115, 0xFFFFE0D6, 0x00000667, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
{ 0x0213F0FFEF643924, 0x0000424A, 0xFFFFDEEC, 0x000006E1, 0x0000346A, 0xFFFFE5EA, 0x00000602, 0x0000346A, 0xFFFFE5EA, 0x00000602 },
{ 0x0213F0FFEF661984, 0x00004990, 0xFFFFDAFA, 0x00000771, 0x00002A9C, 0xFFFFED37, 0x000004BC, 0x00002A9C, 0xFFFFED37, 0x000004BC },
{ 0x0213F0FFEF6428A4, 0x00003858, 0xFFFFE568, 0x000005D2, 0x00003030, 0xFFFFE8B0, 0x0000058E, 0x00003030, 0xFFFFE8B0, 0x0000058E },
{ 0x0213F0FFEF684164, 0x00001EDC, 0xFFFFF6CD, 0x00000322, 0x00001FCA, 0xFFFFF4BD, 0x0000039E, 0x00001FCA, 0xFFFFF4BD, 0x0000039E },
{ 0x0213F0FFEF662124, 0x00004C88, 0xFFFFDBA3, 0x0000071B, 0x000030C4, 0xFFFFEAFD, 0x000004F7, 0x000030C4, 0xFFFFEAFD, 0x000004F7 },
{ 0x0213F0FFEF680904, 0x00002B9A, 0xFFFFEE41, 0x0000047D, 0x00002131, 0xFFFFF344, 0x000003E5, 0x00002131, 0xFFFFF344, 0x000003E5 },
{ 0x0213F0FFEF623984, 0x00003E4B, 0xFFFFE33C, 0x000005FA, 0x00003877, 0xFFFFE437, 0x0000062E, 0x00003877, 0xFFFFE437, 0x0000062E },
{ 0x0213EA94DE322064, 0x00002376, 0xFFFFF444, 0x0000038A, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
{ 0x0213F0FFEF661084, 0x00004517, 0xFFFFDDF4, 0x000006F2, 0x000030DC, 0xFFFFE8EF, 0x00000571, 0x000030DC, 0xFFFFE8EF, 0x00000571 },
{ 0x0213F0FFEF681944, 0x0000270C, 0xFFFFF1F3, 0x000003DF, 0x0000207B, 0xFFFFF474, 0x000003AD, 0x0000207B, 0xFFFFF474, 0x000003AD },
{ 0x0213F0FFEF645144, 0x00004086, 0xFFFFDF39, 0x000006E3, 0x00002A24, 0xFFFFEC2B, 0x000004FF, 0x00002A24, 0xFFFFEC2B, 0x000004FF },
{ 0x0213F0FFEF5C3124, 0x00003BDE, 0xFFFFE45E, 0x000005EB, 0x00002CD5, 0xFFFFEC45, 0x000004DD, 0x00002CD5, 0xFFFFEC45, 0x000004DD },
{ 0x0213F0FFEF7230E4, 0x00003803, 0xFFFFE714, 0x00000579, 0x0000288A, 0xFFFFEF21, 0x0000046B, 0x0000288A, 0xFFFFEF21, 0x0000046B },
{ 0x0213F0FFEF601104, 0x00003F50, 0xFFFFE002, 0x000006CD, 0x00001AD4, 0xFFFFF72E, 0x0000031F, 0x00001AD4, 0xFFFFF72E, 0x0000031F },
{ 0x0213F0FFEF6820E4, 0x00002968, 0xFFFFF100, 0x00000402, 0x00001FB5, 0xFFFFF57C, 0x0000037F, 0x00001FB5, 0xFFFFF57C, 0x0000037F },
{ 0x0213F0FFEF662104, 0x00004283, 0xFFFFE2A7, 0x000005F5, 0x00003165, 0xFFFFEB0C, 0x000004EC, 0x00003165, 0xFFFFEB0C, 0x000004EC },
{ 0x0213F0FFEF6431A4, 0x00004253, 0xFFFFDDA8, 0x00000732, 0x00002E5C, 0xFFFFE90A, 0x00000593, 0x00002E5C, 0xFFFFE90A, 0x00000593 },
{ 0x0213F0FFEF5C50A4, 0x00003551, 0xFFFFE756, 0x0000058D, 0x000029A7, 0xFFFFED0C, 0x000004DE, 0x000029A7, 0xFFFFED0C, 0x000004DE },
{ 0x0213F0FFEF6428C4, 0x00003728, 0xFFFFE604, 0x000005C4, 0x00002832, 0xFFFFEE64, 0x00000493, 0x00002832, 0xFFFFEE64, 0x00000493 },
{ 0x0213F0FFEF623964, 0x00004796, 0xFFFFDCC8, 0x00000715, 0x000032AB, 0xFFFFE848, 0x0000057C, 0x000032AB, 0xFFFFE848, 0x0000057C },
{ 0x0213F0FFEF6210C4, 0x000049DF, 0xFFFFDB24, 0x0000075F, 0x00003076, 0xFFFFE967, 0x0000055C, 0x00003076, 0xFFFFE967, 0x0000055C },
{ 0x0213F0FFEF721104, 0x00003F13, 0xFFFFE099, 0x000006A8, 0x00002279, 0xFFFFF226, 0x000003F3, 0x00002279, 0xFFFFF226, 0x000003F3 },
{ 0x0213F0FFEF6430A4, 0x00003E03, 0xFFFFE19F, 0x00000674, 0x00002D66, 0xFFFFEAA7, 0x00000537, 0x00002D66, 0xFFFFEAA7, 0x00000537 },
{ 0x0213F0FFEF5C4104, 0x000037DA, 0xFFFFE63F, 0x000005A7, 0x00002543, 0xFFFFF0A0, 0x00000431, 0x00002543, 0xFFFFF0A0, 0x00000431 },
{ 0x0213F0FFEF624944, 0x00003D82, 0xFFFFE3F5, 0x000005D9, 0x0000332F, 0xFFFFE834, 0x00000577, 0x0000332F, 0xFFFFE834, 0x00000577 },
{ 0x0213EA94DE1228C4, 0x00002915, 0xFFFFF1E0, 0x000003D4, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
{ 0x0213F0FFEF5E4904, 0x000036FC, 0xFFFFE72D, 0x00000577, 0x00002811, 0xFFFFEF30, 0x00000464, 0x00002811, 0xFFFFEF30, 0x00000464 },
{ 0x0213F0FFEF623184, 0x00004767, 0xFFFFDD30, 0x000006FD, 0x00003703, 0xFFFFE564, 0x000005F8, 0x00003703, 0xFFFFE564, 0x000005F8 },
{ 0x0213F0FFEF603184, 0x00003094, 0xFFFFEAA9, 0x000004F5, 0x000022E7, 0xFFFFF200, 0x000003FB, 0x000022E7, 0xFFFFF200, 0x000003FB },
{ 0x0213F0FFEF641144, 0x00003EF0, 0xFFFFDF83, 0x000006ED, 0x00002A27, 0xFFFFEB7C, 0x00000537, 0x00002A27, 0xFFFFEB7C, 0x00000537 },
{ 0x0213F0FFEF681124, 0x0000243C, 0xFFFFF358, 0x000003AC, 0x00001DC4, 0xFFFFF5E9, 0x00000372, 0x00001DC4, 0xFFFFF5E9, 0x00000372 },
{ 0x0213F0FFEF722144, 0x0000284B, 0xFFFFF036, 0x0000040F, 0x00001FCD, 0xFFFFF445, 0x00000395, 0x00001FCD, 0xFFFFF445, 0x00000395 },
{ 0x0213F0FFEF6840C4, 0x00002611, 0xFFFFF285, 0x000003C7, 0x00001CFE, 0xFFFFF6A0, 0x00000355, 0x00001CFE, 0xFFFFF6A0, 0x00000355 },
{ 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
{ 0x0213F0FFEF5E38A4, 0x000037F3, 0xFFFFE68D, 0x00000590, 0x00002443, 0xFFFFF1AD, 0x000003FA, 0x00002443, 0xFFFFF1AD, 0x000003FA },
{ 0x0213F0FFEF682144, 0x00002C01, 0xFFFFEF3F, 0x00000444, 0x0000210A, 0xFFFFF475, 0x000003A7, 0x0000210A, 0xFFFFF475, 0x000003A7 },
{ 0x0213EA94DE1210E4, 0x00002C0E, 0xFFFFEF0F, 0x00000446, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
{ 0x0213F0FFEF5E20C4, 0x00003FA6, 0xFFFFE20A, 0x0000063F, 0x00002E29, 0xFFFFEB21, 0x00000510, 0x00002E29, 0xFFFFEB21, 0x00000510 },
{ 0x0213F0FFEF5C2164, 0x00003BCD, 0xFFFFE31B, 0x0000063C, 0x000019AF, 0xFFFFF83D, 0x000002F8, 0x000019AF, 0xFFFFF83D, 0x000002F8 },
{ 0x0213F0FFEF664164, 0x000044C8, 0xFFFFDF08, 0x000006B0, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
{ 0x0213F0FFEF5C1884, 0x00003790, 0xFFFFE571, 0x000005E3, 0x00002042, 0xFFFFF35D, 0x000003CF, 0x00002042, 0xFFFFF35D, 0x000003CF },
{ 0x0213F0FFEF6050E4, 0x000038AC, 0xFFFFE46C, 0x00000609, 0x0000215E, 0xFFFFF22D, 0x00000403, 0x0000215E, 0xFFFFF22D, 0x00000403 },
{ 0x0213F0FFEF5E29A4, 0x00003A1E, 0xFFFFE536, 0x000005C9, 0x000024F3, 0xFFFFF11A, 0x0000041B, 0x000024F3, 0xFFFFF11A, 0x0000041B },
{ 0x0213F0FFEF6650E4, 0x0000431A, 0xFFFFDF1B, 0x000006C5, 0x00002F34, 0xFFFFEA02, 0x00000545, 0x00002F34, 0xFFFFEA02, 0x00000545 },
{ 0x0213F0FFEF641904, 0x000042DC, 0xFFFFDE28, 0x0000070C, 0x00003B53, 0xFFFFE0EA, 0x000006E2, 0x00003B53, 0xFFFFE0EA, 0x000006E2 },
{ 0x0213F0FFEF683164, 0x0000264B, 0xFFFFF29A, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4 },
{ 0x0213F0FFEF5A4064, 0x00004225, 0xFFFFE0E8, 0x00000665, 0x00002B53, 0xFFFFED89, 0x0000049F, 0x00002B53, 0xFFFFED89, 0x0000049F },
{ 0x0213EA94DE204924, 0x00001FCC, 0xFFFFF63F, 0x00000358, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
{ 0x0213F0FFEF6240A4, 0x000045E0, 0xFFFFDDD0, 0x000006ED, 0x00003193, 0xFFFFE8BD, 0x00000572, 0x00003193, 0xFFFFE8BD, 0x00000572 },
{ 0x0213F0FFEF683924, 0x000024FC, 0xFFFFF366, 0x000003A6, 0x00001FE8, 0xFFFFF509, 0x00000394, 0x00001FE8, 0xFFFFF509, 0x00000394 },
{ 0x0213F0FFEF5C4884, 0x0000378F, 0xFFFFE54B, 0x000005F1, 0x00001C9B, 0xFFFFF5C7, 0x00000368, 0x00001C9B, 0xFFFFF5C7, 0x00000368 },
{ 0x0213F0FFEF6418A4, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002CDD, 0xFFFFEA44, 0x00000557, 0x00002CDD, 0xFFFFEA44, 0x00000557 },
{ 0x0213EA94DE200904, 0x000021EC, 0xFFFFF4F4, 0x0000038F, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
{ 0x0213F0FFEF6010A4, 0x00003C8A, 0xFFFFE1C1, 0x00000685, 0x000019C7, 0xFFFFF7E2, 0x00000301, 0x000019C7, 0xFFFFF7E2, 0x00000301 },
{ 0x0213F0FFEF5E2064, 0x00003908, 0xFFFFE5C7, 0x000005B3, 0x00002793, 0xFFFFEF46, 0x00000465, 0x00002793, 0xFFFFEF46, 0x00000465 },
{ 0x0213F0FFEF605104, 0x000040A3, 0xFFFFDE61, 0x00000725, 0x00002077, 0xFFFFF2CE, 0x000003E8, 0x00002077, 0xFFFFF2CE, 0x000003E8 },
{ 0x0213F0FFEF664144, 0x00003DCA, 0xFFFFE34D, 0x00000608, 0x00002D66, 0xFFFFEBDF, 0x000004E8, 0x00002D66, 0xFFFFEBDF, 0x000004E8 },
{ 0x0213F0FFEF5E50C4, 0x00003085, 0xFFFFEB70, 0x000004C8, 0x000029B1, 0xFFFFEDD9, 0x000004A5, 0x000029B1, 0xFFFFEDD9, 0x000004A5 },
{ 0x0213EA94DE083884, 0x00004C73, 0xFFFFD676, 0x0000086C, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
{ 0x0213EA94DE242164, 0x00002CE5, 0xFFFFEE8C, 0x00000466, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
{ 0x0213F0FFEF621124, 0x0000489F, 0xFFFFDBF1, 0x0000073E, 0x0000332D, 0xFFFFE786, 0x000005AD, 0x0000332D, 0xFFFFE786, 0x000005AD },
{ 0x0213F0FFEF602864, 0x00003D09, 0xFFFFE193, 0x00000689, 0x00001E82, 0xFFFFF4C0, 0x00000386, 0x00001E82, 0xFFFFF4C0, 0x00000386 },
{ 0x0213F0FFEF644104, 0x00003E4C, 0xFFFFE131, 0x00000689, 0x00002F4E, 0xFFFFE925, 0x0000057B, 0x00002F4E, 0xFFFFE925, 0x0000057B },
{ 0x0213F0FFEF5A4084, 0x00003B31, 0xFFFFE53F, 0x000005B3, 0x0000248A, 0xFFFFF211, 0x000003DF, 0x0000248A, 0xFFFFF211, 0x000003DF },
{ 0x0213F0FFEF644124, 0x000038DD, 0xFFFFE54A, 0x000005C9, 0x00002B6D, 0xFFFFEBDF, 0x00000502, 0x00002B6D, 0xFFFFEBDF, 0x00000502 },
{ 0x0213F0FFEF684064, 0x00002698, 0xFFFFF1A8, 0x000003F2, 0x00002163, 0xFFFFF34B, 0x000003E3, 0x00002163, 0xFFFFF34B, 0x000003E3 },
{ 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
{ 0x0213F0FFEF6418C4, 0x00003EAF, 0xFFFFE0C3, 0x000006A0, 0x000030AB, 0xFFFFE829, 0x000005A6, 0x000030AB, 0xFFFFE829, 0x000005A6 },
{ 0x0213F0FFEF684944, 0x00002E89, 0xFFFFECA6, 0x000004B6, 0x00001FA0, 0xFFFFF4A8, 0x000003A3, 0x00001FA0, 0xFFFFF4A8, 0x000003A3 },
{ 0x0213F0FFEF6828A4, 0x000028A4, 0xFFFFF112, 0x00000402, 0x00001F7C, 0xFFFFF545, 0x0000038A, 0x00001F7C, 0xFFFFF545, 0x0000038A },
{ 0x0213F0FFEF6650A4, 0x00004135, 0xFFFFDFA2, 0x000006C5, 0x0000324C, 0xFFFFE7AA, 0x000005AF, 0x0000324C, 0xFFFFE7AA, 0x000005AF },
{ 0x0213EA94DE2038C4, 0x00002012, 0xFFFFF693, 0x00000352, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
{ 0x0213F0FFEF643084, 0x00003D7C, 0xFFFFE1BC, 0x00000671, 0x00002A45, 0xFFFFEC84, 0x000004EC, 0x00002A45, 0xFFFFEC84, 0x000004EC },
{ 0x0213F0FFEF723064, 0x00004172, 0xFFFFDF58, 0x000006DA, 0x00002504, 0xFFFFF0A6, 0x00000431, 0x00002504, 0xFFFFF0A6, 0x00000431 },
{ 0x0213F0FE99281944, 0x000029C7, 0xFFFFF087, 0x00000414, 0x00001DCB, 0xFFFFF675, 0x0000035F, 0x00001DCB, 0xFFFFF675, 0x0000035F },
{ 0x0213F0FE992A29A4, 0x000027F0, 0xFFFFF05A, 0x00000432, 0x00001707, 0xFFFFFA0E, 0x000002D1, 0x00001707, 0xFFFFFA0E, 0x000002D1 },
{ 0x0213F0FE99222144, 0x00003279, 0xFFFFE9F7, 0x00000511, 0x00001B5E, 0xFFFFF787, 0x00000317, 0x00001B5E, 0xFFFFF787, 0x00000317 },
{ 0x0213F0FE99322184, 0x000030A5, 0xFFFFEABC, 0x000004FF, 0x000019D1, 0xFFFFF83C, 0x00000304, 0x000019D1, 0xFFFFF83C, 0x00000304 },
{ 0x0213F0FE99282844, 0x0000283B, 0xFFFFF122, 0x00000402, 0x000019C2, 0xFFFFF8E9, 0x000002FB, 0x000019C2, 0xFFFFF8E9, 0x000002FB },
{ 0x0213F0FE992C2084, 0x00003376, 0xFFFFE9E1, 0x00000510, 0x000021A7, 0xFFFFF39F, 0x000003BF, 0x000021A7, 0xFFFFF39F, 0x000003BF },
{ 0x0213F0FE993218C4, 0x000031D2, 0xFFFFEA9C, 0x000004FC, 0x00001F66, 0xFFFFF4E4, 0x00000390, 0x00001F66, 0xFFFFF4E4, 0x00000390 },
{ 0x0213F0FE991A3864, 0x00003006, 0xFFFFEB18, 0x000004F2, 0x000019B3, 0xFFFFF84E, 0x00000301, 0x000019B3, 0xFFFFF84E, 0x00000301 },
{ 0x0213F0FE993039A4, 0x0000364F, 0xFFFFE81F, 0x00000556, 0x00002AC9, 0xFFFFED87, 0x000004BD, 0x00002AC9, 0xFFFFED87, 0x000004BD },
{ 0x0213F0FE992E3844, 0x00003043, 0xFFFFEBAE, 0x000004CD, 0x00001B0C, 0xFFFFF7ED, 0x0000030C, 0x00001B0C, 0xFFFFF7ED, 0x0000030C },
{ 0x0213F0FE993048A4, 0x000037CE, 0xFFFFE69E, 0x00000596, 0x0000276B, 0xFFFFEF65, 0x0000046E, 0x0000276B, 0xFFFFEF65, 0x0000046E },
{ 0x0213F0FE992C3104, 0x00003063, 0xFFFFED5E, 0x0000046F, 0x000024AE, 0xFFFFF2C4, 0x000003D8, 0x000024AE, 0xFFFFF2C4, 0x000003D8 },
{ 0x0213F0FE992E08A4, 0x00002F5D, 0xFFFFEBDC, 0x000004D3, 0x00001EDB, 0xFFFFF50F, 0x0000038E, 0x00001EDB, 0xFFFFF50F, 0x0000038E },
{ 0x0213F0FE992E48A4, 0x00003148, 0xFFFFEA9A, 0x000004FB, 0x0000192D, 0xFFFFF8E9, 0x000002DF, 0x0000192D, 0xFFFFF8E9, 0x000002DF },
{ 0x0213F0FE992C2064, 0x00003682, 0xFFFFE7E4, 0x0000055C, 0x0000250E, 0xFFFFF150, 0x0000041A, 0x0000250E, 0xFFFFF150, 0x0000041A },
{ 0x0213F0FE992A2084, 0x0000284E, 0xFFFFF15A, 0x000003F8, 0x00001CE2, 0xFFFFF6F9, 0x0000034F, 0x00001CE2, 0xFFFFF6F9, 0x0000034F },
{ 0x0213F0FE993018A4, 0x00003171, 0xFFFFEAE9, 0x000004ED, 0x00001F40, 0xFFFFF513, 0x00000384, 0x00001F40, 0xFFFFF513, 0x00000384 },
{ 0x0213F0FE99323044, 0x000031BD, 0xFFFFEA64, 0x0000050A, 0x00001EFD, 0xFFFFF4F7, 0x00000390, 0x00001EFD, 0xFFFFF4F7, 0x00000390 },
{ 0x0213F0FE992E50E4, 0x00003050, 0xFFFFEB29, 0x000004EA, 0x000019B3, 0xFFFFF878, 0x000002F9, 0x000019B3, 0xFFFFF878, 0x000002F9 },
{ 0x0213F0FE992C1904, 0x00003400, 0xFFFFE9A0, 0x0000051A, 0x00002460, 0xFFFFF1DA, 0x00000409, 0x00002460, 0xFFFFF1DA, 0x00000409 },
{ 0x0213F0FE992C4884, 0x000034A1, 0xFFFFE86F, 0x00000558, 0x0000255D, 0xFFFFF09E, 0x00000443, 0x0000255D, 0xFFFFF09E, 0x00000443 },
{ 0x0213F0FE992E48E4, 0x00003103, 0xFFFFEAD7, 0x000004F0, 0x00001896, 0xFFFFF95A, 0x000002CC, 0x00001896, 0xFFFFF95A, 0x000002CC },
{ 0x0213F0FE993018E4, 0x00003120, 0xFFFFEB9E, 0x000004CB, 0x000021E8, 0xFFFFF3A2, 0x000003C1, 0x000021E8, 0xFFFFF3A2, 0x000003C1 },
{ 0x0213F0FE991C50E4, 0x00003558, 0xFFFFE812, 0x00000565, 0x0000256E, 0xFFFFF097, 0x00000447, 0x0000256E, 0xFFFFF097, 0x00000447 },
{ 0x0213F0FE991A2844, 0x00002DA8, 0xFFFFECA8, 0x000004B7, 0x0000180B, 0xFFFFF96D, 0x000002D8, 0x0000180B, 0xFFFFF96D, 0x000002D8 },
{ 0x0213F0FE992E3064, 0x00003232, 0xFFFFEA66, 0x000004FF, 0x00001DDE, 0xFFFFF5FE, 0x0000035A, 0x00001DDE, 0xFFFFF5FE, 0x0000035A },
{ 0x0213F0FE993050E4, 0x000034D2, 0xFFFFE89F, 0x00000548, 0x0000246C, 0xFFFFF17F, 0x00000418, 0x0000246C, 0xFFFFF17F, 0x00000418 },
{ 0x0213F0FE99304904, 0x000033EC, 0xFFFFE954, 0x0000052A, 0x00002323, 0xFFFFF279, 0x000003EE, 0x00002323, 0xFFFFF279, 0x000003EE },
{ 0x0213F0FE99303884, 0x000033AA, 0xFFFFE955, 0x0000052D, 0x0000229F, 0xFFFFF2B2, 0x000003E7, 0x0000229F, 0xFFFFF2B2, 0x000003E7 },
{ 0x0213F0FE99324964, 0x00003258, 0xFFFFE9AA, 0x0000052A, 0x00001D5F, 0xFFFFF5D1, 0x0000036B, 0x00001D5F, 0xFFFFF5D1, 0x0000036B },
{ 0x0213F0FE993029A4, 0x0000323A, 0xFFFFEA5F, 0x00000504, 0x00002108, 0xFFFFF3D5, 0x000003BA, 0x00002108, 0xFFFFF3D5, 0x000003BA },
{ 0x0213F0FE992C2184, 0x00003216, 0xFFFFEA6B, 0x000004FF, 0x00001D6E, 0xFFFFF640, 0x00000350, 0x00001D6E, 0xFFFFF640, 0x00000350 },
{ 0x0213F0FE993210E4, 0x000030C5, 0xFFFFEAC4, 0x000004FC, 0x00001924, 0xFFFFF8C2, 0x000002EE, 0x00001924, 0xFFFFF8C2, 0x000002EE },
{ 0x0213F0FE99305104, 0x000032BB, 0xFFFFE9F1, 0x00000515, 0x00002211, 0xFFFFF31B, 0x000003D5, 0x00002211, 0xFFFFF31B, 0x000003D5 },
{ 0x0213F0FE993048C4, 0x0000352C, 0xFFFFE85B, 0x00000553, 0x00002410, 0xFFFFF1B4, 0x0000040F, 0x00002410, 0xFFFFF1B4, 0x0000040F },
{ 0x0213F0FE992238C4, 0x000036A0, 0xFFFFE7E8, 0x0000055D, 0x00002901, 0xFFFFEEB8, 0x00000489, 0x00002901, 0xFFFFEEB8, 0x00000489 },
{ 0x0213F0FE992C3044, 0x00003340, 0xFFFFE9D9, 0x00000516, 0x00002332, 0xFFFFF27A, 0x000003F0, 0x00002332, 0xFFFFF27A, 0x000003F0 },
{ 0x0213F0FE991A38A4, 0x00003564, 0xFFFFE86D, 0x0000054E, 0x00002613, 0xFFFFF07C, 0x00000444, 0x00002613, 0xFFFFF07C, 0x00000444 },
{ 0x0213F0FE99280904, 0x00002AD1, 0xFFFFEF0B, 0x0000045C, 0x00001DEA, 0xFFFFF5C8, 0x00000381, 0x00001DEA, 0xFFFFF5C8, 0x00000381 },
{ 0x0213F0FE992220E4, 0x000035B0, 0xFFFFE846, 0x00000555, 0x000027BE, 0xFFFFEF5D, 0x00000474, 0x000027BE, 0xFFFFEF5D, 0x00000474 },
{ 0x0213F0FE992238A4, 0x000032C4, 0xFFFFEA48, 0x00000502, 0x000022C6, 0xFFFFF2DF, 0x000003DE, 0x000022C6, 0xFFFFF2DF, 0x000003DE },
{ 0x0213F0FE993008C4, 0x00003036, 0xFFFFEB0D, 0x000004F9, 0x00001FE8, 0xFFFFF41A, 0x000003BC, 0x00001FE8, 0xFFFFF41A, 0x000003BC },
{ 0x0213F0FE991A0904, 0x000030F8, 0xFFFFEA13, 0x00000524, 0x00001B6A, 0xFFFFF6C9, 0x0000034A, 0x00001B6A, 0xFFFFF6C9, 0x0000034A },
{ 0x0213F0FE993010A4, 0x00002EE2, 0xFFFFEC0C, 0x000004CB, 0x00001A39, 0xFFFFF814, 0x0000030F, 0x00001A39, 0xFFFFF814, 0x0000030F },
{ 0x0213F0FE991C3184, 0x00003457, 0xFFFFE924, 0x0000052A, 0x00001E9D, 0xFFFFF59C, 0x00000363, 0x00001E9D, 0xFFFFF59C, 0x00000363 },
{ 0x0213F0FE99322844, 0x000030BF, 0xFFFFEB18, 0x000004ED, 0x00001D37, 0xFFFFF636, 0x0000035C, 0x00001D37, 0xFFFFF636, 0x0000035C },
{ 0x0213F0FE992E4084, 0x000031AF, 0xFFFFEA75, 0x000004FE, 0x000019F2, 0xFFFFF87A, 0x000002F0, 0x000019F2, 0xFFFFF87A, 0x000002F0 },
{ 0x0213F0FE99302884, 0x00003642, 0xFFFFE85B, 0x00000547, 0x00002975, 0xFFFFEE98, 0x0000048B, 0x00002975, 0xFFFFEE98, 0x0000048B },
{ 0x0213F0FE992E2884, 0x00002E8B, 0xFFFFED1E, 0x0000048E, 0x000019C1, 0xFFFFF917, 0x000002D6, 0x000019C1, 0xFFFFF917, 0x000002D6 },
{ 0x0213F0FE993241A4, 0x000033D9, 0xFFFFE8E1, 0x00000548, 0x0000224B, 0xFFFFF298, 0x000003F4, 0x0000224B, 0xFFFFF298, 0x000003F4 },
{ 0x0213F0FE992E28C4, 0x000032BC, 0xFFFFEB0F, 0x000004D6, 0x00002488, 0xFFFFF240, 0x000003F2, 0x00002488, 0xFFFFF240, 0x000003F2 },
{ 0x0213F0FE99304944, 0x000035FD, 0xFFFFE838, 0x00000553, 0x00002762, 0xFFFFEFBC, 0x00000460, 0x00002762, 0xFFFFEFBC, 0x00000460 },
{ 0x0213F0FE992818A4, 0x0000268B, 0xFFFFF263, 0x000003D1, 0x00001914, 0xFFFFF977, 0x000002E8, 0x00001914, 0xFFFFF977, 0x000002E8 },
{ 0x0213F0FE992C3184, 0x0000330B, 0xFFFFEA1E, 0x00000505, 0x000020B1, 0xFFFFF44D, 0x0000039E, 0x000020B1, 0xFFFFF44D, 0x0000039E },
{ 0x0213F0FE99222084, 0x0000326E, 0xFFFFEA26, 0x00000508, 0x00001C17, 0xFFFFF722, 0x00000328, 0x00001C17, 0xFFFFF722, 0x00000328 },
{ 0x0213F0FE992A31A4, 0x00002A3F, 0xFFFFEEE8, 0x0000046D, 0x00001B2B, 0xFFFFF737, 0x0000034D, 0x00001B2B, 0xFFFFF737, 0x0000034D },
{ 0x0213F0FE992C4064, 0x00003732, 0xFFFFE765, 0x00000574, 0x00002A6D, 0xFFFFEDA8, 0x000004B7, 0x00002A6D, 0xFFFFEDA8, 0x000004B7 },
{ 0x0213F0FE99300924, 0x000034D3, 0xFFFFE827, 0x00000569, 0x000027AA, 0xFFFFEEE7, 0x00000492, 0x000027AA, 0xFFFFEEE7, 0x00000492 },
{ 0x0213F0FE992E40C4, 0x00003306, 0xFFFFEA39, 0x000004FC, 0x00001DCC, 0xFFFFF655, 0x00000344, 0x00001DCC, 0xFFFFF655, 0x00000344 },
{ 0x0213F0FE99282044, 0x00002A48, 0xFFFFEFCA, 0x00000439, 0x00001DED, 0xFFFFF60D, 0x00000375, 0x00001DED, 0xFFFFF60D, 0x00000375 },
{ 0x0213F0FE993038C4, 0x000033A3, 0xFFFFEA36, 0x000004F9, 0x0000247C, 0xFFFFF21F, 0x000003F4, 0x0000247C, 0xFFFFF21F, 0x000003F4 },
{ 0x0213F0FE992C3164, 0x0000311B, 0xFFFFEB76, 0x000004D1, 0x00001EB1, 0xFFFFF5B6, 0x00000366, 0x00001EB1, 0xFFFFF5B6, 0x00000366 },
{ 0x0213F0FE99324164, 0x00003307, 0xFFFFE97F, 0x0000052A, 0x00001E76, 0xFFFFF54D, 0x0000037C, 0x00001E76, 0xFFFFF54D, 0x0000037C },
{ 0x0213F0FE991C2144, 0x0000344B, 0xFFFFE9C5, 0x00000509, 0x000020D6, 0xFFFFF486, 0x0000038F, 0x000020D6, 0xFFFFF486, 0x0000038F },
{ 0x0213F0FE992C3144, 0x000034B9, 0xFFFFEA0B, 0x000004F7, 0x000027B3, 0xFFFFF057, 0x0000043A, 0x000027B3, 0xFFFFF057, 0x0000043A },
{ 0x0213F0FE99301964, 0x00003360, 0xFFFFE984, 0x00000527, 0x00002238, 0xFFFFF2EE, 0x000003E0, 0x00002238, 0xFFFFF2EE, 0x000003E0 },
{ 0x0213F0FE99302124, 0x0000315C, 0xFFFFEC05, 0x000004B1, 0x000023C8, 0xFFFFF2CC, 0x000003DE, 0x000023C8, 0xFFFFF2CC, 0x000003DE },
{ 0x0213F0FE992C2864, 0x0000389B, 0xFFFFE6D5, 0x00000582, 0x00002C6C, 0xFFFFEC92, 0x000004DE, 0x00002C6C, 0xFFFFEC92, 0x000004DE },
{ 0x0213F0FE992E1124, 0x00003058, 0xFFFFEB30, 0x000004E6, 0x000019B5, 0xFFFFF88B, 0x000002F1, 0x000019B5, 0xFFFFF88B, 0x000002F1 },
{ 0x0213F0FE992E0904, 0x00002F69, 0xFFFFEB4A, 0x000004F1, 0x00001B82, 0xFFFFF6EC, 0x00000341, 0x00001B82, 0xFFFFF6EC, 0x00000341 },
{ 0x0213F0FE991A18E4, 0x000031EB, 0xFFFFEA64, 0x00000508, 0x00002059, 0xFFFFF427, 0x000003B0, 0x00002059, 0xFFFFF427, 0x000003B0 },
{ 0x0213F0FE99224124, 0x000033E2, 0xFFFFE94D, 0x0000052A, 0x000020BF, 0xFFFFF40B, 0x000003AB, 0x000020BF, 0xFFFFF40B, 0x000003AB },
{ 0x0213F0FE99283184, 0x00002AF9, 0xFFFFEFE9, 0x00000427, 0x00001F72, 0xFFFFF57A, 0x00000383, 0x00001F72, 0xFFFFF57A, 0x00000383 },
{ 0x0213F0FE992C2824, 0x00003282, 0xFFFFEA88, 0x000004FA, 0x00002561, 0xFFFFF126, 0x0000042B, 0x00002561, 0xFFFFF126, 0x0000042B },
{ 0x0213F0FE993010E4, 0x0000308A, 0xFFFFEB5D, 0x000004E0, 0x00001E83, 0xFFFFF577, 0x00000378, 0x00001E83, 0xFFFFF577, 0x00000378 },
{ 0x0213F0FE99324884, 0x0000336E, 0xFFFFE8C8, 0x00000553, 0x0000217C, 0xFFFFF2E1, 0x000003EB, 0x0000217C, 0xFFFFF2E1, 0x000003EB },
{ 0x0213F0FE991A2164, 0x000034A9, 0xFFFFE838, 0x00000561, 0x000020CE, 0xFFFFF38A, 0x000003C7, 0x000020CE, 0xFFFFF38A, 0x000003C7 },
{ 0x0213F0FE99222184, 0x00003152, 0xFFFFE9EB, 0x00000522, 0x00001755, 0xFFFFF9A9, 0x000002C6, 0x00001755, 0xFFFFF9A9, 0x000002C6 },
{ 0x0213F0FE99281884, 0x0000286E, 0xFFFFF136, 0x000003FD, 0x00001BAB, 0xFFFFF7C3, 0x0000032C, 0x00001BAB, 0xFFFFF7C3, 0x0000032C },
{ 0x0213F0FE99300944, 0x0000316B, 0xFFFFEA02, 0x00000528, 0x00002247, 0xFFFFF24E, 0x00000408, 0x00002247, 0xFFFFF24E, 0x00000408 },
{ 0x0213F0FE992C08E4, 0x000034CF, 0xFFFFE83D, 0x00000562, 0x00002458, 0xFFFFF130, 0x00000430, 0x00002458, 0xFFFFF130, 0x00000430 },
{ 0x0213F0FE992C2984, 0x00003352, 0xFFFFE9D1, 0x00000515, 0x0000212A, 0xFFFFF3DC, 0x000003B4, 0x0000212A, 0xFFFFF3DC, 0x000003B4 },
{ 0x0213F0FE992840A4, 0x00002946, 0xFFFFF09B, 0x00000415, 0x00001DC9, 0xFFFFF650, 0x00000366, 0x00001DC9, 0xFFFFF650, 0x00000366 },
{ 0x0213F0FE99301124, 0x00003080, 0xFFFFEB47, 0x000004E1, 0x00001BD5, 0xFFFFF73B, 0x00000329, 0x00001BD5, 0xFFFFF73B, 0x00000329 },
{ 0x0213F0FE991A1884, 0x00002FBD, 0xFFFFEB7B, 0x000004DD, 0x000017FC, 0xFFFFF99E, 0x000002C7, 0x000017FC, 0xFFFFF99E, 0x000002C7 },
{ 0x0213F0FE99281124, 0x00002A28, 0xFFFFF032, 0x0000041F, 0x00001B19, 0xFFFFF83A, 0x00000312, 0x00001B19, 0xFFFFF83A, 0x00000312 },
{ 0x0213F0FE992240C4, 0x00003420, 0xFFFFE936, 0x00000530, 0x000023C2, 0xFFFFF203, 0x00000406, 0x000023C2, 0xFFFFF203, 0x00000406 },
{ 0x0213F0FE99301144, 0x00002F7C, 0xFFFFEBBA, 0x000004D1, 0x0000185D, 0xFFFFF975, 0x000002CA, 0x0000185D, 0xFFFFF975, 0x000002CA },
{ 0x0213F0FE992E2044, 0x00002C51, 0xFFFFEE3B, 0x0000046F, 0x000019AA, 0xFFFFF8DD, 0x000002ED, 0x000019AA, 0xFFFFF8DD, 0x000002ED },
{ 0x0213F0FE991A4144, 0x000033D6, 0xFFFFE8F2, 0x0000053D, 0x00001D73, 0xFFFFF5FB, 0x0000035B, 0x00001D73, 0xFFFFF5FB, 0x0000035B },
{ 0x0213F0FE99323084, 0x000031D9, 0xFFFFEAF7, 0x000004E4, 0x00001EBD, 0xFFFFF5A6, 0x00000368, 0x00001EBD, 0xFFFFF5A6, 0x00000368 },
{ 0x0213F0FE991A20A4, 0x00003386, 0xFFFFE9CE, 0x00000515, 0x00002422, 0xFFFFF1F3, 0x00000405, 0x00002422, 0xFFFFF1F3, 0x00000405 },
{ 0x0213F0FE992C50E4, 0x000032FB, 0xFFFFE9BC, 0x00000520, 0x00002301, 0xFFFFF267, 0x000003F7, 0x00002301, 0xFFFFF267, 0x000003F7 },
{ 0x0213F0FE99322924, 0x000032C2, 0xFFFFEAC0, 0x000004EA, 0x0000250F, 0xFFFFF1A2, 0x00000413, 0x0000250F, 0xFFFFF1A2, 0x00000413 },
{ 0x0213F0FE991C2944, 0x00003722, 0xFFFFE8A6, 0x00000527, 0x000026E4, 0xFFFFF0F5, 0x0000041C, 0x000026E4, 0xFFFFF0F5, 0x0000041C },
{ 0x0213F0FE992C48C4, 0x000035A4, 0xFFFFE822, 0x00000558, 0x000022F2, 0xFFFFF288, 0x000003E8, 0x000022F2, 0xFFFFF288, 0x000003E8 },
{ 0x0213F0FE99280924, 0x00002CD1, 0xFFFFEDC6, 0x0000048C, 0x00001EAF, 0xFFFFF53D, 0x00000396, 0x00001EAF, 0xFFFFF53D, 0x00000396 },
{ 0x0213F0FE99301164, 0x00003156, 0xFFFFEA60, 0x0000050B, 0x00001BBC, 0xFFFFF704, 0x00000335, 0x00001BBC, 0xFFFFF704, 0x00000335 },
{ 0x0213F0FE992C5104, 0x000034A1, 0xFFFFE8C0, 0x00000544, 0x00002528, 0xFFFFF105, 0x0000042C, 0x00002528, 0xFFFFF105, 0x0000042C },
{ 0x0213F0FE99323064, 0x000032CE, 0xFFFFE9D3, 0x00000520, 0x000021FF, 0xFFFFF2FD, 0x000003E4, 0x000021FF, 0xFFFFF2FD, 0x000003E4 },
{ 0x0213F0FE991A50A4, 0x000034A0, 0xFFFFE823, 0x0000056D, 0x0000256F, 0xFFFFF047, 0x0000045A, 0x0000256F, 0xFFFFF047, 0x0000045A },
{ 0x0213F0FE99303944, 0x00003109, 0xFFFFEBD6, 0x000004BF, 0x000022D4, 0xFFFFF32D, 0x000003D0, 0x000022D4, 0xFFFFF32D, 0x000003D0 },
{ 0x0213F0FE992C1164, 0x000030B7, 0xFFFFEAF0, 0x000004F3, 0x00001AEC, 0xFFFFF7A9, 0x0000031B, 0x00001AEC, 0xFFFFF7A9, 0x0000031B },
{ 0x0213F0FE992C39A4, 0x00003078, 0xFFFFEBA4, 0x000004CF, 0x00001E7A, 0xFFFFF5AF, 0x0000036B, 0x00001E7A, 0xFFFFF5AF, 0x0000036B },
{ 0x0213F0FE99304124, 0x00003442, 0xFFFFE998, 0x00000518, 0x000025EA, 0xFFFFF0F3, 0x0000042B, 0x000025EA, 0xFFFFF0F3, 0x0000042B },
{ 0x0213F0FE993021A4, 0x000031CB, 0xFFFFEA80, 0x00000501, 0x000020A3, 0xFFFFF403, 0x000003B2, 0x000020A3, 0xFFFFF403, 0x000003B2 },
{ 0x0213F0FE992A2984, 0x00002947, 0xFFFFF018, 0x00000433, 0x00001BA5, 0xFFFFF75C, 0x00000340, 0x00001BA5, 0xFFFFF75C, 0x00000340 },
{ 0x0213F0FE992C3984, 0x000033F9, 0xFFFFE99D, 0x00000518, 0x00002231, 0xFFFFF358, 0x000003C5, 0x00002231, 0xFFFFF358, 0x000003C5 },
{ 0x0213F0FE99321124, 0x00003131, 0xFFFFEA45, 0x00000513, 0x00001973, 0xFFFFF85E, 0x00000301, 0x00001973, 0xFFFFF85E, 0x00000301 },
{ 0x0213F0FE991C29A4, 0x00003571, 0xFFFFE8AC, 0x00000539, 0x00002049, 0xFFFFF49C, 0x0000038D, 0x00002049, 0xFFFFF49C, 0x0000038D },
{ 0x0213F0FE992E3864, 0x0000309E, 0xFFFFEB1D, 0x000004E8, 0x000019ED, 0xFFFFF86E, 0x000002F8, 0x000019ED, 0xFFFFF86E, 0x000002F8 },
{ 0x0213F0FE99302984, 0x00003091, 0xFFFFEB9B, 0x000004CC, 0x00001D2C, 0xFFFFF6A2, 0x0000033D, 0x00001D2C, 0xFFFFF6A2, 0x0000033D },
{ 0x0213F0FE993008E4, 0x00003069, 0xFFFFEAFD, 0x000004F8, 0x00001E82, 0xFFFFF51C, 0x0000038D, 0x00001E82, 0xFFFFF51C, 0x0000038D },
{ 0x0213F0FE992210A4, 0x00003459, 0xFFFFE7F2, 0x00000572, 0x00001DA7, 0xFFFFF552, 0x0000037F, 0x00001DA7, 0xFFFFF552, 0x0000037F },
{ 0x0213F0FE99321104, 0x0000304B, 0xFFFFEAFB, 0x000004F4, 0x0000191E, 0xFFFFF8BD, 0x000002EE, 0x0000191E, 0xFFFFF8BD, 0x000002EE },
{ 0x0213F0FE993020C4, 0x0000346E, 0xFFFFEA07, 0x000004FD, 0x00002767, 0xFFFFF058, 0x00000440, 0x00002767, 0xFFFFF058, 0x00000440 },
{ 0x0213F0FE992E3084, 0x000030B5, 0xFFFFEBC1, 0x000004C1, 0x00001B3C, 0xFFFFF818, 0x000002FD, 0x00001B3C, 0xFFFFF818, 0x000002FD },
{ 0x0213F0FE99300904, 0x0000321F, 0xFFFFE9EA, 0x00000524, 0x00002380, 0xFFFFF1C2, 0x0000041A, 0x00002380, 0xFFFFF1C2, 0x0000041A },
{ 0x0213F0FE992E3044, 0x000030DF, 0xFFFFEB37, 0x000004E2, 0x00001E3C, 0xFFFFF5BB, 0x00000369, 0x00001E3C, 0xFFFFF5BB, 0x00000369 },
{ 0x0213F0FE992848A4, 0x000027E0, 0xFFFFF0E2, 0x00000416, 0x00001A6A, 0xFFFFF820, 0x00000321, 0x00001A6A, 0xFFFFF820, 0x00000321 },
{ 0x0213F0FE991A1084, 0x00002FA1, 0xFFFFEB63, 0x000004E7, 0x0000196B, 0xFFFFF880, 0x000002FB, 0x0000196B, 0xFFFFF880, 0x000002FB },
{ 0x0213F0FE991C1084, 0x0000310C, 0xFFFFEAAF, 0x000004FC, 0x000019EF, 0xFFFFF850, 0x000002FD, 0x000019EF, 0xFFFFF850, 0x000002FD },
{ 0x0213F0FE99323904, 0x0000334A, 0xFFFFEA07, 0x0000050B, 0x00002380, 0xFFFFF26F, 0x000003F0, 0x00002380, 0xFFFFF26F, 0x000003F0 },
{ 0x0213F0FE99302944, 0x00002FF9, 0xFFFFECDC, 0x00000492, 0x00002297, 0xFFFFF394, 0x000003BF, 0x00002297, 0xFFFFF394, 0x000003BF },
{ 0x0213F0FE992C2164, 0x0000354B, 0xFFFFE894, 0x00000546, 0x000024C4, 0xFFFFF16C, 0x0000041B, 0x000024C4, 0xFFFFF16C, 0x0000041B },
{ 0x0213F0FE99220924, 0x00003245, 0xFFFFE92F, 0x00000544, 0x00001829, 0xFFFFF8F1, 0x000002EA, 0x00001829, 0xFFFFF8F1, 0x000002EA },
{ 0x0213F0FE992E4884, 0x0000302F, 0xFFFFEB51, 0x000004E3, 0x0000199F, 0xFFFFF894, 0x000002F4, 0x0000199F, 0xFFFFF894, 0x000002F4 },
{ 0x0213F0FE992E18C4, 0x00002F54, 0xFFFFEC86, 0x000004A6, 0x00001A6F, 0xFFFFF891, 0x000002EC, 0x00001A6F, 0xFFFFF891, 0x000002EC },
{ 0x0213F0FE99284164, 0x00002908, 0xFFFFF0D8, 0x0000040A, 0x00001C9B, 0xFFFFF729, 0x00000342, 0x00001C9B, 0xFFFFF729, 0x00000342 },
{ 0x0213F0FE99302964, 0x000031D9, 0xFFFFEB40, 0x000004D7, 0x000023F5, 0xFFFFF259, 0x000003F4, 0x000023F5, 0xFFFFF259, 0x000003F4 },
{ 0x0213F0FE993048E4, 0x000034C8, 0xFFFFE8C6, 0x0000053F, 0x00002313, 0xFFFFF280, 0x000003EC, 0x00002313, 0xFFFFF280, 0x000003EC },
{ 0x0213F0FE993050C4, 0x000037D1, 0xFFFFE6A1, 0x0000059C, 0x00002C6A, 0xFFFFEBFF, 0x00000504, 0x00002C6A, 0xFFFFEBFF, 0x00000504 },
{ 0x0213F0FE99321964, 0x000030E9, 0xFFFFEA6B, 0x0000050F, 0x00001A2D, 0xFFFFF7DF, 0x00000316, 0x00001A2D, 0xFFFFF7DF, 0x00000316 },
{ 0x0213F0FE99302084, 0x0000323D, 0xFFFFEA95, 0x000004F4, 0x00001ED2, 0xFFFFF584, 0x0000036C, 0x00001ED2, 0xFFFFF584, 0x0000036C },
{ 0x0213F0FE992C3024, 0x000033D6, 0xFFFFE9DB, 0x00000510, 0x000027A7, 0xFFFFEFC7, 0x0000045E, 0x000027A7, 0xFFFFEFC7, 0x0000045E },
{ 0x0213F0FE991C3164, 0x00003444, 0xFFFFE98A, 0x00000517, 0x000020FD, 0xFFFFF43F, 0x0000039D, 0x000020FD, 0xFFFFF43F, 0x0000039D },
{ 0x0213F0FE992808E4, 0x00002987, 0xFFFFEFA1, 0x0000044B, 0x00001B06, 0xFFFFF788, 0x0000033C, 0x00001B06, 0xFFFFF788, 0x0000033C },
{ 0x0213F0FE992C28E4, 0x0000311D, 0xFFFFED20, 0x00000474, 0x000025DA, 0xFFFFF223, 0x000003F0, 0x000025DA, 0xFFFFF223, 0x000003F0 },
{ 0x0213F0FE992C1124, 0x000032A2, 0xFFFFEA0A, 0x0000050D, 0x00001D48, 0xFFFFF659, 0x0000034A, 0x00001D48, 0xFFFFF659, 0x0000034A },
{ 0x0213F0FE992208E4, 0x00003110, 0xFFFFE9EA, 0x00000529, 0x00001786, 0xFFFFF958, 0x000002DB, 0x00001786, 0xFFFFF958, 0x000002DB },
{ 0x0213F0FE992821A4, 0x000027F2, 0xFFFFF174, 0x000003F7, 0x00001C7A, 0xFFFFF72A, 0x00000348, 0x00001C7A, 0xFFFFF72A, 0x00000348 },
{ 0x0213F0FE991C10E4, 0x000031DB, 0xFFFFEA7D, 0x000004FB, 0x000019C4, 0xFFFFF8B1, 0x000002E6, 0x000019C4, 0xFFFFF8B1, 0x000002E6 },
{ 0x0213F0FE992C1104, 0x00003158, 0xFFFFEAAC, 0x000004FA, 0x00001BC1, 0xFFFFF737, 0x0000032B, 0x00001BC1, 0xFFFFF737, 0x0000032B },
{ 0x0213F0FE993010C4, 0x00002F36, 0xFFFFEBF9, 0x000004CA, 0x00001A2A, 0xFFFFF83F, 0x00000303, 0x00001A2A, 0xFFFFF83F, 0x00000303 },
{ 0x0213F0FE993238A4, 0x000032B4, 0xFFFFEA72, 0x000004FA, 0x000021FF, 0xFFFFF378, 0x000003C5, 0x000021FF, 0xFFFFF378, 0x000003C5 },
{ 0x0213F0FE99303164, 0x00003262, 0xFFFFEAFA, 0x000004DF, 0x00002441, 0xFFFFF237, 0x000003F6, 0x00002441, 0xFFFFF237, 0x000003F6 },
{ 0x0213F0FE99303924, 0x0000336A, 0xFFFFEAFB, 0x000004D1, 0x00002746, 0xFFFFF0B8, 0x0000042B, 0x00002746, 0xFFFFF0B8, 0x0000042B },
{ 0x0213F0FE991A4084, 0x000032E5, 0xFFFFE923, 0x00000541, 0x00001DF0, 0xFFFFF552, 0x00000380, 0x00001DF0, 0xFFFFF552, 0x00000380 },
{ 0x0213F0FE99304064, 0x000035D1, 0xFFFFE80B, 0x0000055F, 0x00002780, 0xFFFFEF74, 0x0000046F, 0x00002780, 0xFFFFEF74, 0x0000046F },
{ 0x0213F0FE993028A4, 0x000033EC, 0xFFFFEA48, 0x000004F4, 0x0000269F, 0xFFFFF0D8, 0x0000042A, 0x0000269F, 0xFFFFF0D8, 0x0000042A },
{ 0x0213F0FE99323884, 0x000030C4, 0xFFFFEB39, 0x000004E2, 0x00001B44, 0xFFFFF7AA, 0x00000318, 0x00001B44, 0xFFFFF7AA, 0x00000318 },
{ 0x0213F0FE99281144, 0x00002926, 0xFFFFF0AF, 0x0000040E, 0x0000194E, 0xFFFFF959, 0x000002E2, 0x0000194E, 0xFFFFF959, 0x000002E2 },
{ 0x0213F0FE992C10C4, 0x00003141, 0xFFFFEAAF, 0x000004F6, 0x00001864, 0xFFFFF97C, 0x000002C6, 0x00001864, 0xFFFFF97C, 0x000002C6 },
{ 0x0213F0FE99301064, 0x000030B2, 0xFFFFEB7C, 0x000004DB, 0x000022CE, 0xFFFFF2B5, 0x000003F0, 0x000022CE, 0xFFFFF2B5, 0x000003F0 },
{ 0x0213F0FE99301944, 0x0000318C, 0xFFFFEAC7, 0x000004F6, 0x00002113, 0xFFFFF3CA, 0x000003BD, 0x00002113, 0xFFFFF3CA, 0x000003BD },
{ 0x0213F0FE992E1104, 0x00002FD2, 0xFFFFEB8F, 0x000004D9, 0x00001996, 0xFFFFF89F, 0x000002F1, 0x00001996, 0xFFFFF89F, 0x000002F1 },
{ 0x0213F0FE991A28A4, 0x0000310D, 0xFFFFEB25, 0x000004E7, 0x00001F67, 0xFFFFF4EF, 0x0000038E, 0x00001F67, 0xFFFFF4EF, 0x0000038E },
{ 0x0213F0FE992A4964, 0x00002BBC, 0xFFFFEE68, 0x00000477, 0x00002050, 0xFFFFF41D, 0x000003C8, 0x00002050, 0xFFFFF41D, 0x000003C8 },
{ 0x0213F0FE99302104, 0x00003096, 0xFFFFECED, 0x00000486, 0x000024C9, 0xFFFFF278, 0x000003E7, 0x000024C9, 0xFFFFF278, 0x000003E7 },
{ 0x0213F0FE992C10A4, 0x00003401, 0xFFFFE8F1, 0x0000053C, 0x00001E75, 0xFFFFF55C, 0x00000376, 0x00001E75, 0xFFFFF55C, 0x00000376 },
{ 0x0213F0FE99302844, 0x0000319E, 0xFFFFEAB1, 0x000004F8, 0x00001EA3, 0xFFFFF567, 0x00000378, 0x00001EA3, 0xFFFFF567, 0x00000378 },
{ 0x0213F0FE99322964, 0x000030FD, 0xFFFFEB4C, 0x000004DB, 0x00001CA6, 0xFFFFF6E8, 0x00000335, 0x00001CA6, 0xFFFFF6E8, 0x00000335 },
{ 0x0213F0FE992E40A4, 0x000030D6, 0xFFFFEB1A, 0x000004E4, 0x00001A0D, 0xFFFFF87D, 0x000002EF, 0x00001A0D, 0xFFFFF87D, 0x000002EF },
{ 0x0213F0FE992C2124, 0x0000324B, 0xFFFFEB17, 0x000004D9, 0x00002225, 0xFFFFF3A8, 0x000003BA, 0x00002225, 0xFFFFF3A8, 0x000003BA },
{ 0x0213F0FE99284084, 0x00002A00, 0xFFFFF02E, 0x00000424, 0x00001E21, 0xFFFFF61D, 0x0000036C, 0x00001E21, 0xFFFFF61D, 0x0000036C },
{ 0x0213F0FE992A48A4, 0x000029CF, 0xFFFFEF53, 0x00000457, 0x00001B11, 0xFFFFF772, 0x0000033D, 0x00001B11, 0xFFFFF772, 0x0000033D },
{ 0x0213F0FE991A30A4, 0x000032A1, 0xFFFFEA63, 0x000004FB, 0x00001F83, 0xFFFFF516, 0x0000037E, 0x00001F83, 0xFFFFF516, 0x0000037E },
{ 0x0213F0FE992E20C4, 0x0000305C, 0xFFFFEC14, 0x000004B5, 0x00001D0B, 0xFFFFF6ED, 0x00000332, 0x00001D0B, 0xFFFFF6ED, 0x00000332 },
{ 0x0213F0FE992C1064, 0x00003467, 0xFFFFE8D5, 0x00000543, 0x0000243F, 0xFFFFF190, 0x00000418, 0x0000243F, 0xFFFFF190, 0x00000418 },
{ 0x0213F0FE992A2064, 0x00002796, 0xFFFFF133, 0x00000409, 0x00001903, 0xFFFFF91C, 0x000002FC, 0x00001903, 0xFFFFF91C, 0x000002FC },
{ 0x0213F0FE99302164, 0x000031F6, 0xFFFFEAB7, 0x000004F5, 0x000022B9, 0xFFFFF2D0, 0x000003E6, 0x000022B9, 0xFFFFF2D0, 0x000003E6 },
{ 0x0213F0FE992E5104, 0x00003196, 0xFFFFEA76, 0x00000503, 0x00001CC5, 0xFFFFF67D, 0x0000034A, 0x00001CC5, 0xFFFFF67D, 0x0000034A },
{ 0x0213F0FE99321144, 0x00002F9E, 0xFFFFEAD9, 0x00000505, 0x000017C1, 0xFFFFF93D, 0x000002DF, 0x000017C1, 0xFFFFF93D, 0x000002DF },
{ 0x0213F0FE992E2124, 0x00002FBC, 0xFFFFEC75, 0x000004A8, 0x00001D6D, 0xFFFFF6AC, 0x0000033D, 0x00001D6D, 0xFFFFF6AC, 0x0000033D },
{ 0x0213F0FE992C38A4, 0x00003541, 0xFFFFE921, 0x00000524, 0x00002662, 0xFFFFF0CB, 0x0000042B, 0x00002662, 0xFFFFF0CB, 0x0000042B },
{ 0x0213F0FE992A21A4, 0x00002953, 0xFFFFEF76, 0x00000459, 0x00001C05, 0xFFFFF6A0, 0x00000368, 0x00001C05, 0xFFFFF6A0, 0x00000368 },
{ 0x0213F0FE992C4924, 0x000034BC, 0xFFFFE8DD, 0x00000536, 0x0000210E, 0xFFFFF3F4, 0x000003A8, 0x0000210E, 0xFFFFF3F4, 0x000003A8 },
{ 0x0213F0FE992C29A4, 0x000034BE, 0xFFFFE916, 0x0000052F, 0x000024A1, 0xFFFFF1A6, 0x00000410, 0x000024A1, 0xFFFFF1A6, 0x00000410 },
{ 0x0213F0FE99304964, 0x000037B5, 0xFFFFE7A9, 0x0000055B, 0x000028A1, 0xFFFFEF51, 0x00000467, 0x000028A1, 0xFFFFEF51, 0x00000467 },
{ 0x0213F0FE99301104, 0x00002FC5, 0xFFFFEBBE, 0x000004D1, 0x00001BA5, 0xFFFFF757, 0x00000328, 0x00001BA5, 0xFFFFF757, 0x00000328 },
{ 0x0213F0FE993040A4, 0x000033CB, 0xFFFFE944, 0x0000052B, 0x00001FBE, 0xFFFFF4B1, 0x0000038C, 0x00001FBE, 0xFFFFF4B1, 0x0000038C },
{ 0x0213F0FE99301844, 0x000030AE, 0xFFFFEBA0, 0x000004D3, 0x00002268, 0xFFFFF316, 0x000003DD, 0x00002268, 0xFFFFF316, 0x000003DD },
{ 0x0213F0FE992C20A4, 0x00002F90, 0xFFFFEC5A, 0x000004B0, 0x00001C3A, 0xFFFFF752, 0x00000323, 0x00001C3A, 0xFFFFF752, 0x00000323 },
{ 0x0213F0FE992E38E4, 0x00003113, 0xFFFFEB91, 0x000004C8, 0x00001E3C, 0xFFFFF623, 0x0000034E, 0x00001E3C, 0xFFFFF623, 0x0000034E },
{ 0x0213F0FE99323984, 0x0000330B, 0xFFFFE94B, 0x00000539, 0x000020E7, 0xFFFFF37E, 0x000003CD, 0x000020E7, 0xFFFFF37E, 0x000003CD },
{ 0x0213F0FE992E2864, 0x000031D1, 0xFFFFEACB, 0x000004ED, 0x00001E82, 0xFFFFF5B2, 0x00000365, 0x00001E82, 0xFFFFF5B2, 0x00000365 },
{ 0x0213F0FE992A3984, 0x00002CD5, 0xFFFFEDC1, 0x0000048D, 0x000020F8, 0xFFFFF3C1, 0x000003D1, 0x000020F8, 0xFFFFF3C1, 0x000003D1 },
{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }
};
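/*
 * Each row above maps a 64-bit fuse key to three (m1, m2, b) coefficient
 * triples for the VFT2, VFT1 and VFT0 curves; in these rows the VFT1 and
 * VFT0 triples are identical.  An all-zero entry terminates the list.
 */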
int pp_override_get_default_fuse_value(uint64_t key,
				       struct phm_fuses_default *result)
{
	const struct phm_fuses_default *list = vega10_fuses_default;
	uint32_t i;

	/* linear scan; the zero key marks the end of the table */
	for (i = 0; list[i].key != 0; i++) {
		if (key == list[i].key) {
			result->key = list[i].key;
			result->VFT2_m1 = list[i].VFT2_m1;
			result->VFT2_m2 = list[i].VFT2_m2;
			result->VFT2_b = list[i].VFT2_b;
			result->VFT1_m1 = list[i].VFT1_m1;
			result->VFT1_m2 = list[i].VFT1_m2;
			result->VFT1_b = list[i].VFT1_b;
			result->VFT0_m1 = list[i].VFT0_m1;
			result->VFT0_m2 = list[i].VFT0_m2;
			result->VFT0_b = list[i].VFT0_b;
			return 0;
		}
	}

	return -EINVAL;
}
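/*
 * Illustrative sketch (not part of the driver): a caller would derive the
 * 64-bit key from the part's fuse registers and fall back to defaults when
 * the lookup misses.  The wrapper below is hypothetical; the key shown is
 * simply one of the entries from the table above.
 */
#if 0
static int example_fuse_lookup(void)
{
	struct phm_fuses_default fuses;
	uint64_t example_key = 0x0213F0FE992C2064ULL;	/* entry from the table */

	if (pp_override_get_default_fuse_value(example_key, &fuses))
		return -EINVAL;	/* key not present in vega10_fuses_default */

	/* fuses.VFT0_* .. VFT2_* now hold the per-part curve coefficients */
	return 0;
}
#endif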
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "tonga_baco.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
{ CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
{ CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
{ CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
{ CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
{ CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
};
static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
{ CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
{ CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
};
static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
};
#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | \
				 BACO_CNTL__PWRGOOD_MEM_MASK | \
				 BACO_CNTL__PWRGOOD_DVO_MASK)
static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
static const struct baco_cmd_entry gpio_tbl_iceland[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
};
static const struct baco_cmd_entry exit_baco_tbl_iceland[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
static const struct baco_cmd_entry clean_baco_tbl_iceland[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
	enum BACO_STATE cur_state;

	smu7_baco_get_state(hwmgr, &cur_state);

	if (cur_state == state)
		/* asic already in the target state */
		return 0;

	if (state == BACO_STATE_IN) {
		if (hwmgr->chip_id == CHIP_TOPAZ)
			baco_program_registers(hwmgr, gpio_tbl_iceland,
					       ARRAY_SIZE(gpio_tbl_iceland));
		else
			baco_program_registers(hwmgr, gpio_tbl,
					       ARRAY_SIZE(gpio_tbl));
		baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
				       ARRAY_SIZE(enable_fb_req_rej_tbl));
		baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
		baco_program_registers(hwmgr, turn_off_plls_tbl,
				       ARRAY_SIZE(turn_off_plls_tbl));
		if (baco_program_registers(hwmgr, enter_baco_tbl,
					   ARRAY_SIZE(enter_baco_tbl)))
			return 0;
	} else if (state == BACO_STATE_OUT) {
		/* HW requires at least 20ms between regulator off and on */
		msleep(20);
		/* Execute Hardware BACO exit sequence */
		if (hwmgr->chip_id == CHIP_TOPAZ) {
			if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
						   ARRAY_SIZE(exit_baco_tbl_iceland))) {
				if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
							   ARRAY_SIZE(clean_baco_tbl_iceland)))
					return 0;
			}
		} else {
			if (baco_program_registers(hwmgr, exit_baco_tbl,
						   ARRAY_SIZE(exit_baco_tbl))) {
				if (baco_program_registers(hwmgr, clean_baco_tbl,
							   ARRAY_SIZE(clean_baco_tbl)))
					return 0;
			}
		}
	}

	return -EINVAL;
}
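/*
 * Illustrative sketch (not driver code): a full BACO reset is the two
 * transitions back to back, first powering the ASIC down into BACO and
 * then bringing it back out.  "example_baco_reset" is a hypothetical
 * wrapper shown only to make the state-machine contract explicit.
 */
#if 0
static int example_baco_reset(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = tonga_baco_set_state(hwmgr, BACO_STATE_IN);
	if (ret)
		return ret;

	return tonga_baco_set_state(hwmgr, BACO_STATE_OUT);
}
#endif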
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "common_baco.h"
static bool baco_wait_register(struct pp_hwmgr *hwmgr, u32 reg, u32 mask, u32 value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	u32 timeout = 5000, data;

	/* poll roughly once per millisecond, giving a ~5 second timeout */
	do {
		msleep(1);
		data = RREG32(reg);
		timeout--;
	} while (value != (data & mask) && (timeout != 0));

	if (timeout == 0)
		return false;

	return true;
}
static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 mask,
			     u32 shift, u32 value, u32 timeout)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	u32 data;
	bool ret = true;

	switch (command) {
	case CMD_WRITE:
		WREG32(reg, value << shift);
		break;
	case CMD_READMODIFYWRITE:
		data = RREG32(reg);
		data = (data & (~mask)) | (value << shift);
		WREG32(reg, data);
		break;
	case CMD_WAITFOR:
		ret = baco_wait_register(hwmgr, reg, mask, value);
		break;
	case CMD_DELAY_MS:
		if (timeout)
			/* delay in milliseconds */
			msleep(timeout);
		break;
	case CMD_DELAY_US:
		if (timeout)
			/* delay in microseconds */
			udelay(timeout);
		break;
	default:
		dev_warn(adev->dev, "Invalid BACO command.\n");
		ret = false;
	}

	return ret;
}
bool baco_program_registers(struct pp_hwmgr *hwmgr,
			    const struct baco_cmd_entry *entry,
			    const u32 array_size)
{
	u32 i, reg = 0;

	for (i = 0; i < array_size; i++) {
		/* delay commands carry no register, so 'reg' keeps its last value */
		if ((entry[i].cmd == CMD_WRITE) ||
		    (entry[i].cmd == CMD_READMODIFYWRITE) ||
		    (entry[i].cmd == CMD_WAITFOR))
			reg = entry[i].reg_offset;

		if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
				      entry[i].shift, entry[i].val, entry[i].timeout))
			return false;
	}

	return true;
}
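/*
 * Illustrative sketch (not driver code): a platform-specific file defines
 * a const table of steps and replays it with baco_program_registers().
 * The entry layout follows the field order used by the tables in this
 * driver: command, register offset, mask, shift, timeout, value.  This
 * particular two-step table is made up for illustration.
 */
#if 0
static const struct baco_cmd_entry example_tbl[] = {
	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK,
	  BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
	{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
};

static bool example_program(struct pp_hwmgr *hwmgr)
{
	return baco_program_registers(hwmgr, example_tbl,
				      ARRAY_SIZE(example_tbl));
}
#endif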
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
				  const struct soc15_baco_cmd_entry *entry,
				  const u32 array_size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	u32 i, reg = 0;

	for (i = 0; i < array_size; i++) {
		/* soc15 entries address registers per IP block, instance and segment */
		if ((entry[i].cmd == CMD_WRITE) ||
		    (entry[i].cmd == CMD_READMODIFYWRITE) ||
		    (entry[i].cmd == CMD_WAITFOR))
			reg = adev->reg_offset[entry[i].hwip][entry[i].inst][entry[i].seg]
				+ entry[i].reg_offset;

		if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
				      entry[i].shift, entry[i].val, entry[i].timeout))
			return false;
	}

	return true;
}
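/*
 * Illustrative sketch (not driver code): soc15 tables resolve the register
 * address at run time from adev->reg_offset, so each entry names the IP
 * block, instance and segment instead of a flat MMIO offset.  Field names
 * are taken from the accesses above; NBIF_HWIP and the offsets shown are
 * assumptions for illustration only.
 */
#if 0
static const struct soc15_baco_cmd_entry example_soc15_tbl[] = {
	{ .cmd = CMD_READMODIFYWRITE,
	  .hwip = NBIF_HWIP, .inst = 0, .seg = 2,	/* assumed IP/segment */
	  .reg_offset = 0x00dc,				/* hypothetical offset */
	  .mask = 0x1, .shift = 0, .timeout = 0, .val = 0x1 },
};
#endif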
| linux-master | drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c |