python_code (string, length 0–1.8M) | repo_name (7 classes) | file_path (string, length 5–99)
---|---|---|
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
#include <subdev/therm.h>
#include <subdev/timer.h>
static int
nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
{
struct nvkm_device *device = bus->subdev.device;
int i;
nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
nvkm_wr32(device, 0x001304, 0x00000000);
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
nvkm_wr32(device, 0x00130c, 0x00000003);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
break;
) < 0)
return -ETIMEDOUT;
return 0;
}
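/*
 * The "data" words uploaded to 0x001400.. above are, in practice, the HWSQ
 * script bytes assembled by the nvkm_hwsq_*() helpers (hwsq.c):
 * nvkm_hwsq_fini() hands its byte buffer to ->hwsq_exec() for execution.
 */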
void
nv50_bus_intr(struct nvkm_bus *bus)
{
struct nvkm_subdev *subdev = &bus->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x00000008) {
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
nvkm_error_ratelimited(subdev, "MMIO %s of %08x FAULT at %06x\n",
(addr & 0x00000002) ? "write" : "read", data,
(addr & 0x00fffffc));
stat &= ~0x00000008;
nvkm_wr32(device, 0x001100, 0x00000008);
}
if (stat & 0x00010000) {
struct nvkm_therm *therm = device->therm;
if (therm)
nvkm_subdev_intr(&therm->subdev);
stat &= ~0x00010000;
nvkm_wr32(device, 0x001100, 0x00010000);
}
if (stat) {
nvkm_error(subdev, "intr %08x\n", stat);
nvkm_mask(device, 0x001140, stat, 0);
}
}
void
nv50_bus_init(struct nvkm_bus *bus)
{
struct nvkm_device *device = bus->subdev.device;
nvkm_wr32(device, 0x001100, 0xffffffff);
nvkm_wr32(device, 0x001140, 0x00010008);
}
static const struct nvkm_bus_func
nv50_bus = {
.init = nv50_bus_init,
.intr = nv50_bus_intr,
.hwsq_exec = nv50_bus_hwsq_exec,
.hwsq_size = 64,
};
int
nv50_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
return nvkm_bus_new_(&nv50_bus, device, type, inst, pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static void
nvkm_bus_intr(struct nvkm_subdev *subdev)
{
struct nvkm_bus *bus = nvkm_bus(subdev);
bus->func->intr(bus);
}
static int
nvkm_bus_init(struct nvkm_subdev *subdev)
{
struct nvkm_bus *bus = nvkm_bus(subdev);
bus->func->init(bus);
return 0;
}
static void *
nvkm_bus_dtor(struct nvkm_subdev *subdev)
{
return nvkm_bus(subdev);
}
static const struct nvkm_subdev_func
nvkm_bus = {
.dtor = nvkm_bus_dtor,
.init = nvkm_bus_init,
.intr = nvkm_bus_intr,
};
int
nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus)
{
struct nvkm_bus *bus;
if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_bus, device, type, inst, &bus->subdev);
bus->func = func;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
struct nvkm_hwsq {
struct nvkm_subdev *subdev;
u32 addr;
u32 data;
struct {
u8 data[512];
u16 size;
} c;
};
static void
hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
{
memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
hwsq->c.size += size;
}
int
nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
{
struct nvkm_hwsq *hwsq;
hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
if (hwsq) {
hwsq->subdev = subdev;
hwsq->addr = ~0;
hwsq->data = ~0;
memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
hwsq->c.size = 0;
}
return hwsq ? 0 : -ENOMEM;
}
int
nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
{
struct nvkm_hwsq *hwsq = *phwsq;
int ret = 0, i;
if (hwsq) {
struct nvkm_subdev *subdev = hwsq->subdev;
struct nvkm_bus *bus = subdev->device->bus;
hwsq->c.size = (hwsq->c.size + 4) / 4;
if (hwsq->c.size <= bus->func->hwsq_size) {
if (exec)
ret = bus->func->hwsq_exec(bus,
(u32 *)hwsq->c.data,
hwsq->c.size);
if (ret)
nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
} else {
nvkm_error(subdev, "hwsq ucode too large\n");
ret = -ENOSPC;
}
for (i = 0; ret && i < hwsq->c.size; i++)
nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
*phwsq = NULL;
kfree(hwsq);
}
return ret;
}
void
nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
if (hwsq->data != data) {
if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
data >> 16, data >> 24 });
} else {
hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
}
}
if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
addr >> 16, addr >> 24 });
} else {
hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
}
hwsq->addr = addr;
hwsq->data = data;
}
void
nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
{
nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
flag += 0x80;
if (data >= 0)
flag += 0x20;
if (data >= 1)
flag += 0x20;
hwsq_cmd(hwsq, 1, (u8[]){ flag });
}
void
nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
{
nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}
void
nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
{
struct nvkm_subdev *subdev = hwsq->subdev;
struct nvkm_device *device = subdev->device;
u32 heads, x, y, px = 0;
int i, head_sync;
heads = nvkm_rd32(device, 0x610050);
for (i = 0; i < 2; i++) {
/* Heuristic: sync to head with biggest resolution */
if (heads & (2 << (i << 3))) {
x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
y = (x & 0xffff0000) >> 16;
x &= 0x0000ffff;
if ((x * y) > px) {
px = (x * y);
head_sync = i;
}
}
}
if (px == 0) {
nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
return;
}
nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
}
void
nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
{
u8 shift = 0, usec = nsec / 1000;
while (usec & ~3) {
usec >>= 2;
shift++;
}
nvkm_debug(hwsq->subdev, " DELAY = %d ns\n", nsec);
hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
}
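/*
 * Rough opcode map for the script bytes emitted by the helpers above
 * (summarised from the hwsq_cmd() callers; not an authoritative hardware
 * reference):
 *
 *   0x00 | (shift << 2) | mant    delay of roughly mant * 4^shift microseconds
 *   0x40 aa aa                    set low 16 bits of the register address
 *   0xe0 aa aa aa aa              set the full 32-bit register address
 *   0x42 dd dd                    set low 16 bits of the data value
 *   0xe2 dd dd dd dd              set the full 32-bit data value
 *   0x5f ff dd                    wait for flag "ff" to equal "dd"
 *   0x80/0xa0/0xc0 + flag         set flag, for data < 0 / == 0 / >= 1
 *
 * nvkm_hwsq_wr32() emits the data opcode (only if the value changed) and then
 * the address opcode, which appears to trigger the actual register write.
 */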
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
#include <subdev/timer.h>
static int
g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
{
struct nvkm_device *device = bus->subdev.device;
int i;
nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
nvkm_wr32(device, 0x001304, 0x00000000);
nvkm_wr32(device, 0x001318, 0x00000000);
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
nvkm_wr32(device, 0x00130c, 0x00000001);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
break;
) < 0)
return -ETIMEDOUT;
return 0;
}
static const struct nvkm_bus_func
g94_bus = {
.init = nv50_bus_init,
.intr = nv50_bus_intr,
.hwsq_exec = g94_bus_hwsq_exec,
.hwsq_size = 128,
};
int
g94_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
return nvkm_bus_new_(&g94_bus, device, type, inst, pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/falcon.h>
#include <core/firmware.h>
#include <nvfw/fw.h>
#include <nvfw/ls.h>
void
nvkm_acr_lsfw_del(struct nvkm_acr_lsfw *lsfw)
{
nvkm_blob_dtor(&lsfw->img);
kfree(lsfw->sigs);
nvkm_firmware_put(lsfw->sig);
list_del(&lsfw->head);
kfree(lsfw);
}
void
nvkm_acr_lsfw_del_all(struct nvkm_acr *acr)
{
struct nvkm_acr_lsfw *lsfw, *lsft;
list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
nvkm_acr_lsfw_del(lsfw);
}
}
static struct nvkm_acr_lsfw *
nvkm_acr_lsfw_get(struct nvkm_acr *acr, enum nvkm_acr_lsf_id id)
{
struct nvkm_acr_lsfw *lsfw;
list_for_each_entry(lsfw, &acr->lsfw, head) {
if (lsfw->id == id)
return lsfw;
}
return NULL;
}
struct nvkm_acr_lsfw *
nvkm_acr_lsfw_add(const struct nvkm_acr_lsf_func *func, struct nvkm_acr *acr,
struct nvkm_falcon *falcon, enum nvkm_acr_lsf_id id)
{
struct nvkm_acr_lsfw *lsfw;
if (!acr || list_empty(&acr->hsfw))
return ERR_PTR(-ENOSYS);
lsfw = nvkm_acr_lsfw_get(acr, id);
if (lsfw && lsfw->func) {
nvkm_error(&acr->subdev, "LSFW %d redefined\n", id);
return ERR_PTR(-EEXIST);
}
if (!lsfw) {
if (!(lsfw = kzalloc(sizeof(*lsfw), GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
lsfw->id = id;
list_add_tail(&lsfw->head, &acr->lsfw);
}
lsfw->func = func;
lsfw->falcon = falcon;
return lsfw;
}
static struct nvkm_acr_lsfw *
nvkm_acr_lsfw_load_sig_image_desc_(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func,
const struct firmware **pdesc)
{
struct nvkm_acr *acr = subdev->device->acr;
struct nvkm_acr_lsfw *lsfw;
int ret;
if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
return lsfw;
ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
if (ret)
goto done;
ret = nvkm_firmware_load_blob(subdev, path, "image", ver, &lsfw->img);
if (ret)
goto done;
ret = nvkm_firmware_load_name(subdev, path, "desc", ver, pdesc);
done:
if (ret) {
nvkm_acr_lsfw_del(lsfw);
return ERR_PTR(ret);
}
return lsfw;
}
static void
nvkm_acr_lsfw_from_desc(const struct nvfw_ls_desc_head *desc,
struct nvkm_acr_lsfw *lsfw)
{
lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
lsfw->app_size = ALIGN(desc->app_size, 256);
lsfw->app_start_offset = desc->app_start_offset;
lsfw->app_imem_entry = desc->app_imem_entry;
lsfw->app_resident_code_offset = desc->app_resident_code_offset;
lsfw->app_resident_code_size = desc->app_resident_code_size;
lsfw->app_resident_data_offset = desc->app_resident_data_offset;
lsfw->app_resident_data_size = desc->app_resident_data_size;
lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
lsfw->bootloader_size;
lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
lsfw->ucode_size;
}
int
nvkm_acr_lsfw_load_sig_image_desc(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func)
{
const struct firmware *fw;
struct nvkm_acr_lsfw *lsfw;
lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
func, &fw);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
nvkm_acr_lsfw_from_desc(&nvfw_ls_desc(subdev, fw->data)->head, lsfw);
nvkm_firmware_put(fw);
return 0;
}
int
nvkm_acr_lsfw_load_sig_image_desc_v1(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func)
{
const struct firmware *fw;
struct nvkm_acr_lsfw *lsfw;
lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver,
func, &fw);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
nvkm_acr_lsfw_from_desc(&nvfw_ls_desc_v1(subdev, fw->data)->head, lsfw);
nvkm_firmware_put(fw);
return 0;
}
int
nvkm_acr_lsfw_load_sig_image_desc_v2(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func)
{
const struct firmware *fw;
struct nvkm_acr_lsfw *lsfw;
const struct nvfw_ls_desc_v2 *desc;
int ret = 0;
lsfw = nvkm_acr_lsfw_load_sig_image_desc_(subdev, falcon, id, path, ver, func, &fw);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
desc = nvfw_ls_desc_v2(subdev, fw->data);
lsfw->secure_bootloader = desc->secure_bootloader;
lsfw->bootloader_size = ALIGN(desc->bootloader_size, 256);
lsfw->bootloader_imem_offset = desc->bootloader_imem_offset;
lsfw->app_size = ALIGN(desc->app_size, 256);
lsfw->app_start_offset = desc->app_start_offset;
lsfw->app_imem_entry = desc->app_imem_entry;
lsfw->app_resident_code_offset = desc->app_resident_code_offset;
lsfw->app_resident_code_size = desc->app_resident_code_size;
lsfw->app_resident_data_offset = desc->app_resident_data_offset;
lsfw->app_resident_data_size = desc->app_resident_data_size;
lsfw->app_imem_offset = desc->app_imem_offset;
lsfw->app_dmem_offset = desc->app_dmem_offset;
lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) + lsfw->bootloader_size;
lsfw->data_size = lsfw->app_size + lsfw->bootloader_size - lsfw->ucode_size;
nvkm_firmware_put(fw);
if (lsfw->secure_bootloader) {
const struct firmware *hsbl;
const struct nvfw_ls_hsbl_bin_hdr *hdr;
const struct nvfw_ls_hsbl_hdr *hshdr;
u32 loc, sig, cnt, *meta;
ret = nvkm_firmware_load_name(subdev, path, "hs_bl_sig", ver, &hsbl);
if (ret)
return ret;
hdr = nvfw_ls_hsbl_bin_hdr(subdev, hsbl->data);
hshdr = nvfw_ls_hsbl_hdr(subdev, hsbl->data + hdr->header_offset);
meta = (u32 *)(hsbl->data + hshdr->meta_data_offset);
loc = *(u32 *)(hsbl->data + hshdr->patch_loc);
sig = *(u32 *)(hsbl->data + hshdr->patch_sig);
cnt = *(u32 *)(hsbl->data + hshdr->num_sig);
lsfw->fuse_ver = meta[0];
lsfw->engine_id = meta[1];
lsfw->ucode_id = meta[2];
lsfw->sig_size = hshdr->sig_prod_size / cnt;
lsfw->sig_nr = cnt;
lsfw->sigs = kmemdup(hsbl->data + hshdr->sig_prod_offset + sig,
lsfw->sig_nr * lsfw->sig_size, GFP_KERNEL);
nvkm_firmware_put(hsbl);
if (!lsfw->sigs)
ret = -ENOMEM;
}
return ret;
}
int
nvkm_acr_lsfw_load_bl_inst_data_sig(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func)
{
struct nvkm_acr *acr = subdev->device->acr;
struct nvkm_acr_lsfw *lsfw;
const struct firmware *bl = NULL, *inst = NULL, *data = NULL;
const struct nvfw_bin_hdr *hdr;
const struct nvfw_bl_desc *desc;
u32 *bldata;
int ret;
if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
return PTR_ERR(lsfw);
ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
if (ret)
goto done;
hdr = nvfw_bin_hdr(subdev, bl->data);
desc = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
bldata = (void *)(bl->data + hdr->data_offset);
ret = nvkm_firmware_load_name(subdev, path, "inst", ver, &inst);
if (ret)
goto done;
ret = nvkm_firmware_load_name(subdev, path, "data", ver, &data);
if (ret)
goto done;
ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
if (ret)
goto done;
lsfw->bootloader_size = ALIGN(desc->code_size, 256);
lsfw->bootloader_imem_offset = desc->start_tag << 8;
lsfw->app_start_offset = lsfw->bootloader_size;
lsfw->app_imem_entry = 0;
lsfw->app_resident_code_offset = 0;
lsfw->app_resident_code_size = ALIGN(inst->size, 256);
lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
lsfw->app_resident_data_size = ALIGN(data->size, 256);
lsfw->app_size = lsfw->app_resident_code_size +
lsfw->app_resident_data_size;
lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
ret = -ENOMEM;
goto done;
}
memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
memcpy(lsfw->img.data + lsfw->app_start_offset +
lsfw->app_resident_code_offset, inst->data, inst->size);
memcpy(lsfw->img.data + lsfw->app_start_offset +
lsfw->app_resident_data_offset, data->data, data->size);
lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
lsfw->bootloader_size;
lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
lsfw->ucode_size;
done:
if (ret)
nvkm_acr_lsfw_del(lsfw);
nvkm_firmware_put(data);
nvkm_firmware_put(inst);
nvkm_firmware_put(bl);
return ret;
}
int
nvkm_acr_lsfw_load_bl_sig_net(struct nvkm_subdev *subdev,
struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id,
const char *path, int ver,
const struct nvkm_acr_lsf_func *func,
const void *inst_data, u32 inst_size,
const void *data_data, u32 data_size)
{
struct nvkm_acr *acr = subdev->device->acr;
struct nvkm_acr_lsfw *lsfw;
const struct firmware _inst = { .data = inst_data, .size = inst_size };
const struct firmware _data = { .data = data_data, .size = data_size };
const struct firmware *bl = NULL, *inst = &_inst, *data = &_data;
const struct {
int bin_magic;
int bin_version;
int bin_size;
int header_offset;
int header_size;
} *hdr;
u32 *bldata;
int ret;
if (IS_ERR((lsfw = nvkm_acr_lsfw_add(func, acr, falcon, id))))
return PTR_ERR(lsfw);
ret = nvkm_firmware_load_name(subdev, path, "bl", ver, &bl);
if (ret)
goto done;
hdr = (const void *)bl->data;
bldata = (void *)(bl->data + hdr->header_offset);
ret = nvkm_firmware_load_name(subdev, path, "sig", ver, &lsfw->sig);
if (ret)
goto done;
lsfw->bootloader_size = ALIGN(hdr->header_size, 256);
lsfw->bootloader_imem_offset = func->bl_entry;
lsfw->app_start_offset = lsfw->bootloader_size;
lsfw->app_imem_entry = 0;
lsfw->app_resident_code_offset = 0;
lsfw->app_resident_code_size = ALIGN(inst->size, 256);
lsfw->app_resident_data_offset = lsfw->app_resident_code_size;
lsfw->app_resident_data_size = ALIGN(data->size, 256);
lsfw->app_imem_offset = 0;
lsfw->app_dmem_offset = 0;
lsfw->app_size = lsfw->app_resident_code_size + lsfw->app_resident_data_size;
lsfw->img.size = lsfw->bootloader_size + lsfw->app_size;
if (!(lsfw->img.data = kzalloc(lsfw->img.size, GFP_KERNEL))) {
ret = -ENOMEM;
goto done;
}
memcpy(lsfw->img.data, bldata, lsfw->bootloader_size);
memcpy(lsfw->img.data + lsfw->app_start_offset +
lsfw->app_resident_code_offset, inst->data, inst->size);
memcpy(lsfw->img.data + lsfw->app_start_offset +
lsfw->app_resident_data_offset, data->data, data->size);
lsfw->ucode_size = ALIGN(lsfw->app_resident_data_offset, 256) +
lsfw->bootloader_size;
lsfw->data_size = lsfw->app_size + lsfw->bootloader_size -
lsfw->ucode_size;
done:
if (ret)
nvkm_acr_lsfw_del(lsfw);
nvkm_firmware_put(bl);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/lsfw.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/gsp.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
#include <nvfw/acr.h>
int
tu102_acr_init(struct nvkm_acr *acr)
{
int ret = nvkm_acr_hsfw_boot(acr, "AHESASC");
if (ret)
return ret;
return nvkm_acr_hsfw_boot(acr, "ASB");
}
static int
tu102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
struct nvkm_acr_lsfw *lsfw;
u32 offset = 0;
int ret;
/*XXX: shared sub-WPR headers, fill terminator for now. */
nvkm_wo32(acr->wpr, 0x200, 0xffffffff);
/* Fill per-LSF structures. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
struct wpr_header_v1 hdr = {
.falcon_id = lsfw->id,
.lsb_offset = lsfw->offset.lsb,
.bootstrap_owner = NVKM_ACR_LSF_GSPLITE,
.lazy_bootstrap = 1,
.bin_version = sig->version,
.status = WPR_HEADER_V1_STATUS_COPY,
};
/* Write WPR header. */
nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
offset += sizeof(hdr);
/* Write LSB header. */
ret = gp102_acr_wpr_build_lsb(acr, lsfw);
if (ret)
return ret;
/* Write ucode image. */
nvkm_wobj(acr->wpr, lsfw->offset.img,
lsfw->img.data,
lsfw->img.size);
/* Write bootloader data. */
lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
}
/* Finalise WPR. */
nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
return 0;
}
static int
tu102_acr_hsfw_nofw(struct nvkm_acr *acr, const char *bl, const char *fw,
const char *name, int version,
const struct nvkm_acr_hsf_fwif *fwif)
{
return 0;
}
MODULE_FIRMWARE("nvidia/tu102/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu116/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu116/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu117/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu117/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_unload_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_hsfw_0, NVKM_ACR_HSF_PMU, 0, 0x00000000 },
{ -1, tu102_acr_hsfw_nofw },
{}
};
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu116/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu117/acr/ucode_asb.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_asb_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_hsfw_0, NVKM_ACR_HSF_GSP, 0, 0x00000000 },
{ -1, tu102_acr_hsfw_nofw },
{}
};
MODULE_FIRMWARE("nvidia/tu102/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu116/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu116/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu117/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu117/acr/ucode_ahesasc.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_ahesasc_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_load_0, NVKM_ACR_HSF_SEC2, 0, 0x00000000 },
{ -1, tu102_acr_hsfw_nofw },
{}
};
static const struct nvkm_acr_func
tu102_acr = {
.ahesasc = tu102_acr_ahesasc_fwif,
.asb = tu102_acr_asb_fwif,
.unload = tu102_acr_unload_fwif,
.wpr_parse = gp102_acr_wpr_parse,
.wpr_layout = gp102_acr_wpr_layout,
.wpr_alloc = gp102_acr_wpr_alloc,
.wpr_patch = gp102_acr_wpr_patch,
.wpr_build = tu102_acr_wpr_build,
.wpr_check = gm200_acr_wpr_check,
.init = tu102_acr_init,
};
static int
tu102_acr_load(struct nvkm_acr *acr, int version,
const struct nvkm_acr_fwif *fwif)
{
struct nvkm_subdev *subdev = &acr->subdev;
const struct nvkm_acr_hsf_fwif *hsfwif;
hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
acr, "acr/bl", "acr/ucode_ahesasc",
"AHESASC");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
acr, "acr/bl", "acr/ucode_asb", "ASB");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
acr, "acr/unload_bl", "acr/ucode_unload",
"unload");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
return 0;
}
static const struct nvkm_acr_fwif
tu102_acr_fwif[] = {
{ 0, tu102_acr_load, &tu102_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
void
ga100_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
{
struct nvkm_device *device = acr->subdev.device;
*start = (u64)(nvkm_rd32(device, 0x1fa81c) & 0xffffff00) << 8;
*limit = (u64)(nvkm_rd32(device, 0x1fa820) & 0xffffff00) << 8;
*limit = *limit + 0x20000;
}
int
ga100_acr_hsfw_ctor(struct nvkm_acr *acr, const char *bl, const char *fw,
const char *name, int ver, const struct nvkm_acr_hsf_fwif *fwif)
{
struct nvkm_acr_hsfw *hsfw;
if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
return -ENOMEM;
hsfw->falcon_id = fwif->falcon_id;
hsfw->boot_mbox0 = fwif->boot_mbox0;
hsfw->intr_clear = fwif->intr_clear;
list_add_tail(&hsfw->head, &acr->hsfw);
return nvkm_falcon_fw_ctor_hs_v2(fwif->func, name, &acr->subdev, fw, ver, NULL, &hsfw->fw);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga100.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/falcon.h>
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/pmu.h>
#include <subdev/timer.h>
#include <nvfw/acr.h>
#include <nvfw/flcn.h>
const struct nvkm_acr_func
gm200_acr = {
};
int
gm200_acr_nofw(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
nvkm_warn(&acr->subdev, "firmware unavailable\n");
return 0;
}
int
gm200_acr_init(struct nvkm_acr *acr)
{
return nvkm_acr_hsfw_boot(acr, "load");
}
void
gm200_acr_wpr_check(struct nvkm_acr *acr, u64 *start, u64 *limit)
{
struct nvkm_device *device = acr->subdev.device;
nvkm_wr32(device, 0x100cd4, 2);
*start = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
nvkm_wr32(device, 0x100cd4, 3);
*limit = (u64)(nvkm_rd32(device, 0x100cd4) & 0xffffff00) << 8;
*limit = *limit + 0x20000;
}
int
gm200_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
{
struct nvkm_subdev *subdev = &acr->subdev;
struct wpr_header hdr;
struct lsb_header lsb;
struct nvkm_acr_lsf *lsfw;
u32 offset = 0;
do {
nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
wpr_header_dump(subdev, &hdr);
list_for_each_entry(lsfw, &acr->lsfw, head) {
if (lsfw->id != hdr.falcon_id)
continue;
nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
lsb_header_dump(subdev, &lsb);
lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
break;
}
offset += sizeof(hdr);
} while (hdr.falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID);
return 0;
}
void
gm200_acr_wpr_build_lsb_tail(struct nvkm_acr_lsfw *lsfw,
struct lsb_header_tail *hdr)
{
hdr->ucode_off = lsfw->offset.img;
hdr->ucode_size = lsfw->ucode_size;
hdr->data_size = lsfw->data_size;
hdr->bl_code_size = lsfw->bootloader_size;
hdr->bl_imem_off = lsfw->bootloader_imem_offset;
hdr->bl_data_off = lsfw->offset.bld;
hdr->bl_data_size = lsfw->bl_data_size;
hdr->app_code_off = lsfw->app_start_offset +
lsfw->app_resident_code_offset;
hdr->app_code_size = lsfw->app_resident_code_size;
hdr->app_data_off = lsfw->app_start_offset +
lsfw->app_resident_data_offset;
hdr->app_data_size = lsfw->app_resident_data_size;
hdr->flags = lsfw->func->flags;
}
static int
gm200_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
struct lsb_header hdr;
if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
return -EINVAL;
memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
return 0;
}
int
gm200_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
struct nvkm_acr_lsfw *lsfw;
u32 offset = 0;
int ret;
/* Fill per-LSF structures. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
struct wpr_header hdr = {
.falcon_id = lsfw->id,
.lsb_offset = lsfw->offset.lsb,
.bootstrap_owner = NVKM_ACR_LSF_PMU,
.lazy_bootstrap = rtos && lsfw->id != rtos->id,
.status = WPR_HEADER_V0_STATUS_COPY,
};
/* Write WPR header. */
nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
offset += sizeof(hdr);
/* Write LSB header. */
ret = gm200_acr_wpr_build_lsb(acr, lsfw);
if (ret)
return ret;
/* Write ucode image. */
nvkm_wobj(acr->wpr, lsfw->offset.img,
lsfw->img.data,
lsfw->img.size);
/* Write bootloader data. */
lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
}
/* Finalise WPR. */
nvkm_wo32(acr->wpr, offset, WPR_HEADER_V0_FALCON_ID_INVALID);
return 0;
}
static int
gm200_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
{
int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
ALIGN(wpr_size, 0x40000), 0x40000, true,
&acr->wpr);
if (ret)
return ret;
acr->wpr_start = nvkm_memory_addr(acr->wpr);
acr->wpr_end = acr->wpr_start + nvkm_memory_size(acr->wpr);
return 0;
}
u32
gm200_acr_wpr_layout(struct nvkm_acr *acr)
{
struct nvkm_acr_lsfw *lsfw;
u32 wpr = 0;
wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header);
list_for_each_entry(lsfw, &acr->lsfw, head) {
wpr = ALIGN(wpr, 256);
lsfw->offset.lsb = wpr;
wpr += sizeof(struct lsb_header);
wpr = ALIGN(wpr, 4096);
lsfw->offset.img = wpr;
wpr += lsfw->img.size;
wpr = ALIGN(wpr, 256);
lsfw->offset.bld = wpr;
lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
wpr += lsfw->bl_data_size;
}
return wpr;
}
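/*
 * Resulting WPR image layout (a sketch of what the function above computes;
 * offsets are relative to the start of the WPR blob):
 *
 *   0x0000          space for 11 (MAX_LSF) struct wpr_header entries;
 *                   gm200_acr_wpr_build() writes the FALCON_ID_INVALID
 *                   terminator after the last used entry
 *   per LSF, in list order:
 *     ALIGN(256)    struct lsb_header
 *     ALIGN(4096)   ucode image (lsfw->img)
 *     ALIGN(256)    bootloader data, padded to 256 bytes
 */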
int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
wpr_header_dump(&acr->subdev, hdr);
lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
}
return 0;
}
int
gm200_acr_hsfw_load_bld(struct nvkm_falcon_fw *fw)
{
struct flcn_bl_dmem_desc_v1 hsdesc = {
.ctx_dma = FALCON_DMAIDX_VIRT,
.code_dma_base = fw->vma->addr,
.non_sec_code_off = fw->nmem_base,
.non_sec_code_size = fw->nmem_size,
.sec_code_off = fw->imem_base,
.sec_code_size = fw->imem_size,
.code_entry_point = 0,
.data_dma_base = fw->vma->addr + fw->dmem_base_img,
.data_size = fw->dmem_size,
};
flcn_bl_dmem_desc_v1_dump(fw->falcon->user, &hsdesc);
return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&hsdesc, 0, 0, DMEM, 0, sizeof(hsdesc), 0, 0);
}
int
gm200_acr_hsfw_ctor(struct nvkm_acr *acr, const char *bl, const char *fw, const char *name, int ver,
const struct nvkm_acr_hsf_fwif *fwif)
{
struct nvkm_acr_hsfw *hsfw;
if (!(hsfw = kzalloc(sizeof(*hsfw), GFP_KERNEL)))
return -ENOMEM;
hsfw->falcon_id = fwif->falcon_id;
hsfw->boot_mbox0 = fwif->boot_mbox0;
hsfw->intr_clear = fwif->intr_clear;
list_add_tail(&hsfw->head, &acr->hsfw);
return nvkm_falcon_fw_ctor_hs(fwif->func, name, &acr->subdev, bl, fw, ver, NULL, &hsfw->fw);
}
const struct nvkm_falcon_fw_func
gm200_acr_unload_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.load = gm200_flcn_fw_load,
.load_bld = gm200_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
gm200_acr_unload_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gm200_acr_unload_0, NVKM_ACR_HSF_PMU, 0, 0x00000010 },
{}
};
static int
gm200_acr_load_setup(struct nvkm_falcon_fw *fw)
{
struct flcn_acr_desc *desc = (void *)&fw->fw.img[fw->dmem_base_img];
struct nvkm_acr *acr = fw->falcon->owner->device->acr;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
flcn_acr_desc_dump(&acr->subdev, desc);
return 0;
}
static const struct nvkm_falcon_fw_func
gm200_acr_load_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.setup = gm200_acr_load_setup,
.load = gm200_flcn_fw_load,
.load_bld = gm200_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
static const struct nvkm_acr_hsf_fwif
gm200_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gm200_acr_load_0, NVKM_ACR_HSF_PMU, 0, 0x00000010 },
{}
};
static const struct nvkm_acr_func
gm200_acr_0 = {
.load = gm200_acr_load_fwif,
.unload = gm200_acr_unload_fwif,
.wpr_parse = gm200_acr_wpr_parse,
.wpr_layout = gm200_acr_wpr_layout,
.wpr_alloc = gm200_acr_wpr_alloc,
.wpr_build = gm200_acr_wpr_build,
.wpr_patch = gm200_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
BIT_ULL(NVKM_ACR_LSF_GPCCS),
};
static int
gm200_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
struct nvkm_subdev *subdev = &acr->subdev;
const struct nvkm_acr_hsf_fwif *hsfwif;
hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
acr, "acr/bl", "acr/ucode_load", "load");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
acr, "acr/bl", "acr/ucode_unload",
"unload");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
return 0;
}
static const struct nvkm_acr_fwif
gm200_acr_fwif[] = {
{ 0, gm200_acr_load, &gm200_acr_0 },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>
#include <engine/sec2.h>
#include <nvfw/acr.h>
#include <nvfw/flcn.h>
int
gp102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
{
struct wpr_header_v1 hdr;
struct lsb_header_v1 lsb;
struct nvkm_acr_lsfw *lsfw;
u32 offset = 0;
do {
nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
wpr_header_v1_dump(&acr->subdev, &hdr);
list_for_each_entry(lsfw, &acr->lsfw, head) {
if (lsfw->id != hdr.falcon_id)
continue;
nvkm_robj(acr->wpr, hdr.lsb_offset, &lsb, sizeof(lsb));
lsb_header_v1_dump(&acr->subdev, &lsb);
lsfw->func->bld_patch(acr, lsb.tail.bl_data_off, adjust);
break;
}
offset += sizeof(hdr);
} while (hdr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
return 0;
}
int
gp102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
struct lsb_header_v1 hdr;
if (WARN_ON(lsfw->sig->size != sizeof(hdr.signature)))
return -EINVAL;
memcpy(&hdr.signature, lsfw->sig->data, lsfw->sig->size);
gm200_acr_wpr_build_lsb_tail(lsfw, &hdr.tail);
nvkm_wobj(acr->wpr, lsfw->offset.lsb, &hdr, sizeof(hdr));
return 0;
}
int
gp102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
struct nvkm_acr_lsfw *lsfw;
u32 offset = 0;
int ret;
/* Fill per-LSF structures. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
struct lsf_signature_v1 *sig = (void *)lsfw->sig->data;
struct wpr_header_v1 hdr = {
.falcon_id = lsfw->id,
.lsb_offset = lsfw->offset.lsb,
.bootstrap_owner = NVKM_ACR_LSF_SEC2,
.lazy_bootstrap = rtos && lsfw->id != rtos->id,
.bin_version = sig->version,
.status = WPR_HEADER_V1_STATUS_COPY,
};
/* Write WPR header. */
nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
offset += sizeof(hdr);
/* Write LSB header. */
ret = gp102_acr_wpr_build_lsb(acr, lsfw);
if (ret)
return ret;
/* Write ucode image. */
nvkm_wobj(acr->wpr, lsfw->offset.img,
lsfw->img.data,
lsfw->img.size);
/* Write bootloader data. */
lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
}
/* Finalise WPR. */
nvkm_wo32(acr->wpr, offset, WPR_HEADER_V1_FALCON_ID_INVALID);
return 0;
}
int
gp102_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
{
int ret = nvkm_memory_new(acr->subdev.device, NVKM_MEM_TARGET_INST,
ALIGN(wpr_size, 0x40000) << 1, 0x40000, true,
&acr->wpr);
if (ret)
return ret;
acr->shadow_start = nvkm_memory_addr(acr->wpr);
acr->wpr_start = acr->shadow_start + (nvkm_memory_size(acr->wpr) >> 1);
acr->wpr_end = acr->wpr_start + (nvkm_memory_size(acr->wpr) >> 1);
return 0;
}
u32
gp102_acr_wpr_layout(struct nvkm_acr *acr)
{
struct nvkm_acr_lsfw *lsfw;
u32 wpr = 0;
wpr += 11 /* MAX_LSF */ * sizeof(struct wpr_header_v1);
wpr = ALIGN(wpr, 256);
wpr += 0x100; /* Shared sub-WPR headers. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
wpr = ALIGN(wpr, 256);
lsfw->offset.lsb = wpr;
wpr += sizeof(struct lsb_header_v1);
wpr = ALIGN(wpr, 4096);
lsfw->offset.img = wpr;
wpr += lsfw->img.size;
wpr = ALIGN(wpr, 256);
lsfw->offset.bld = wpr;
lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
wpr += lsfw->bl_data_size;
}
return wpr;
}
int
gp102_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
wpr_header_v1_dump(&acr->subdev, hdr);
lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
}
return 0;
}
MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
gp102_acr_unload_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gm200_acr_unload_0, NVKM_ACR_HSF_PMU, 0x1d, 0x00000010 },
{}
};
int
gp102_acr_load_setup(struct nvkm_falcon_fw *fw)
{
struct flcn_acr_desc_v1 *desc = (void *)&fw->fw.img[fw->dmem_base_img];
struct nvkm_acr *acr = fw->falcon->owner->device->acr;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;
desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;
desc->regions.region_props[0].region_id = 1;
desc->regions.region_props[0].read_mask = 0xf;
desc->regions.region_props[0].write_mask = 0xc;
desc->regions.region_props[0].client_mask = 0x2;
desc->regions.region_props[0].shadow_mem_start_addr = acr->shadow_start >> 8;
flcn_acr_desc_v1_dump(&acr->subdev, desc);
return 0;
}
static const struct nvkm_falcon_fw_func
gp102_acr_load_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.setup = gp102_acr_load_setup,
.load = gm200_flcn_fw_load,
.load_bld = gm200_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
static const struct nvkm_acr_hsf_fwif
gp102_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp102_acr_load_0, NVKM_ACR_HSF_SEC2, 0, 0x00000010 },
{}
};
static const struct nvkm_acr_func
gp102_acr = {
.load = gp102_acr_load_fwif,
.unload = gp102_acr_unload_fwif,
.wpr_parse = gp102_acr_wpr_parse,
.wpr_layout = gp102_acr_wpr_layout,
.wpr_alloc = gp102_acr_wpr_alloc,
.wpr_build = gp102_acr_wpr_build,
.wpr_patch = gp102_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
};
int
gp102_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
struct nvkm_subdev *subdev = &acr->subdev;
const struct nvkm_acr_hsf_fwif *hsfwif;
hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
acr, "acr/bl", "acr/ucode_load", "load");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
acr, "acr/unload_bl", "acr/ucode_unload",
"unload");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
return 0;
}
static const struct nvkm_acr_fwif
gp102_acr_fwif[] = {
{ 0, gp102_acr_load, &gp102_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gp102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gp102_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/mmu.h>
#include <nvfw/flcn.h>
int
gp108_acr_hsfw_load_bld(struct nvkm_falcon_fw *fw)
{
struct flcn_bl_dmem_desc_v2 hsdesc = {
.ctx_dma = FALCON_DMAIDX_VIRT,
.code_dma_base = fw->vma->addr,
.non_sec_code_off = fw->nmem_base,
.non_sec_code_size = fw->nmem_size,
.sec_code_off = fw->imem_base,
.sec_code_size = fw->imem_size,
.code_entry_point = 0,
.data_dma_base = fw->vma->addr + fw->dmem_base_img,
.data_size = fw->dmem_size,
.argc = 0,
.argv = 0,
};
flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &hsdesc);
return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&hsdesc, 0, 0, DMEM, 0, sizeof(hsdesc), 0, 0);
}
const struct nvkm_falcon_fw_func
gp108_acr_hsfw_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.load = gm200_flcn_fw_load,
.load_bld = gp108_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
gp108_acr_unload_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_hsfw_0, NVKM_ACR_HSF_PMU, 0x1d, 0x00000010 },
{}
};
const struct nvkm_falcon_fw_func
gp108_acr_load_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.setup = gp102_acr_load_setup,
.load = gm200_flcn_fw_load,
.load_bld = gp108_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
static const struct nvkm_acr_hsf_fwif
gp108_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_load_0, NVKM_ACR_HSF_SEC2, 0, 0x00000010 },
{}
};
static const struct nvkm_acr_func
gp108_acr = {
.load = gp108_acr_load_fwif,
.unload = gp108_acr_unload_fwif,
.wpr_parse = gp102_acr_wpr_parse,
.wpr_layout = gp102_acr_wpr_layout,
.wpr_alloc = gp102_acr_wpr_alloc,
.wpr_build = gp102_acr_wpr_build,
.wpr_patch = gp102_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
};
static const struct nvkm_acr_fwif
gp108_acr_fwif[] = {
{ 0, gp102_acr_load, &gp108_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gp108_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gp108_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>
#include <subdev/gsp.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
#include <engine/nvdec.h>
static struct nvkm_acr_hsfw *
nvkm_acr_hsfw_find(struct nvkm_acr *acr, const char *name)
{
struct nvkm_acr_hsfw *hsfw;
list_for_each_entry(hsfw, &acr->hsfw, head) {
if (!strcmp(hsfw->fw.fw.name, name))
return hsfw;
}
return NULL;
}
int
nvkm_acr_hsfw_boot(struct nvkm_acr *acr, const char *name)
{
struct nvkm_subdev *subdev = &acr->subdev;
struct nvkm_acr_hsfw *hsfw;
hsfw = nvkm_acr_hsfw_find(acr, name);
if (!hsfw)
return -EINVAL;
return nvkm_falcon_fw_boot(&hsfw->fw, subdev, true, NULL, NULL,
hsfw->boot_mbox0, hsfw->intr_clear);
}
static struct nvkm_acr_lsf *
nvkm_acr_rtos(struct nvkm_acr *acr)
{
struct nvkm_acr_lsf *lsf;
if (acr) {
list_for_each_entry(lsf, &acr->lsf, head) {
if (lsf->func->bootstrap_falcon)
return lsf;
}
}
return NULL;
}
static void
nvkm_acr_unload(struct nvkm_acr *acr)
{
if (acr->done) {
if (acr->rtos) {
nvkm_subdev_unref(acr->rtos->falcon->owner);
acr->rtos = NULL;
}
nvkm_acr_hsfw_boot(acr, "unload");
acr->done = false;
}
}
static int
nvkm_acr_load(struct nvkm_acr *acr)
{
struct nvkm_subdev *subdev = &acr->subdev;
struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
u64 start, limit;
int ret;
if (list_empty(&acr->lsf)) {
nvkm_debug(subdev, "No LSF(s) present.\n");
return 0;
}
ret = acr->func->init(acr);
if (ret)
return ret;
acr->func->wpr_check(acr, &start, &limit);
if (start != acr->wpr_start || limit != acr->wpr_end) {
nvkm_error(subdev, "WPR not configured as expected: "
"%016llx-%016llx vs %016llx-%016llx\n",
acr->wpr_start, acr->wpr_end, start, limit);
return -EIO;
}
acr->done = true;
if (rtos) {
ret = nvkm_subdev_ref(rtos->falcon->owner);
if (ret)
return ret;
acr->rtos = rtos;
}
return ret;
}
static int
nvkm_acr_reload(struct nvkm_acr *acr)
{
nvkm_acr_unload(acr);
return nvkm_acr_load(acr);
}
int
nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
{
struct nvkm_acr *acr = device->acr;
struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
unsigned long id;
/* If there's no LS FW managing bootstrapping of other LS falcons,
* we depend on the HS firmware being able to do it instead.
*/
if (!rtos) {
/* Which isn't possible everywhere... */
if ((mask & acr->func->bootstrap_falcons) == mask) {
int ret = nvkm_acr_reload(acr);
if (ret)
return ret;
return acr->done ? 0 : -EINVAL;
}
return -ENOSYS;
}
if ((mask & rtos->func->bootstrap_falcons) != mask)
return -ENOSYS;
if (rtos->func->bootstrap_multiple_falcons)
return rtos->func->bootstrap_multiple_falcons(rtos->falcon, mask);
for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
int ret = rtos->func->bootstrap_falcon(rtos->falcon, id);
if (ret)
return ret;
}
return 0;
}
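/*
 * Hypothetical caller sketch: an engine wanting its falcons re-bootstrapped
 * after a reset would do something like
 *
 *   ret = nvkm_acr_bootstrap_falcons(device, BIT_ULL(NVKM_ACR_LSF_FECS) |
 *                                            BIT_ULL(NVKM_ACR_LSF_GPCCS));
 *
 * which either asks the managed RTOS LS falcon to bootstrap them, or falls
 * back to reloading the ACR HS firmware when no RTOS is present.
 */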
bool
nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
{
struct nvkm_acr *acr = device->acr;
if (acr) {
if (acr->managed_falcons & BIT_ULL(id))
return true;
}
return false;
}
static int
nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
{
if (!subdev->use.enabled)
return 0;
nvkm_acr_unload(nvkm_acr(subdev));
return 0;
}
static int
nvkm_acr_init(struct nvkm_subdev *subdev)
{
struct nvkm_acr *acr = nvkm_acr(subdev);
if (!nvkm_acr_rtos(acr))
return 0;
return nvkm_acr_load(acr);
}
static void
nvkm_acr_cleanup(struct nvkm_acr *acr)
{
nvkm_acr_lsfw_del_all(acr);
nvkm_firmware_put(acr->wpr_fw);
acr->wpr_fw = NULL;
}
static int
nvkm_acr_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_device *device = subdev->device;
struct nvkm_acr *acr = nvkm_acr(subdev);
struct nvkm_acr_hsfw *hsfw;
struct nvkm_acr_lsfw *lsfw, *lsft;
struct nvkm_acr_lsf *lsf, *rtos;
struct nvkm_falcon *falcon;
u32 wpr_size = 0;
u64 falcons;
int ret, i;
if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
nvkm_debug(subdev, "No HSFW(s)\n");
nvkm_acr_cleanup(acr);
return 0;
}
/* Determine layout/size of WPR image up-front, as we need to know
* it to allocate memory before we begin constructing it.
*/
list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		/* Cull unknown falcons that are present in the WPR image. */
if (acr->wpr_fw) {
if (!lsfw->func) {
nvkm_acr_lsfw_del(lsfw);
continue;
}
wpr_size = acr->wpr_fw->size;
}
/* Ensure we've fetched falcon configuration. */
ret = nvkm_falcon_get(lsfw->falcon, subdev);
if (ret)
return ret;
nvkm_falcon_put(lsfw->falcon, subdev);
if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
return -ENOMEM;
lsf->func = lsfw->func;
lsf->falcon = lsfw->falcon;
lsf->id = lsfw->id;
list_add_tail(&lsf->head, &acr->lsf);
acr->managed_falcons |= BIT_ULL(lsf->id);
}
/* Ensure the falcon that'll provide ACR functions is booted first. */
rtos = nvkm_acr_rtos(acr);
if (rtos) {
falcons = rtos->func->bootstrap_falcons;
list_move(&rtos->head, &acr->lsf);
} else {
falcons = acr->func->bootstrap_falcons;
}
/* Cull falcons that can't be bootstrapped, or the HSFW can fail to
* boot and leave the GPU in a weird state.
*/
list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
if (!(falcons & BIT_ULL(lsfw->id))) {
nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
nvkm_acr_lsf_id(lsfw->id));
nvkm_acr_lsfw_del(lsfw);
}
}
if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
/* Allocate/Locate WPR + fill ucode blob pointer.
*
* dGPU: allocate WPR + shadow blob
* Tegra: locate WPR with regs, ensure size is sufficient,
* allocate ucode blob.
*/
ret = acr->func->wpr_alloc(acr, wpr_size);
if (ret)
return ret;
nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
acr->wpr_start, acr->wpr_end, acr->shadow_start);
/* Write WPR to ucode blob. */
nvkm_kmap(acr->wpr);
if (acr->wpr_fw && !acr->wpr_comp)
nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);
if (!acr->wpr_fw || acr->wpr_comp)
acr->func->wpr_build(acr, rtos);
acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);
if (acr->wpr_fw && acr->wpr_comp) {
nvkm_kmap(acr->wpr);
for (i = 0; i < acr->wpr_fw->size; i += 4) {
u32 us = nvkm_ro32(acr->wpr, i);
u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
if (fw != us) {
nvkm_warn(subdev, "%08x: %08x %08x\n",
i, us, fw);
}
}
return -EINVAL;
}
nvkm_done(acr->wpr);
/* Allocate instance block for ACR-related stuff. */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
&acr->inst);
if (ret)
return ret;
ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
if (ret)
return ret;
acr->vmm->debug = acr->subdev.debug;
ret = nvkm_vmm_join(acr->vmm, acr->inst);
if (ret)
return ret;
/* Load HS firmware blobs into ACR VMM. */
list_for_each_entry(hsfw, &acr->hsfw, head) {
switch (hsfw->falcon_id) {
case NVKM_ACR_HSF_PMU : falcon = &device->pmu->falcon; break;
case NVKM_ACR_HSF_SEC2: falcon = &device->sec2->falcon; break;
case NVKM_ACR_HSF_GSP : falcon = &device->gsp->falcon; break;
default:
WARN_ON(1);
return -EINVAL;
}
ret = nvkm_falcon_fw_oneinit(&hsfw->fw, falcon, acr->vmm, acr->inst);
if (ret)
return ret;
}
/* Kill temporary data. */
nvkm_acr_cleanup(acr);
return 0;
}
static void *
nvkm_acr_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_acr *acr = nvkm_acr(subdev);
struct nvkm_acr_hsfw *hsfw, *hsft;
struct nvkm_acr_lsf *lsf, *lst;
list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
nvkm_falcon_fw_dtor(&hsfw->fw);
list_del(&hsfw->head);
kfree(hsfw);
}
nvkm_vmm_part(acr->vmm, acr->inst);
nvkm_vmm_unref(&acr->vmm);
nvkm_memory_unref(&acr->inst);
nvkm_memory_unref(&acr->wpr);
list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
list_del(&lsf->head);
kfree(lsf);
}
nvkm_acr_cleanup(acr);
return acr;
}
static const struct nvkm_subdev_func
nvkm_acr = {
.dtor = nvkm_acr_dtor,
.oneinit = nvkm_acr_oneinit,
.init = nvkm_acr_init,
.fini = nvkm_acr_fini,
};
static int
nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
{
struct nvkm_subdev *subdev = &acr->subdev;
struct nvkm_device *device = subdev->device;
int ret;
ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
if (ret < 0)
return ret;
/* Pre-add LSFs in the order they appear in the FW WPR image so that
* we're able to do a binary comparison with our own generator.
*/
ret = acr->func->wpr_parse(acr);
if (ret)
return ret;
acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
return 0;
}
int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
struct nvkm_acr *acr;
long wprfw;
if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
INIT_LIST_HEAD(&acr->hsfw);
INIT_LIST_HEAD(&acr->lsfw);
INIT_LIST_HEAD(&acr->lsf);
fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
if (IS_ERR(fwif))
return PTR_ERR(fwif);
acr->func = fwif->func;
wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
if (wprfw >= 0) {
int ret = nvkm_acr_ctor_wpr(acr, wprfw);
if (ret)
return ret;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
#endif
static const struct nvkm_acr_hsf_fwif
gp10b_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gm20b_acr_load_0, NVKM_ACR_HSF_PMU, 0, 0x00000010 },
{}
};
static const struct nvkm_acr_func
gp10b_acr = {
.load = gp10b_acr_load_fwif,
.wpr_parse = gm200_acr_wpr_parse,
.wpr_layout = gm200_acr_wpr_layout,
.wpr_alloc = gm20b_acr_wpr_alloc,
.wpr_build = gm200_acr_wpr_build,
.wpr_patch = gm200_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
};
static const struct nvkm_acr_fwif
gp10b_acr_fwif[] = {
{ 0, gm20b_acr_load, &gp10b_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gp10b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gp10b_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c |
/*
* Copyright 2022 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
MODULE_FIRMWARE("nvidia/gv100/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
gv100_acr_unload_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_hsfw_0, NVKM_ACR_HSF_PMU, 0, 0x00000000 },
{}
};
MODULE_FIRMWARE("nvidia/gv100/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gv100/acr/ucode_load.bin");
static const struct nvkm_acr_hsf_fwif
gv100_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gp108_acr_load_0, NVKM_ACR_HSF_SEC2, 0, 0x00000010 },
{}
};
static const struct nvkm_acr_func
gv100_acr = {
.load = gv100_acr_load_fwif,
.unload = gv100_acr_unload_fwif,
.wpr_parse = gp102_acr_wpr_parse,
.wpr_layout = gp102_acr_wpr_layout,
.wpr_alloc = gp102_acr_wpr_alloc,
.wpr_build = gp102_acr_wpr_build,
.wpr_patch = gp102_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
};
static const struct nvkm_acr_fwif
gv100_acr_fwif[] = {
{ 0, gp102_acr_load, &gv100_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gv100_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gv100_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gv100.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvfw/acr.h>
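/* Walk the WPR headers in the built image and patch each LSF's bootloader
 * data with the final WPR base-address adjustment.
 */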
static int
ga102_acr_wpr_patch(struct nvkm_acr *acr, s64 adjust)
{
struct wpr_header_v2 hdr;
struct lsb_header_v2 *lsb;
struct nvkm_acr_lsfw *lsfw;
u32 offset = 0;
lsb = kvmalloc(sizeof(*lsb), GFP_KERNEL);
if (!lsb)
return -ENOMEM;
do {
nvkm_robj(acr->wpr, offset, &hdr, sizeof(hdr));
wpr_header_v2_dump(&acr->subdev, &hdr);
list_for_each_entry(lsfw, &acr->lsfw, head) {
if (lsfw->id != hdr.wpr.falcon_id)
continue;
nvkm_robj(acr->wpr, hdr.wpr.lsb_offset, lsb, sizeof(*lsb));
lsb_header_v2_dump(&acr->subdev, lsb);
lsfw->func->bld_patch(acr, lsb->bl_data_off, adjust);
break;
}
offset += sizeof(hdr);
} while (hdr.wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID);
kvfree(lsb);
return 0;
}
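/* Emit the LSB header describing one LS firmware, including the HS-FMC (PKC
 * signature) parameters when the firmware carries a secure bootloader.
 */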
static int
ga102_acr_wpr_build_lsb(struct nvkm_acr *acr, struct nvkm_acr_lsfw *lsfw)
{
struct lsb_header_v2 *hdr;
int ret = 0;
if (WARN_ON(lsfw->sig->size != sizeof(hdr->signature)))
return -EINVAL;
hdr = kvzalloc(sizeof(*hdr), GFP_KERNEL);
if (!hdr)
return -ENOMEM;
hdr->hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_LSB_HEADER;
hdr->hdr.version = 2;
hdr->hdr.size = sizeof(*hdr);
memcpy(&hdr->signature, lsfw->sig->data, lsfw->sig->size);
hdr->ucode_off = lsfw->offset.img;
hdr->ucode_size = lsfw->ucode_size;
hdr->data_size = lsfw->data_size;
hdr->bl_code_size = lsfw->bootloader_size;
hdr->bl_imem_off = lsfw->bootloader_imem_offset;
hdr->bl_data_off = lsfw->offset.bld;
hdr->bl_data_size = lsfw->bl_data_size;
hdr->app_code_off = lsfw->app_start_offset + lsfw->app_resident_code_offset;
hdr->app_code_size = ALIGN(lsfw->app_resident_code_size, 0x100);
hdr->app_data_off = lsfw->app_start_offset + lsfw->app_resident_data_offset;
hdr->app_data_size = ALIGN(lsfw->app_resident_data_size, 0x100);
hdr->app_imem_offset = lsfw->app_imem_offset;
hdr->app_dmem_offset = lsfw->app_dmem_offset;
hdr->flags = lsfw->func->flags;
hdr->monitor_code_offset = 0;
hdr->monitor_data_offset = 0;
hdr->manifest_offset = 0;
if (lsfw->secure_bootloader) {
struct nvkm_falcon_fw fw = {
.fw.img = hdr->hs_fmc_params.pkc_signature,
.fw.name = "LSFW",
.func = &(const struct nvkm_falcon_fw_func) {
.signature = ga100_flcn_fw_signature,
},
.sig_size = lsfw->sig_size,
.sig_nr = lsfw->sig_nr,
.sigs = lsfw->sigs,
.fuse_ver = lsfw->fuse_ver,
.engine_id = lsfw->engine_id,
.ucode_id = lsfw->ucode_id,
.falcon = lsfw->falcon,
};
ret = nvkm_falcon_get(fw.falcon, &acr->subdev);
if (ret == 0) {
hdr->hs_fmc_params.hs_fmc = 1;
hdr->hs_fmc_params.pkc_algo = 0;
hdr->hs_fmc_params.pkc_algo_version = 1;
hdr->hs_fmc_params.engid_mask = lsfw->engine_id;
hdr->hs_fmc_params.ucode_id = lsfw->ucode_id;
hdr->hs_fmc_params.fuse_ver = lsfw->fuse_ver;
ret = nvkm_falcon_fw_patch(&fw);
nvkm_falcon_put(fw.falcon, &acr->subdev);
}
}
nvkm_wobj(acr->wpr, lsfw->offset.lsb, hdr, sizeof(*hdr));
kvfree(hdr);
return ret;
}
static int
ga102_acr_wpr_build(struct nvkm_acr *acr, struct nvkm_acr_lsf *rtos)
{
struct nvkm_acr_lsfw *lsfw;
struct wpr_header_v2 hdr;
u32 offset = 0;
int ret;
/*XXX: shared sub-WPR headers, fill terminator for now. */
nvkm_wo32(acr->wpr, 0x300, (2 << 16) | WPR_GENERIC_HEADER_ID_LSF_SHARED_SUB_WPR);
nvkm_wo32(acr->wpr, 0x304, 0x14);
nvkm_wo32(acr->wpr, 0x308, 0xffffffff);
nvkm_wo32(acr->wpr, 0x30c, 0);
nvkm_wo32(acr->wpr, 0x310, 0);
/* Fill per-LSF structures. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
struct lsf_signature_v2 *sig = (void *)lsfw->sig->data;
hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
hdr.hdr.version = 2;
hdr.hdr.size = sizeof(hdr);
hdr.wpr.falcon_id = lsfw->id;
hdr.wpr.lsb_offset = lsfw->offset.lsb;
hdr.wpr.bootstrap_owner = NVKM_ACR_LSF_GSPLITE;
hdr.wpr.lazy_bootstrap = 1;
hdr.wpr.bin_version = sig->ls_ucode_version;
hdr.wpr.status = WPR_HEADER_V1_STATUS_COPY;
/* Write WPR header. */
nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
offset += sizeof(hdr);
/* Write LSB header. */
ret = ga102_acr_wpr_build_lsb(acr, lsfw);
if (ret)
return ret;
/* Write ucode image. */
nvkm_wobj(acr->wpr, lsfw->offset.img,
lsfw->img.data,
lsfw->img.size);
/* Write bootloader data. */
lsfw->func->bld_write(acr, lsfw->offset.bld, lsfw);
}
/* Finalise WPR. */
hdr.hdr.identifier = WPR_GENERIC_HEADER_ID_LSF_WPR_HEADER;
hdr.hdr.version = 2;
hdr.hdr.size = sizeof(hdr);
hdr.wpr.falcon_id = WPR_HEADER_V1_FALCON_ID_INVALID;
nvkm_wobj(acr->wpr, offset, &hdr, sizeof(hdr));
return 0;
}
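/* Compute offsets for each LS firmware within the WPR image: the WPR headers
 * come first, then the shared sub-WPR header area (at 0x300 in practice),
 * then for each LSF its LSB header (256-byte aligned), ucode image (4KiB
 * aligned) and bootloader data (256-byte aligned).
 */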
static u32
ga102_acr_wpr_layout(struct nvkm_acr *acr)
{
struct nvkm_acr_lsfw *lsfw;
u32 wpr = 0;
wpr += 21 /* MAX_LSF */ * sizeof(struct wpr_header_v2);
wpr = ALIGN(wpr, 256);
wpr += 0x100; /* Shared sub-WPR headers. */
list_for_each_entry(lsfw, &acr->lsfw, head) {
wpr = ALIGN(wpr, 256);
lsfw->offset.lsb = wpr;
wpr += sizeof(struct lsb_header_v2);
wpr = ALIGN(wpr, 4096);
lsfw->offset.img = wpr;
wpr += lsfw->img.size;
wpr = ALIGN(wpr, 256);
lsfw->offset.bld = wpr;
lsfw->bl_data_size = ALIGN(lsfw->func->bld_size, 256);
wpr += lsfw->bl_data_size;
}
return wpr;
}
static int
ga102_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header_v2 *hdr = (void *)acr->wpr_fw->data;
while (hdr->wpr.falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
wpr_header_v2_dump(&acr->subdev, hdr);
if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->wpr.falcon_id))
return -ENOMEM;
}
return 0;
}
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_unload.bin");
static const struct nvkm_acr_hsf_fwif
ga102_acr_unload_fwif[] = {
{ 0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_SEC2 },
{}
};
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_asb.bin");
static const struct nvkm_acr_hsf_fwif
ga102_acr_asb_fwif[] = {
{ 0, ga100_acr_hsfw_ctor, &ga102_flcn_fw, NVKM_ACR_HSF_GSP },
{}
};
static const struct nvkm_falcon_fw_func
ga102_acr_ahesasc_0 = {
.signature = ga100_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.setup = gp102_acr_load_setup,
.load = ga102_flcn_fw_load,
.boot = ga102_flcn_fw_boot,
};
MODULE_FIRMWARE("nvidia/ga102/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga103/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga106/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/ga107/acr/ucode_ahesasc.bin");
static const struct nvkm_acr_hsf_fwif
ga102_acr_ahesasc_fwif[] = {
{ 0, ga100_acr_hsfw_ctor, &ga102_acr_ahesasc_0, NVKM_ACR_HSF_SEC2 },
{}
};
static const struct nvkm_acr_func
ga102_acr = {
.ahesasc = ga102_acr_ahesasc_fwif,
.asb = ga102_acr_asb_fwif,
.unload = ga102_acr_unload_fwif,
.wpr_parse = ga102_acr_wpr_parse,
.wpr_layout = ga102_acr_wpr_layout,
.wpr_alloc = gp102_acr_wpr_alloc,
.wpr_patch = ga102_acr_wpr_patch,
.wpr_build = ga102_acr_wpr_build,
.wpr_check = ga100_acr_wpr_check,
.init = tu102_acr_init,
};
static int
ga102_acr_load(struct nvkm_acr *acr, int version,
const struct nvkm_acr_fwif *fwif)
{
struct nvkm_subdev *subdev = &acr->subdev;
const struct nvkm_acr_hsf_fwif *hsfwif;
hsfwif = nvkm_firmware_load(subdev, fwif->func->ahesasc, "AcrAHESASC",
acr, NULL, "acr/ucode_ahesasc", "AHESASC");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->asb, "AcrASB",
acr, NULL, "acr/ucode_asb", "ASB");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
hsfwif = nvkm_firmware_load(subdev, fwif->func->unload, "AcrUnload",
acr, NULL, "acr/ucode_unload", "unload");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
return 0;
}
static const struct nvkm_acr_fwif
ga102_acr_fwif[] = {
{ 0, ga102_acr_load, &ga102_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>
#include <subdev/pmu.h>
#include <nvfw/acr.h>
#include <nvfw/flcn.h>
int
gm20b_acr_wpr_alloc(struct nvkm_acr *acr, u32 wpr_size)
{
struct nvkm_subdev *subdev = &acr->subdev;
acr->func->wpr_check(acr, &acr->wpr_start, &acr->wpr_end);
if ((acr->wpr_end - acr->wpr_start) < wpr_size) {
nvkm_error(subdev, "WPR image too big for WPR!\n");
return -ENOSPC;
}
return nvkm_memory_new(subdev->device, NVKM_MEM_TARGET_INST,
wpr_size, 0, true, &acr->wpr);
}
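/* Write the HS bootloader descriptor into the falcon's DMEM, pointing at the
 * code and data previously mapped into the ACR VMM.
 */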
static int
gm20b_acr_hsfw_load_bld(struct nvkm_falcon_fw *fw)
{
struct flcn_bl_dmem_desc hsdesc = {
.ctx_dma = FALCON_DMAIDX_VIRT,
.code_dma_base = fw->vma->addr >> 8,
.non_sec_code_off = fw->nmem_base,
.non_sec_code_size = fw->nmem_size,
.sec_code_off = fw->imem_base,
.sec_code_size = fw->imem_size,
.code_entry_point = 0,
.data_dma_base = (fw->vma->addr + fw->dmem_base_img) >> 8,
.data_size = fw->dmem_size,
};
flcn_bl_dmem_desc_dump(fw->falcon->user, &hsdesc);
return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&hsdesc, 0, 0, DMEM, 0, sizeof(hsdesc), 0, 0);
}
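/* Patch the ACR descriptor embedded in the HS image so that it points at the
 * ucode (WPR) blob.
 */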
static int
gm20b_acr_load_setup(struct nvkm_falcon_fw *fw)
{
struct flcn_acr_desc *desc = (void *)&fw->fw.img[fw->dmem_base_img];
struct nvkm_acr *acr = fw->falcon->owner->device->acr;
desc->ucode_blob_base = nvkm_memory_addr(acr->wpr);
desc->ucode_blob_size = nvkm_memory_size(acr->wpr);
flcn_acr_desc_dump(&acr->subdev, desc);
return 0;
}
const struct nvkm_falcon_fw_func
gm20b_acr_load_0 = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.setup = gm20b_acr_load_setup,
.load = gm200_flcn_fw_load,
.load_bld = gm20b_acr_hsfw_load_bld,
.boot = gm200_flcn_fw_boot,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
#endif
static const struct nvkm_acr_hsf_fwif
gm20b_acr_load_fwif[] = {
{ 0, gm200_acr_hsfw_ctor, &gm20b_acr_load_0, NVKM_ACR_HSF_PMU, 0, 0x10 },
{}
};
static const struct nvkm_acr_func
gm20b_acr = {
.load = gm20b_acr_load_fwif,
.wpr_parse = gm200_acr_wpr_parse,
.wpr_layout = gm200_acr_wpr_layout,
.wpr_alloc = gm20b_acr_wpr_alloc,
.wpr_build = gm200_acr_wpr_build,
.wpr_patch = gm200_acr_wpr_patch,
.wpr_check = gm200_acr_wpr_check,
.init = gm200_acr_init,
};
int
gm20b_acr_load(struct nvkm_acr *acr, int ver, const struct nvkm_acr_fwif *fwif)
{
struct nvkm_subdev *subdev = &acr->subdev;
const struct nvkm_acr_hsf_fwif *hsfwif;
hsfwif = nvkm_firmware_load(subdev, fwif->func->load, "AcrLoad",
acr, "acr/bl", "acr/ucode_load", "load");
if (IS_ERR(hsfwif))
return PTR_ERR(hsfwif);
return 0;
}
static const struct nvkm_acr_fwif
gm20b_acr_fwif[] = {
{ 0, gm20b_acr_load, &gm20b_acr },
{ -1, gm200_acr_nofw, &gm200_acr },
{}
};
int
gm20b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_acr **pacr)
{
return nvkm_acr_new_(gm20b_acr_fwif, device, type, inst, pacr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/timer.h>
static void
gk104_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
gk104_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
gk104_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
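/* Decode PRIVRING error interrupts, log the reporting HUB/ROP/GPC unit, then
 * acknowledge the interrupt and wait for the ring to go idle.
 */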
void
gk104_privring_intr(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x120058);
u32 intr1 = nvkm_rd32(device, 0x12005c);
u32 hubnr = nvkm_rd32(device, 0x120070);
u32 ropnr = nvkm_rd32(device, 0x120074);
u32 gpcnr = nvkm_rd32(device, 0x120078);
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
gk104_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
gk104_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
gk104_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
break;
);
}
static int
gk104_privring_init(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
return 0;
}
static const struct nvkm_subdev_func
gk104_privring = {
.preinit = gk104_privring_init,
.init = gk104_privring_init,
.intr = gk104_privring_intr,
};
int
gk104_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gk104_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/timer.h>
static void
gf100_privring_intr_hub(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
gf100_privring_intr_rop(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
static void
gf100_privring_intr_gpc(struct nvkm_subdev *privring, int i)
{
struct nvkm_device *device = privring->device;
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
}
void
gf100_privring_intr(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
u32 intr0 = nvkm_rd32(device, 0x121c58);
u32 intr1 = nvkm_rd32(device, 0x121c5c);
u32 hubnr = nvkm_rd32(device, 0x121c70);
u32 ropnr = nvkm_rd32(device, 0x121c74);
u32 gpcnr = nvkm_rd32(device, 0x121c78);
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
gf100_privring_intr_hub(privring, i);
intr0 &= ~stat;
}
}
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
gf100_privring_intr_rop(privring, i);
intr0 &= ~stat;
}
}
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
gf100_privring_intr_gpc(privring, i);
intr1 &= ~stat;
}
}
nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
break;
);
}
static int
gf100_privring_init(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_wr32(device, 0x12232c, 0x00100064);
nvkm_wr32(device, 0x122330, 0x00100064);
nvkm_wr32(device, 0x122334, 0x00100064);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
return 0;
}
static const struct nvkm_subdev_func
gf100_privring = {
.init = gf100_privring_init,
.intr = gf100_privring_intr,
};
int
gf100_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gf100_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c |
/*
 * Copyright 2015 Samuel Pitoiset
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Samuel Pitoiset
*/
#include "priv.h"
static int
gf117_privring_init(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
return 0;
}
static const struct nvkm_subdev_func
gf117_privring = {
.init = gf117_privring_init,
.intr = gf100_privring_intr,
};
int
gf117_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gf117_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static const struct nvkm_subdev_func
gm200_privring = {
.intr = gk104_privring_intr,
};
int
gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c |
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <subdev/privring.h>
#include "priv.h"
static int
gp10b_privring_init(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
nvkm_wr32(device, 0x1200a8, 0x0);
/* init ring */
nvkm_wr32(device, 0x12004c, 0x4);
nvkm_wr32(device, 0x122204, 0x2);
nvkm_rd32(device, 0x122204);
/* timeout configuration */
nvkm_wr32(device, 0x009080, 0x800186a0);
return 0;
}
static const struct nvkm_subdev_func
gp10b_privring = {
.init = gp10b_privring_init,
.intr = gk104_privring_intr,
};
int
gp10b_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gp10b_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <subdev/privring.h>
#include <subdev/timer.h>
static void
gk20a_privring_init_privring_ring(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
nvkm_mask(device, 0x137250, 0x3f, 0);
nvkm_mask(device, 0x000200, 0x20, 0);
udelay(20);
nvkm_mask(device, 0x000200, 0x20, 0x20);
nvkm_wr32(device, 0x12004c, 0x4);
nvkm_wr32(device, 0x122204, 0x2);
nvkm_rd32(device, 0x122204);
/*
* Bug: increase clock timeout to avoid operation failure at high
* gpcclk rate.
*/
nvkm_wr32(device, 0x122354, 0x800);
nvkm_wr32(device, 0x128328, 0x800);
nvkm_wr32(device, 0x124320, 0x800);
}
static void
gk20a_privring_intr(struct nvkm_subdev *privring)
{
struct nvkm_device *device = privring->device;
u32 status0 = nvkm_rd32(device, 0x120058);
if (status0 & 0x7) {
nvkm_debug(privring, "resetting privring ring\n");
gk20a_privring_init_privring_ring(privring);
}
/* Acknowledge interrupt */
nvkm_mask(device, 0x12004c, 0x2, 0x2);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
break;
);
}
static int
gk20a_privring_init(struct nvkm_subdev *privring)
{
gk20a_privring_init_privring_ring(privring);
return 0;
}
static const struct nvkm_subdev_func
gk20a_privring = {
.init = gk20a_privring_init,
.intr = gk20a_privring_intr,
};
int
gk20a_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pprivring)
{
return nvkm_subdev_new_(&gk20a_privring, device, type, inst, pprivring);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <nvif/if000d.h>
#include <nvif/unpack.h>
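/* NV04 page tables are a single flat array of 32-bit PTEs starting 8 bytes
 * into the object (after the PT header written in nv04_vmm_new()); each entry
 * maps one 4KiB page as present and writable.
 */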
static inline void
nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u32 data = addr | 0x00000003; /* PRESENT, RW. */
while (ptes--) {
VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
data += 0x00001000;
}
}
static void
nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
}
static void
nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
nvkm_kmap(pt->memory);
while (ptes--)
VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
nvkm_done(pt->memory);
#else
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
#endif
}
static void
nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
}
static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = {
.unmap = nv04_vmm_pgt_unmap,
.dma = nv04_vmm_pgt_dma,
.sgl = nv04_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc
nv04_vmm_desc_12[] = {
{ PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
{}
};
int
nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
union {
struct nv04_vmm_map_vn vn;
} *args = argv;
int ret = -ENOSYS;
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
VMM_DEBUG(vmm, "args");
return ret;
}
static const struct nvkm_vmm_func
nv04_vmm = {
.valid = nv04_vmm_valid,
.page = {
{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}
}
};
int
nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
u32 pd_header, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
union {
struct nv04_vmm_vn vn;
} *args = argv;
int ret;
ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
key, name, pvmm);
if (ret)
return ret;
return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
}
int
nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
struct nvkm_memory *mem;
struct nvkm_vmm *vmm;
int ret;
ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
argv, argc, key, name, &vmm);
*pvmm = vmm;
if (ret)
return ret;
mem = vmm->pd->pt[0]->memory;
nvkm_kmap(mem);
nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
nvkm_wo32(mem, 0x00004, vmm->limit - 1);
nvkm_done(mem);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
const struct nvkm_mmu_func
nv04_mmu = {
.dma_bits = 32,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
};
int
nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>
#include <nvif/ifc00d.h>
#include <nvif/unpack.h>
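/* PFN (HMM) mappings: system pages are DMA-mapped when written by the ->pfn
 * hook below, so the corresponding clear/unmap hooks must release those DMA
 * mappings when the PTEs are invalidated.
 */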
static void
gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
struct device *dev = vmm->mmu->subdev.device->dev;
dma_addr_t addr;
nvkm_kmap(pt->memory);
while (ptes--) {
u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
u64 data = (u64)datahi << 32 | datalo;
if ((data & (3ULL << 1)) != 0) {
addr = (data >> 8) << 12;
dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
ptei++;
}
nvkm_done(pt->memory);
}
static bool
gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
bool dma = false;
nvkm_kmap(pt->memory);
while (ptes--) {
u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
u64 data = (u64)datahi << 32 | datalo;
if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
dma = true;
}
ptei++;
}
nvkm_done(pt->memory);
return dma;
}
static void
gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
struct device *dev = vmm->mmu->subdev.device->dev;
dma_addr_t addr;
nvkm_kmap(pt->memory);
for (; ptes; ptes--, map->pfn++) {
u64 data = 0;
if (!(*map->pfn & NVKM_VMM_PFN_V))
continue;
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
if (!(*map->pfn & NVKM_VMM_PFN_A))
data |= BIT_ULL(7); /* Atomic disable. */
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (!WARN_ON(dma_mapping_error(dev, addr))) {
data |= addr >> 4;
data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
data |= BIT_ULL(3); /* VOL. */
data |= BIT_ULL(0); /* VALID. */
}
} else {
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
data |= BIT_ULL(0); /* VALID. */
}
VMM_WO064(pt, vmm, ptei++ * 8, data);
}
nvkm_done(pt->memory);
}
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u64 data = (addr >> 4) | map->type;
map->type += ptes * map->ctag;
while (ptes--) {
VMM_WO064(pt, vmm, ptei++ * 8, data);
data += map->next;
}
}
static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}
static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
if (map->page->shift == PAGE_SHIFT) {
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
const u64 data = (*map->dma++ >> 4) | map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
nvkm_done(pt->memory);
return;
}
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}
static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}
static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}
static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
.unmap = gf100_vmm_pgt_unmap,
.sparse = gp100_vmm_pgt_sparse,
.mem = gp100_vmm_pgt_mem,
.dma = gp100_vmm_pgt_dma,
.sgl = gp100_vmm_pgt_sgl,
.pfn = gp100_vmm_pgt_pfn,
.pfn_clear = gp100_vmm_pfn_clear,
.pfn_unmap = gp100_vmm_pfn_unmap,
};
static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}
static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
.invalid = gp100_vmm_lpt_invalid,
.unmap = gf100_vmm_pgt_unmap,
.sparse = gp100_vmm_pgt_sparse,
.mem = gp100_vmm_pgt_mem,
};
static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u64 data = (addr >> 4) | map->type;
map->type += ptes * map->ctag;
while (ptes--) {
VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
data += map->next;
}
}
static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}
static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
switch (nvkm_memory_target(pt->memory)) {
case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
*data |= BIT_ULL(3); /* VOL. */
break;
case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
default:
WARN_ON(1);
return false;
}
*data |= pt->addr >> 4;
return true;
}
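/* Write a 128-bit dual PDE, filling in a pointer for whichever of the two
 * page tables (one per page size) actually exist.
 */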
static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
struct nvkm_mmu_pt *pd = pgd->pt[0];
u64 data[2] = {};
if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
return;
if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
return;
nvkm_kmap(pd->memory);
VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
nvkm_done(pd->memory);
}
static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}
static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}
static void
gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
struct device *dev = vmm->mmu->subdev.device->dev;
dma_addr_t addr;
nvkm_kmap(pt->memory);
while (ptes--) {
u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
u64 data = (u64)datahi << 32 | datalo;
if ((data & (3ULL << 1)) != 0) {
addr = (data >> 8) << 12;
dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
}
ptei++;
}
nvkm_done(pt->memory);
}
static bool
gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
bool dma = false;
nvkm_kmap(pt->memory);
while (ptes--) {
u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
u64 data = (u64)datahi << 32 | datalo;
if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
dma = true;
}
ptei++;
}
nvkm_done(pt->memory);
return dma;
}
static void
gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
struct device *dev = vmm->mmu->subdev.device->dev;
dma_addr_t addr;
nvkm_kmap(pt->memory);
for (; ptes; ptes--, map->pfn++) {
u64 data = 0;
if (!(*map->pfn & NVKM_VMM_PFN_V))
continue;
if (!(*map->pfn & NVKM_VMM_PFN_W))
data |= BIT_ULL(6); /* RO. */
if (!(*map->pfn & NVKM_VMM_PFN_A))
data |= BIT_ULL(7); /* Atomic disable. */
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
addr = dma_map_page(dev, pfn_to_page(addr), 0,
1UL << 21, DMA_BIDIRECTIONAL);
if (!WARN_ON(dma_mapping_error(dev, addr))) {
data |= addr >> 4;
data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
data |= BIT_ULL(3); /* VOL. */
data |= BIT_ULL(0); /* VALID. */
}
} else {
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
data |= BIT_ULL(0); /* VALID. */
}
VMM_WO064(pt, vmm, ptei++ * 16, data);
}
nvkm_done(pt->memory);
}
static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
.unmap = gp100_vmm_pd0_unmap,
.sparse = gp100_vmm_pd0_sparse,
.pde = gp100_vmm_pd0_pde,
.mem = gp100_vmm_pd0_mem,
.pfn = gp100_vmm_pd0_pfn,
.pfn_clear = gp100_vmm_pd0_pfn_clear,
.pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};
static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
struct nvkm_mmu_pt *pd = pgd->pt[0];
u64 data = 0;
if (!gp100_vmm_pde(pgt->pt[0], &data))
return;
nvkm_kmap(pd->memory);
VMM_WO064(pd, vmm, pdei * 8, data);
nvkm_done(pd->memory);
}
static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
.unmap = gf100_vmm_pgt_unmap,
.sparse = gp100_vmm_pgt_sparse,
.pde = gp100_vmm_pd1_pde,
};
const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
{ LPT, 5, 8, 0x0100, &gp100_vmm_desc_lpt },
{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
{ PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
{ PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
{ PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
{}
};
const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
{ SPT, 9, 8, 0x1000, &gp100_vmm_desc_spt },
{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
{ PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
{ PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
{ PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
{}
};
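/* Validate map arguments and encode the PTE template: aperture, volatility,
 * privilege, read-only and kind bits, plus compression tags when the kind
 * requires them.
 */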
int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
const struct nvkm_vmm_page *page = map->page;
union {
struct gp100_vmm_map_vn vn;
struct gp100_vmm_map_v0 v0;
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
map->next = (1ULL << page->shift) >> 4;
map->type = 0;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
vol = !!args->v0.vol;
ro = !!args->v0.ro;
priv = !!args->v0.priv;
kind = args->v0.kind;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
vol = target == NVKM_MEM_TARGET_HOST;
ro = 0;
priv = 0;
kind = 0x00;
} else {
VMM_DEBUG(vmm, "args");
return ret;
}
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
if (kindm[kind] != kind) {
u64 tags = nvkm_memory_size(memory) >> 16;
if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
return -EINVAL;
}
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
}
if (!map->no_comp && map->tags->mn) {
tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= ((1ULL << page->shift) >> 16) << 36;
map->type |= tags << 36;
map->next |= map->ctag;
} else {
kind = kindm[kind];
}
}
map->type |= BIT(0);
map->type |= (u64)aper << 1;
map->type |= (u64) vol << 3;
map->type |= (u64)priv << 5;
map->type |= (u64) ro << 6;
map->type |= (u64)kind << 56;
return 0;
}
static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
union {
struct gp100_vmm_fault_cancel_v0 v0;
} *args = argv;
int ret = -ENOSYS;
u32 aper;
if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
return ret;
/* Translate MaxwellFaultBufferA instance pointer to the same
* format as the NV_GR_FECS_CURRENT_CTX register.
*/
aper = (args->v0.inst >> 8) & 3;
args->v0.inst >>= 12;
args->v0.inst |= aper << 28;
args->v0.inst |= 0x80000000;
if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
if (nvkm_gr_ctxsw_inst(device) == args->v0.inst) {
gf100_vmm_invalidate(vmm, 0x0000001b
/* CANCEL_TARGETED. */ |
(args->v0.hub << 20) |
(args->v0.gpc << 15) |
(args->v0.client << 9));
}
WARN_ON(nvkm_gr_ctxsw_resume(device));
}
return 0;
}
static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
union {
struct gp100_vmm_fault_replay_vn vn;
} *args = argv;
int ret = -ENOSYS;
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
}
return ret;
}
int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
switch (mthd) {
case GP100_VMM_VN_FAULT_REPLAY:
return gp100_vmm_fault_replay(vmm, argv, argc);
case GP100_VMM_VN_FAULT_CANCEL:
return gp100_vmm_fault_cancel(vmm, argv, argc);
default:
break;
}
return -EINVAL;
}
void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
}
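/* TLB flush.  "depth" is how many page-table levels may have changed,
 * converted here to the CACHE_LEVEL_UP_TO_PDEn encoding; a HUB_ONLY
 * flush is requested when the BAR subdev holds a reference on this VMM.
 */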
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
type |= 0x00000001; /* PAGE_ALL */
gf100_vmm_invalidate(vmm, type);
}
int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
if (vmm->replay) {
base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
}
return gf100_vmm_join_(vmm, inst, base);
}
static const struct nvkm_vmm_func
gp100_vmm = {
.join = gp100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
};
int
gp100_vmm_new_(const struct nvkm_vmm_func *func,
struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
union {
struct gp100_vmm_vn vn;
struct gp100_vmm_v0 v0;
} *args = argv;
int ret = -ENOSYS;
bool replay;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
replay = args->v0.fault_replay != 0;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
replay = false;
} else
return ret;
ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
if (ret)
return ret;
(*pvmm)->replay = replay;
return 0;
}
int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
static const struct nvkm_mmu_func
gk104_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
.kind = gf100_mmu_kind,
.kind_sys = true,
};
int
gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/timer.h>
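/* NV44 page tables pack four 27-bit page frame numbers into each 16-byte
 * group, so individual PTEs straddle 32-bit word boundaries.  This helper
 * read-modify-writes a partially filled group; "list" supplies the new
 * page addresses, or NULL to point the entries at the dummy (null) page.
 * The 0x40000000 bit written with the last word appears to mark the group
 * as valid.
 */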
static void
nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
dma_addr_t *list, u32 ptei, u32 ptes)
{
u32 pteo = (ptei << 2) & ~0x0000000f;
u32 tmp[4];
tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
while (ptes--) {
u32 addr = (list ? *list++ : vmm->null) >> 12;
switch (ptei++ & 0x3) {
case 0:
tmp[0] &= ~0x07ffffff;
tmp[0] |= addr;
break;
case 1:
tmp[0] &= ~0xf8000000;
tmp[0] |= addr << 27;
tmp[1] &= ~0x003fffff;
tmp[1] |= addr >> 5;
break;
case 2:
tmp[1] &= ~0xffc00000;
tmp[1] |= addr << 22;
tmp[2] &= ~0x0001ffff;
tmp[2] |= addr >> 10;
break;
case 3:
tmp[2] &= ~0xfffe0000;
tmp[2] |= addr << 17;
tmp[3] &= ~0x00000fff;
tmp[3] |= addr >> 15;
break;
}
}
VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
}
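/* Write PTEs for a contiguous physical range: unaligned head/tail entries
 * go through nv44_vmm_pgt_fill(), fully aligned groups of four are packed
 * and written directly.
 */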
static void
nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
dma_addr_t tmp[4], i;
if (ptei & 3) {
const u32 pten = min(ptes, 4 - (ptei & 3));
for (i = 0; i < pten; i++, addr += 0x1000)
tmp[i] = addr;
nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
ptei += pten;
ptes -= pten;
}
while (ptes >= 4) {
for (i = 0; i < 4; i++, addr += 0x1000)
tmp[i] = addr >> 12;
VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
ptes -= 4;
}
if (ptes) {
for (i = 0; i < ptes; i++, addr += 0x1000)
tmp[i] = addr;
nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
}
}
static void
nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
}
static void
nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
nvkm_kmap(pt->memory);
if (ptei & 3) {
const u32 pten = min(ptes, 4 - (ptei & 3));
nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
ptei += pten;
ptes -= pten;
map->dma += pten;
}
while (ptes >= 4) {
u32 tmp[4], i;
for (i = 0; i < 4; i++)
tmp[i] = *map->dma++ >> 12;
VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
ptes -= 4;
}
if (ptes) {
nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
map->dma += ptes;
}
nvkm_done(pt->memory);
#else
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
#endif
}
static void
nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
nvkm_kmap(pt->memory);
if (ptei & 3) {
const u32 pten = min(ptes, 4 - (ptei & 3));
nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
ptei += pten;
ptes -= pten;
}
while (ptes > 4) {
VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
ptes -= 4;
}
if (ptes)
nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
nvkm_done(pt->memory);
}
static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
.unmap = nv44_vmm_pgt_unmap,
.dma = nv44_vmm_pgt_dma,
.sgl = nv44_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc
nv44_vmm_desc_12[] = {
{ PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
{}
};
static void
nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
nvkm_wr32(device, 0x100814, vmm->limit - 4096);
nvkm_wr32(device, 0x100808, 0x000000020);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100808) & 0x00000001)
break;
);
nvkm_wr32(device, 0x100808, 0x00000000);
}
static const struct nvkm_vmm_func
nv44_vmm = {
.valid = nv04_vmm_valid,
.flush = nv44_vmm_flush,
.page = {
{ 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}
}
};
int
nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
struct nvkm_subdev *subdev = &mmu->subdev;
struct nvkm_vmm *vmm;
int ret;
ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, &vmm);
*pvmm = vmm;
if (ret)
return ret;
vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
&vmm->null, GFP_KERNEL);
if (!vmm->nullp) {
nvkm_warn(subdev, "unable to allocate dummy pages\n");
vmm->null = 0;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
static void
nv44_mmu_init(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
u32 addr;
/* Calculate the VRAM address of this PRAMIN block.  The object must be
 * allocated with 512KiB alignment, and must not exceed 512KiB in total
 * size, for this to work correctly.
 */
addr = nvkm_rd32(device, 0x10020c);
addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;
nvkm_wr32(device, 0x100850, 0x80000000);
nvkm_wr32(device, 0x100818, mmu->vmm->null);
nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
nvkm_wr32(device, 0x100850, 0x00008000);
nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
nvkm_wr32(device, 0x100820, 0x00000000);
nvkm_wr32(device, 0x10082c, 0x00000001);
nvkm_wr32(device, 0x100800, addr | 0x00000010);
}
static const struct nvkm_mmu_func
nv44_mmu = {
.init = nv44_mmu_init,
.dma_bits = 39,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
};
int
nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, type, inst, pmmu);
return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c |
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
const u8 *
gf100_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
*invalid = 0xff;
return kind;
}
static const struct nvkm_mmu_func
gf100_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
.kind = gf100_mmu_kind,
.kind_sys = true,
};
int
gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
static const struct nvkm_vmm_func
mcp77_vmm = {
.join = nv50_vmm_join,
.part = nv50_vmm_part,
.valid = nv50_vmm_valid,
.flush = nv50_vmm_flush,
.page_block = 1 << 29,
.page = {
{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
int
mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
static const struct nvkm_vmm_func
gm20b_vmm_17 = {
.join = gm200_vmm_join,
.part = gf100_vmm_part,
.aper = gk20a_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
{ 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SxHx },
{}
}
};
static const struct nvkm_vmm_func
gm20b_vmm_16 = {
.join = gm200_vmm_join,
.part = gf100_vmm_part,
.aper = gk20a_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
{ 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SxHx },
{}
}
};
int
gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
int
gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define NVKM_VMM_LEVELS_MAX 5
#include "vmm.h"
#include <subdev/fb.h>
static void
nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
{
struct nvkm_vmm_pt *pgt = *ppgt;
if (pgt) {
kvfree(pgt->pde);
kfree(pgt);
*ppgt = NULL;
}
}
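/* Allocate the software shadow of a page table/directory.  PGDs get an
 * array of child pointers; for dual-PT levels the trailing pte[] bytes
 * track, per large PTE, how many small PTEs beneath it are in use, plus
 * state flags.
 */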
static struct nvkm_vmm_pt *
nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
const struct nvkm_vmm_page *page)
{
const u32 pten = 1 << desc->bits;
struct nvkm_vmm_pt *pgt;
u32 lpte = 0;
if (desc->type > PGT) {
if (desc->type == SPT) {
const struct nvkm_vmm_desc *pair = page[-1].desc;
lpte = pten >> (desc->bits - pair->bits);
} else {
lpte = pten;
}
}
if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
return NULL;
pgt->page = page ? page->shift : 0;
pgt->sparse = sparse;
if (desc->type == PGD) {
pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
if (!pgt->pde) {
kfree(pgt);
return NULL;
}
}
return pgt;
}
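/* Walker state for nvkm_vmm_iter(): the current PTE index and software PT
 * at each level, the remaining PTE count, and how close to the root the
 * walk has dirtied entries since the last TLB flush.
 */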
struct nvkm_vmm_iter {
const struct nvkm_vmm_page *page;
const struct nvkm_vmm_desc *desc;
struct nvkm_vmm *vmm;
u64 cnt;
u16 max, lvl;
u32 pte[NVKM_VMM_LEVELS_MAX];
struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
int flush;
};
#ifdef CONFIG_NOUVEAU_DEBUG_MMU
static const char *
nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
{
switch (desc->type) {
case PGD: return "PGD";
case PGT: return "PGT";
case SPT: return "SPT";
case LPT: return "LPT";
default:
return "UNKNOWN";
}
}
static void
nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
{
int lvl;
for (lvl = it->max; lvl >= 0; lvl--) {
if (lvl >= it->lvl)
buf += sprintf(buf, "%05x:", it->pte[lvl]);
else
buf += sprintf(buf, "xxxxx:");
}
}
#define TRA(i,f,a...) do { \
char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
struct nvkm_vmm_iter *_it = (i); \
nvkm_vmm_trace(_it, _buf); \
VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
} while(0)
#else
#define TRA(i,f,a...)
#endif
static inline void
nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
{
it->flush = min(it->flush, it->max - it->lvl);
}
static inline void
nvkm_vmm_flush(struct nvkm_vmm_iter *it)
{
if (it->flush != NVKM_VMM_LEVELS_MAX) {
if (it->vmm->func->flush) {
TRA(it, "flush: %d", it->flush);
it->vmm->func->flush(it->vmm, it->flush);
}
it->flush = NVKM_VMM_LEVELS_MAX;
}
}
static void
nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc[it->lvl].type == SPT;
struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
struct nvkm_mmu_pt *pt = pgt->pt[type];
struct nvkm_vmm *vmm = it->vmm;
u32 pdei = it->pte[it->lvl + 1];
/* Recurse up the tree, unreferencing/destroying unneeded PDs. */
it->lvl++;
if (--pgd->refs[0]) {
const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
/* PD has other valid PDEs, so we need a proper update. */
TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
pgt->pt[type] = NULL;
if (!pgt->refs[!type]) {
/* PDE no longer required. */
if (pgd->pt[0]) {
if (pgt->sparse) {
func->sparse(vmm, pgd->pt[0], pdei, 1);
pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
} else {
func->unmap(vmm, pgd->pt[0], pdei, 1);
pgd->pde[pdei] = NULL;
}
} else {
/* Special handling for Tesla-class GPUs,
* where there's no central PD, but each
* instance has its own embedded PD.
*/
func->pde(vmm, pgd, pdei);
pgd->pde[pdei] = NULL;
}
} else {
/* PDE was pointing at dual-PTs and we're removing
* one of them, leaving the other in place.
*/
func->pde(vmm, pgd, pdei);
}
/* GPU may have cached the PTs, flush before freeing. */
nvkm_vmm_flush_mark(it);
nvkm_vmm_flush(it);
} else {
/* PD has no valid PDEs left, so we can just destroy it. */
nvkm_vmm_unref_pdes(it);
}
/* Destroy PD/PT. */
TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
if (!pgt->refs[!type])
nvkm_vmm_pt_del(&pgt);
it->lvl--;
}
static void
nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *pair = it->page[-1].desc;
const u32 sptb = desc->bits - pair->bits;
const u32 sptn = 1 << sptb;
struct nvkm_vmm *vmm = it->vmm;
u32 spti = ptei & (sptn - 1), lpti, pteb;
/* Determine how many SPTEs are being touched under each LPTE,
* and drop reference counts.
*/
for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
const u32 pten = min(sptn - spti, ptes);
pgt->pte[lpti] -= pten;
ptes -= pten;
}
/* We're done here if there's no corresponding LPT. */
if (!pgt->refs[0])
return;
for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
/* Skip over any LPTEs that still have valid SPTEs. */
if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
break;
}
continue;
}
/* As there are no more non-UNMAPPED SPTEs left in the range
 * covered by a number of LPTEs, the LPTEs once again take
 * control over their address range.
 *
 * Determine how many LPTEs need to transition state.
 */
pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
break;
pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
}
if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
} else
if (pair->func->invalid) {
/* If the MMU supports it, restore the LPTE to the
* INVALID state to tell the MMU there is no point
* trying to fetch the corresponding SPTEs.
*/
TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
}
}
}
static bool
nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
struct nvkm_vmm_pt *pgt = it->pt[0];
bool dma;
if (pfn) {
/* Need to clear PTE valid bits before we dma_unmap_page(). */
dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
if (dma) {
/* GPU may have cached the PT, flush before unmap. */
nvkm_vmm_flush_mark(it);
nvkm_vmm_flush(it);
desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
}
}
/* Drop PTE references. */
pgt->refs[type] -= ptes;
/* Dual-PTs need special handling, unless PDE becoming invalid. */
if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
/* PT no longer needed? Destroy it. */
if (!pgt->refs[type]) {
it->lvl++;
TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
it->lvl--;
nvkm_vmm_unref_pdes(it);
return false; /* PTE writes for unmap() not necessary. */
}
return true;
}
static void
nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *pair = it->page[-1].desc;
const u32 sptb = desc->bits - pair->bits;
const u32 sptn = 1 << sptb;
struct nvkm_vmm *vmm = it->vmm;
u32 spti = ptei & (sptn - 1), lpti, pteb;
/* Determine how many SPTEs are being touched under each LPTE,
* and increase reference counts.
*/
for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
const u32 pten = min(sptn - spti, ptes);
pgt->pte[lpti] += pten;
ptes -= pten;
}
/* We're done here if there's no corresponding LPT. */
if (!pgt->refs[0])
return;
for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
/* Skip over any LPTEs that already have valid SPTEs. */
if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
break;
}
continue;
}
/* As there are now non-UNMAPPED SPTEs in the range covered
* by a number of LPTEs, we need to transfer control of the
* address range to the SPTEs.
*
* Determine how many LPTEs need to transition state.
*/
pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
break;
pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
}
if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
const u32 spti = pteb * sptn;
const u32 sptc = ptes * sptn;
/* The entire LPTE is marked as sparse, so we need
 * to make sure that the SPTEs are too.
 */
TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
/* Sparse LPTEs prevent SPTEs from being accessed. */
TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
} else
if (pair->func->invalid) {
/* MMU supports blocking SPTEs by marking an LPTE
* as INVALID. We need to reverse that here.
*/
TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
}
}
}
static bool
nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
struct nvkm_vmm_pt *pgt = it->pt[0];
/* Take PTE references. */
pgt->refs[type] += ptes;
/* Dual-PTs need special handling. */
if (desc->type == SPT)
nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
return true;
}
static void
nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
{
if (desc->type == PGD) {
while (ptes--)
pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
} else
if (desc->type == LPT) {
memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
}
}
static bool
nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
struct nvkm_vmm_pt *pt = it->pt[0];
if (it->desc->type == PGD)
memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
else
if (it->desc->type == LPT)
memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
}
static bool
nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
}
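/* Allocate the GPU backing for a page table, initialise its PTEs to a safe
 * state (sparse/invalid/unmapped, honouring any SPTEs that already cover
 * the range), and write the PDE into the parent directory.
 */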
static bool
nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
{
const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
const int type = desc->type == SPT;
struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
const bool zero = !pgt->sparse && !desc->func->invalid;
struct nvkm_vmm *vmm = it->vmm;
struct nvkm_mmu *mmu = vmm->mmu;
struct nvkm_mmu_pt *pt;
u32 pten = 1 << desc->bits;
u32 pteb, ptei, ptes;
u32 size = desc->size * pten;
pgd->refs[0]++;
pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
if (!pgt->pt[type]) {
it->lvl--;
nvkm_vmm_unref_pdes(it);
return false;
}
if (zero)
goto done;
pt = pgt->pt[type];
if (desc->type == LPT && pgt->refs[1]) {
/* SPT already exists covering the same range as this LPT,
* which means we need to be careful that any LPTEs which
* overlap valid SPTEs are unmapped as opposed to invalid
* or sparse, which would prevent the MMU from looking at
* the SPTEs on some GPUs.
*/
for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
if (spte != next)
break;
}
if (!spte) {
if (pgt->sparse)
desc->func->sparse(vmm, pt, pteb, ptes);
else
desc->func->invalid(vmm, pt, pteb, ptes);
memset(&pgt->pte[pteb], 0x00, ptes);
} else {
desc->func->unmap(vmm, pt, pteb, ptes);
while (ptes--)
pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
}
}
} else {
if (pgt->sparse) {
nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
desc->func->sparse(vmm, pt, 0, pten);
} else {
desc->func->invalid(vmm, pt, 0, pten);
}
}
done:
TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
nvkm_vmm_flush_mark(it);
return true;
}
static bool
nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
{
const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
if (!pgt) {
if (!pgd->refs[0])
nvkm_vmm_unref_pdes(it);
return false;
}
pgd->pde[pdei] = pgt;
return true;
}
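/* Core page-table walker.  Breaks [addr, addr + size) into runs of PTEs at
 * the requested page size, walking down through the directories (optionally
 * allocating them when "ref" is set) and invoking the supplied callbacks
 * for each run.  Returns ~0ULL on success, or the failing address so the
 * caller can unwind a partially completed operation.
 */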
static inline u64
nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, const char *name, bool ref, bool pfn,
bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
nvkm_vmm_pxe_func CLR_PTES)
{
const struct nvkm_vmm_desc *desc = page->desc;
struct nvkm_vmm_iter it;
u64 bits = addr >> page->shift;
it.page = page;
it.desc = desc;
it.vmm = vmm;
it.cnt = size >> page->shift;
it.flush = NVKM_VMM_LEVELS_MAX;
/* Deconstruct address into PTE indices for each mapping level. */
for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
bits >>= desc[it.lvl].bits;
}
it.max = --it.lvl;
it.pt[it.max] = vmm->pd;
it.lvl = 0;
TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
addr, size, page->shift, it.cnt);
it.lvl = it.max;
/* Depth-first traversal of page tables. */
while (it.cnt) {
struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
const int type = desc->type == SPT;
const u32 pten = 1 << desc->bits;
const u32 ptei = it.pte[0];
const u32 ptes = min_t(u64, it.cnt, pten - ptei);
/* Walk down the tree, finding page tables for each level. */
for (; it.lvl; it.lvl--) {
const u32 pdei = it.pte[it.lvl];
struct nvkm_vmm_pt *pgd = pgt;
/* Software PT. */
if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
goto fail;
}
it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
/* Hardware PT.
*
* This is a separate step from above due to GF100 and
* newer having dual page tables at some levels, which
* are refcounted independently.
*/
if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
goto fail;
}
}
/* Handle PTE updates. */
if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
struct nvkm_mmu_pt *pt = pgt->pt[type];
if (MAP_PTES || CLR_PTES) {
if (MAP_PTES)
MAP_PTES(vmm, pt, ptei, ptes, map);
else
CLR_PTES(vmm, pt, ptei, ptes);
nvkm_vmm_flush_mark(&it);
}
}
/* Walk back up the tree to the next position. */
it.pte[it.lvl] += ptes;
it.cnt -= ptes;
if (it.cnt) {
while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
it.pte[it.lvl++] = 0;
it.pte[it.lvl]++;
}
}
}
nvkm_vmm_flush(&it);
return ~0ULL;
fail:
/* Reconstruct the failure address so the caller is able to
* reverse any partially completed operations.
*/
addr = it.pte[it.max--];
do {
addr = addr << desc[it.max].bits;
addr |= it.pte[it.max];
} while (it.max--);
return addr << page->shift;
}
static void
nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
nvkm_vmm_sparse_unref_ptes, NULL, NULL,
page->desc->func->invalid ?
page->desc->func->invalid : page->desc->func->unmap);
}
static int
nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
true, false, nvkm_vmm_sparse_ref_ptes,
NULL, NULL, page->desc->func->sparse);
if (fail != ~0ULL) {
if ((size = fail - addr))
nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
return -ENOMEM;
}
return 0;
}
return -EINVAL;
}
static int
nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{
const struct nvkm_vmm_page *page = vmm->func->page;
int m = 0, i;
u64 start = addr;
u64 block;
while (size) {
/* Limit maximum page size based on remaining size. */
while (size < (1ULL << page[m].shift))
m++;
i = m;
/* Find largest page size suitable for alignment. */
while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
i++;
/* Determine number of PTEs at this page size. */
if (i != m) {
/* Limited to alignment boundary of next page size. */
u64 next = 1ULL << page[i - 1].shift;
u64 part = ALIGN(addr, next) - addr;
if (size - part >= next)
block = (part >> page[i].shift) << page[i].shift;
else
block = (size >> page[i].shift) << page[i].shift;
} else {
block = (size >> page[i].shift) << page[i].shift;
}
/* Perform operation. */
if (ref) {
int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
if (ret) {
if ((size = addr - start))
nvkm_vmm_ptes_sparse(vmm, start, size, false);
return ret;
}
} else {
nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
}
size -= block;
addr += block;
}
return 0;
}
static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
mutex_unlock(&vmm->mutex.map);
}
static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
mutex_unlock(&vmm->mutex.map);
}
static void
nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
mutex_lock(&vmm->mutex.ref);
nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
mutex_unlock(&vmm->mutex.ref);
}
static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
u64 fail;
mutex_lock(&vmm->mutex.ref);
fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
mutex_unlock(&vmm->mutex.ref);
return -ENOMEM;
}
mutex_unlock(&vmm->mutex.ref);
return 0;
}
static void
__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
if (vmm->managed.raw) {
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
nvkm_vmm_ptes_put(vmm, page, addr, size);
} else {
__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
}
}
static int
__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
false, nvkm_vmm_ref_ptes, func, map, NULL);
if (fail != ~0ULL) {
if ((size = fail - addr))
nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM;
}
return 0;
}
static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
int ret;
if (vmm->managed.raw) {
ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
if (ret)
return ret;
nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
return 0;
} else {
return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
}
}
struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
{
struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma) {
vma->addr = addr;
vma->size = size;
vma->page = NVKM_VMA_PAGE_NONE;
vma->refd = NVKM_VMA_PAGE_NONE;
}
return vma;
}
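/* Split the last "tail" bytes off a VMA into a new node that inherits the
 * original's state flags.
 */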
struct nvkm_vma *
nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
{
struct nvkm_vma *new;
BUG_ON(vma->size == tail);
if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
return NULL;
vma->size -= tail;
new->mapref = vma->mapref;
new->sparse = vma->sparse;
new->page = vma->page;
new->refd = vma->refd;
new->used = vma->used;
new->part = vma->part;
new->busy = vma->busy;
new->mapped = vma->mapped;
list_add(&new->head, &vma->head);
return new;
}
static inline void
nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
rb_erase(&vma->tree, &vmm->free);
}
static inline void
nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
nvkm_vmm_free_remove(vmm, vma);
list_del(&vma->head);
kfree(vma);
}
static void
nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct rb_node **ptr = &vmm->free.rb_node;
struct rb_node *parent = NULL;
while (*ptr) {
struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
parent = *ptr;
if (vma->size < this->size)
ptr = &parent->rb_left;
else
if (vma->size > this->size)
ptr = &parent->rb_right;
else
if (vma->addr < this->addr)
ptr = &parent->rb_left;
else
if (vma->addr > this->addr)
ptr = &parent->rb_right;
else
BUG();
}
rb_link_node(&vma->tree, parent, ptr);
rb_insert_color(&vma->tree, &vmm->free);
}
static inline void
nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
rb_erase(&vma->tree, &vmm->root);
}
static inline void
nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
nvkm_vmm_node_remove(vmm, vma);
list_del(&vma->head);
kfree(vma);
}
static void
nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct rb_node **ptr = &vmm->root.rb_node;
struct rb_node *parent = NULL;
while (*ptr) {
struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
parent = *ptr;
if (vma->addr < this->addr)
ptr = &parent->rb_left;
else
if (vma->addr > this->addr)
ptr = &parent->rb_right;
else
BUG();
}
rb_link_node(&vma->tree, parent, ptr);
rb_insert_color(&vma->tree, &vmm->root);
}
struct nvkm_vma *
nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
{
struct rb_node *node = vmm->root.rb_node;
while (node) {
struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
if (addr < vma->addr)
node = node->rb_left;
else
if (addr >= vma->addr + vma->size)
node = node->rb_right;
else
return vma;
}
return NULL;
}
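/* Fetch the previous/next VMA in the address-ordered list, or NULL when the
 * list head is reached.
 */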
#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
list_entry((root)->head.dir, struct nvkm_vma, head))
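/* Merge "size" bytes of a VMA with its neighbours: either the whole node is
 * absorbed into prev/next, or the affected bytes are moved across the
 * boundary, keeping the rbtree and list consistent.
 */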
static struct nvkm_vma *
nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
{
if (next) {
if (vma->size == size) {
vma->size += next->size;
nvkm_vmm_node_delete(vmm, next);
if (prev) {
prev->size += vma->size;
nvkm_vmm_node_delete(vmm, vma);
return prev;
}
return vma;
}
BUG_ON(prev);
nvkm_vmm_node_remove(vmm, next);
vma->size -= size;
next->addr -= size;
next->size += size;
nvkm_vmm_node_insert(vmm, next);
return next;
}
if (prev) {
if (vma->size != size) {
nvkm_vmm_node_remove(vmm, vma);
prev->size += size;
vma->addr += size;
vma->size -= size;
nvkm_vmm_node_insert(vmm, vma);
} else {
prev->size += vma->size;
nvkm_vmm_node_delete(vmm, vma);
}
return prev;
}
return vma;
}
struct nvkm_vma *
nvkm_vmm_node_split(struct nvkm_vmm *vmm,
struct nvkm_vma *vma, u64 addr, u64 size)
{
struct nvkm_vma *prev = NULL;
if (vma->addr != addr) {
prev = vma;
if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
return NULL;
vma->part = true;
nvkm_vmm_node_insert(vmm, vma);
}
if (vma->size != size) {
struct nvkm_vma *tmp;
if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
return NULL;
}
tmp->part = true;
nvkm_vmm_node_insert(vmm, tmp);
}
return vma;
}
static void
nvkm_vma_dump(struct nvkm_vma *vma)
{
printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
vma->addr, (u64)vma->size,
vma->used ? '-' : 'F',
vma->mapref ? 'R' : '-',
vma->sparse ? 'S' : '-',
vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
vma->part ? 'P' : '-',
vma->busy ? 'B' : '-',
vma->mapped ? 'M' : '-',
vma->memory);
}
static void
nvkm_vmm_dump(struct nvkm_vmm *vmm)
{
struct nvkm_vma *vma;
list_for_each_entry(vma, &vmm->list, head) {
nvkm_vma_dump(vma);
}
}
static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{
struct nvkm_vma *vma;
struct rb_node *node;
if (0)
nvkm_vmm_dump(vmm);
while ((node = rb_first(&vmm->root))) {
struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
nvkm_vmm_put(vmm, &vma);
}
if (vmm->bootstrapped) {
const struct nvkm_vmm_page *page = vmm->func->page;
const u64 limit = vmm->limit - vmm->start;
while (page[1].shift)
page++;
nvkm_mmu_ptc_dump(vmm->mmu);
nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
}
vma = list_first_entry(&vmm->list, typeof(*vma), head);
list_del(&vma->head);
kfree(vma);
WARN_ON(!list_empty(&vmm->list));
if (vmm->nullp) {
dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
vmm->nullp, vmm->null);
}
if (vmm->pd) {
nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
nvkm_vmm_pt_del(&vmm->pd);
}
}
static int
nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
{
struct nvkm_vma *vma;
if (!(vma = nvkm_vma_new(addr, size)))
return -ENOMEM;
vma->mapref = true;
vma->sparse = false;
vma->used = true;
nvkm_vmm_node_insert(vmm, vma);
list_add_tail(&vma->head, &vmm->list);
return 0;
}
static int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *key, const char *name,
struct nvkm_vmm *vmm)
{
static struct lock_class_key _key;
const struct nvkm_vmm_page *page = func->page;
const struct nvkm_vmm_desc *desc;
struct nvkm_vma *vma;
int levels, bits = 0, ret;
vmm->func = func;
vmm->mmu = mmu;
vmm->name = name;
vmm->debug = mmu->subdev.debug;
kref_init(&vmm->kref);
__mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
mutex_init(&vmm->mutex.ref);
mutex_init(&vmm->mutex.map);
/* Locate the smallest page size supported by the backend; it will
 * have the deepest nesting of page tables.
 */
while (page[1].shift)
page++;
/* Locate the structure that describes the layout of the top-level
* page table, and determine the number of valid bits in a virtual
* address.
*/
for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
bits += desc->bits;
bits += page->shift;
desc--;
if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
return -EINVAL;
/* Allocate top-level page table. */
vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
if (!vmm->pd)
return -ENOMEM;
vmm->pd->refs[0] = 1;
INIT_LIST_HEAD(&vmm->join);
/* ... and the GPU storage for it, except on Tesla-class GPUs that
* have the PD embedded in the instance structure.
*/
if (desc->size) {
const u32 size = pd_header + desc->size * (1 << desc->bits);
vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
if (!vmm->pd->pt[0])
return -ENOMEM;
}
/* Initialise address-space MM. */
INIT_LIST_HEAD(&vmm->list);
vmm->free = RB_ROOT;
vmm->root = RB_ROOT;
if (managed) {
/* Address-space will be managed by the client for the most
* part, except for a specified area where NVKM allocations
* are allowed to be placed.
*/
vmm->start = 0;
vmm->limit = 1ULL << bits;
if (addr + size < addr || addr + size > vmm->limit)
return -EINVAL;
/* Client-managed area before the NVKM-managed area. */
if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
return ret;
vmm->managed.p.addr = 0;
vmm->managed.p.size = addr;
/* NVKM-managed area. */
if (size) {
if (!(vma = nvkm_vma_new(addr, size)))
return -ENOMEM;
nvkm_vmm_free_insert(vmm, vma);
list_add_tail(&vma->head, &vmm->list);
}
/* Client-managed area after the NVKM-managed area. */
addr = addr + size;
size = vmm->limit - addr;
if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
return ret;
vmm->managed.n.addr = addr;
vmm->managed.n.size = size;
} else {
/* Address-space fully managed by NVKM, requiring calls to
* nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
*/
vmm->start = addr;
vmm->limit = size ? (addr + size) : (1ULL << bits);
if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
return -EINVAL;
if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
return -ENOMEM;
nvkm_vmm_free_insert(vmm, vma);
list_add(&vma->head, &vmm->list);
}
return 0;
}
int
nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
u32 hdr, bool managed, u64 addr, u64 size,
struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
return -ENOMEM;
return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
}
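/* For raw PFN mappings: merge the affected range with compatible
 * neighbouring VMAs where possible, otherwise split it out of the current
 * VMA so mapped and unmapped PFN ranges are tracked separately.
 */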
static struct nvkm_vma *
nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
u64 addr, u64 size, u8 page, bool map)
{
struct nvkm_vma *prev = NULL;
struct nvkm_vma *next = NULL;
if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
if (prev->memory || prev->mapped != map)
prev = NULL;
}
if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
if (!next->part ||
next->memory || next->mapped != map)
next = NULL;
}
if (prev || next)
return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
return nvkm_vmm_node_split(vmm, vma, addr, size);
}
int
nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
{
struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
struct nvkm_vma *next;
u64 limit = addr + size;
u64 start = addr;
if (!vma)
return -EINVAL;
do {
if (!vma->mapped || vma->memory)
continue;
size = min(limit - start, vma->size - (start - vma->addr));
nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
start, size, false, true);
next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
if (!WARN_ON(!next)) {
vma = next;
vma->refd = NVKM_VMA_PAGE_NONE;
vma->mapped = false;
}
} while ((vma = node(vma, next)) && (start = vma->addr) < limit);
return 0;
}
/*TODO:
* - Avoid PT readback (for dma_unmap etc), this might end up being dealt
* with inside HMM, which would be a lot nicer for us to deal with.
* - Support for systems without a 4KiB page size.
*/
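/* Map an array of raw page frame numbers (the HMM/SVM path) into the VMM,
 * processing one run of same-state pages within a single VMA at a time,
 * splitting or merging VMAs so the mapped state of each range is tracked
 * correctly.
 */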
int
nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
{
const struct nvkm_vmm_page *page = vmm->func->page;
struct nvkm_vma *vma, *tmp;
u64 limit = addr + size;
u64 start = addr;
int pm = size >> shift;
int pi = 0;
/* Only support mapping where the page size of the incoming page
* array matches a page size available for direct mapping.
*/
while (page->shift && (page->shift != shift ||
page->desc->func->pfn == NULL))
page++;
if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
!IS_ALIGNED(size, 1ULL << shift) ||
addr + size < addr || addr + size > vmm->limit) {
VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
shift, page->shift, addr, size);
return -EINVAL;
}
if (!(vma = nvkm_vmm_node_search(vmm, addr)))
return -ENOENT;
do {
bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
bool mapped = vma->mapped;
u64 size = limit - start;
u64 addr = start;
int pn, ret = 0;
/* Narrow the operation window to cover a single action (page
* should be mapped or not) within a single VMA.
*/
for (pn = 0; pi + pn < pm; pn++) {
if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
break;
}
size = min_t(u64, size, pn << page->shift);
size = min_t(u64, size, vma->size + vma->addr - addr);
/* Reject any operation on unmanaged regions, or on areas that
 * already have nvkm_memory objects mapped in them.
 */
if (!vma->mapref || vma->memory) {
ret = -EINVAL;
goto next;
}
/* In order to both properly refcount GPU page tables, and
* prevent "normal" mappings and these direct mappings from
* interfering with each other, we need to track contiguous
* ranges that have been mapped with this interface.
*
* Here we attempt to either split an existing VMA so we're
* able to flag the region as either unmapped/mapped, or to
* merge with adjacent VMAs that are already compatible.
*
* If the region is already compatible, nothing is required.
*/
if (map != mapped) {
tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
page -
vmm->func->page, map);
if (WARN_ON(!tmp)) {
ret = -ENOMEM;
goto next;
}
if ((tmp->mapped = map))
tmp->refd = page - vmm->func->page;
else
tmp->refd = NVKM_VMA_PAGE_NONE;
vma = tmp;
}
/* Update HW page tables. */
if (map) {
struct nvkm_vmm_map args;
args.page = page;
args.pfn = &pfn[pi];
if (!mapped) {
ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
size, &args, page->
desc->func->pfn);
} else {
nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
page->desc->func->pfn);
}
} else {
if (mapped) {
nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
false, true);
}
}
next:
/* Iterate to next operation. */
if (vma->addr + vma->size == addr + size)
vma = node(vma, next);
start += size;
if (ret) {
/* Failure is signalled by clearing the valid bit on
* any PFN that couldn't be modified as requested.
*/
while (size) {
pfn[pi++] = NVKM_VMM_PFN_NONE;
size -= 1 << page->shift;
}
} else {
pi += size >> page->shift;
}
} while (vma && start < limit);
return 0;
}
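/* Release the memory object and compression tags backing a region, and
 * merge it back with its neighbours now that it is unmapped.
 */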
void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct nvkm_vma *prev = NULL;
struct nvkm_vma *next;
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
vma->mapped = false;
if (vma->part && (prev = node(vma, prev)) && prev->mapped)
prev = NULL;
if ((next = node(vma, next)) && (!next->part || next->mapped))
next = NULL;
nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
}
void
nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
{
const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
if (vma->mapref) {
nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
vma->refd = NVKM_VMA_PAGE_NONE;
} else {
nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
}
nvkm_vmm_unmap_region(vmm, vma);
}
void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
if (vma->memory) {
mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_unmap_locked(vmm, vma, false);
mutex_unlock(&vmm->mutex.vmm);
}
}
static int
nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
void *argv, u32 argc, struct nvkm_vmm_map *map)
{
switch (nvkm_memory_target(map->memory)) {
case NVKM_MEM_TARGET_VRAM:
if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
return -EINVAL;
}
break;
case NVKM_MEM_TARGET_HOST:
case NVKM_MEM_TARGET_NCOH:
if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
return -EINVAL;
}
break;
default:
WARN_ON(1);
return -ENOSYS;
}
if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
!IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
!IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
nvkm_memory_page(map->memory) < map->page->shift) {
VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
vma->addr, (u64)vma->size, map->offset, map->page->shift,
nvkm_memory_page(map->memory));
return -EINVAL;
}
return vmm->func->valid(vmm, argv, argc, map);
}
static int
nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
void *argv, u32 argc, struct nvkm_vmm_map *map)
{
for (map->page = vmm->func->page; map->page->shift; map->page++) {
VMM_DEBUG(vmm, "trying %d", map->page->shift);
if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
return 0;
}
return -EINVAL;
}
static int
nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
void *argv, u32 argc, struct nvkm_vmm_map *map)
{
nvkm_vmm_pte_func func;
int ret;
map->no_comp = vma->no_comp;
/* Make sure we won't overrun the end of the memory object. */
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
nvkm_memory_size(map->memory),
map->offset, (u64)vma->size);
return -EINVAL;
}
/* Check remaining arguments for validity. */
if (vma->page == NVKM_VMA_PAGE_NONE &&
vma->refd == NVKM_VMA_PAGE_NONE) {
/* Find the largest page size we can perform the mapping at. */
const u32 debug = vmm->debug;
vmm->debug = 0;
ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
vmm->debug = debug;
if (ret) {
VMM_DEBUG(vmm, "invalid at any page size");
nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
return -EINVAL;
}
} else {
/* Page size of the VMA is already pre-determined. */
if (vma->refd != NVKM_VMA_PAGE_NONE)
map->page = &vmm->func->page[vma->refd];
else
map->page = &vmm->func->page[vma->page];
ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
if (ret) {
VMM_DEBUG(vmm, "invalid %d\n", ret);
return ret;
}
}
/* Deal with the 'offset' argument, and fetch the backend function. */
map->off = map->offset;
if (map->mem) {
for (; map->off; map->mem = map->mem->next) {
u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
if (size > map->off)
break;
map->off -= size;
}
func = map->page->desc->func->mem;
} else
if (map->sgl) {
for (; map->off; map->sgl = sg_next(map->sgl)) {
u64 size = sg_dma_len(map->sgl);
if (size > map->off)
break;
map->off -= size;
}
func = map->page->desc->func->sgl;
} else {
map->dma += map->offset >> PAGE_SHIFT;
map->off = map->offset & PAGE_MASK;
func = map->page->desc->func->dma;
}
/* Perform the map. */
if (vma->refd == NVKM_VMA_PAGE_NONE) {
ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
if (ret)
return ret;
vma->refd = map->page - vmm->func->page;
} else {
nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
}
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
vma->memory = nvkm_memory_ref(map->memory);
vma->mapped = true;
vma->tags = map->tags;
return 0;
}
int
nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
int ret;
if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
vmm->managed.raw)
return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma->busy = false;
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
static void
nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct nvkm_vma *prev, *next;
if ((prev = node(vma, prev)) && !prev->used) {
vma->addr = prev->addr;
vma->size += prev->size;
nvkm_vmm_free_delete(vmm, prev);
}
if ((next = node(vma, next)) && !next->used) {
vma->size += next->size;
nvkm_vmm_free_delete(vmm, next);
}
nvkm_vmm_free_insert(vmm, vma);
}
void
nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
const struct nvkm_vmm_page *page = vmm->func->page;
struct nvkm_vma *next = vma;
BUG_ON(vma->part);
if (vma->mapref || !vma->sparse) {
do {
const bool mem = next->memory != NULL;
const bool map = next->mapped;
const u8 refd = next->refd;
const u64 addr = next->addr;
u64 size = next->size;
/* Merge regions that are in the same state. */
while ((next = node(next, next)) && next->part &&
(next->mapped == map) &&
(next->memory != NULL) == mem &&
(next->refd == refd))
size += next->size;
if (map) {
/* Region(s) are mapped, merge the unmap
* and dereference into a single walk of
* the page tree.
*/
nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
size, vma->sparse,
!mem);
} else
if (refd != NVKM_VMA_PAGE_NONE) {
/* Drop allocation-time PTE references. */
nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
}
} while (next && next->part);
}
/* Merge any mapped regions that were split from the initial
* address-space allocation back into the allocated VMA, and
* release memory/compression resources.
*/
next = vma;
do {
if (next->mapped)
nvkm_vmm_unmap_region(vmm, next);
} while ((next = node(vma, next)) && next->part);
if (vma->sparse && !vma->mapref) {
/* Sparse region that was allocated with a fixed page size,
* meaning all relevant PTEs were referenced once when the
* region was allocated, and remained that way, regardless
* of whether memory was mapped into it afterwards.
*
* The process of unmapping, unsparsing, and dereferencing
* PTEs can be done in a single page tree walk.
*/
nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
} else
if (vma->sparse) {
/* Sparse region that wasn't allocated with a fixed page size,
* PTE references were taken both at allocation time (to make
* the GPU see the region as sparse), and when mapping memory
* into the region.
*
* The latter was handled above, and the remaining references
* are dealt with here.
*/
nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
}
/* Remove VMA from the list of allocated nodes. */
nvkm_vmm_node_remove(vmm, vma);
/* Merge VMA back into the free list. */
vma->page = NVKM_VMA_PAGE_NONE;
vma->refd = NVKM_VMA_PAGE_NONE;
vma->used = false;
nvkm_vmm_put_region(vmm, vma);
}
void
nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{
struct nvkm_vma *vma = *pvma;
if (vma) {
mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_put_locked(vmm, vma);
mutex_unlock(&vmm->mutex.vmm);
*pvma = NULL;
}
}
int
nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
{
const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
struct rb_node *node = NULL, *temp;
struct nvkm_vma *vma = NULL, *tmp;
u64 addr, tail;
int ret;
VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
"shift: %d align: %d size: %016llx",
getref, mapref, sparse, shift, align, size);
/* Zero-sized, or lazily-allocated sparse VMAs, make no sense. */
if (unlikely(!size || (!getref && !mapref && sparse))) {
VMM_DEBUG(vmm, "args %016llx %d %d %d",
size, getref, mapref, sparse);
return -EINVAL;
}
/* Tesla-class GPUs can only select page size per-PDE, which means
* we're required to know the mapping granularity up-front to find
* a suitable region of address-space.
*
* The same goes if we're requesting up-front allocation of PTES.
*/
if (unlikely((getref || vmm->func->page_block) && !shift)) {
VMM_DEBUG(vmm, "page size required: %d %016llx",
getref, vmm->func->page_block);
return -EINVAL;
}
/* If a specific page size was requested, determine its index and
* make sure the requested size is a multiple of the page size.
*/
if (shift) {
for (page = vmm->func->page; page->shift; page++) {
if (shift == page->shift)
break;
}
if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
VMM_DEBUG(vmm, "page %d %016llx", shift, size);
return -EINVAL;
}
align = max_t(u8, align, shift);
} else {
align = max_t(u8, align, 12);
}
/* Locate smallest block that can possibly satisfy the allocation. */
temp = vmm->free.rb_node;
while (temp) {
struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
if (this->size < size) {
temp = temp->rb_right;
} else {
node = temp;
temp = temp->rb_left;
}
}
if (unlikely(!node))
return -ENOSPC;
/* Take into account alignment restrictions, trying larger blocks
* in turn until we find a suitable free block.
*/
do {
struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
struct nvkm_vma *prev = node(this, prev);
struct nvkm_vma *next = node(this, next);
const int p = page - vmm->func->page;
addr = this->addr;
if (vmm->func->page_block && prev && prev->page != p)
addr = ALIGN(addr, vmm->func->page_block);
addr = ALIGN(addr, 1ULL << align);
tail = this->addr + this->size;
if (vmm->func->page_block && next && next->page != p)
tail = ALIGN_DOWN(tail, vmm->func->page_block);
if (addr <= tail && tail - addr >= size) {
nvkm_vmm_free_remove(vmm, this);
vma = this;
break;
}
} while ((node = rb_next(node)));
if (unlikely(!vma))
return -ENOSPC;
/* If the VMA we found isn't already exactly the requested size,
* it needs to be split, and the remaining free blocks returned.
*/
if (addr != vma->addr) {
if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
nvkm_vmm_put_region(vmm, vma);
return -ENOMEM;
}
nvkm_vmm_free_insert(vmm, vma);
vma = tmp;
}
if (size != vma->size) {
if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
nvkm_vmm_put_region(vmm, vma);
return -ENOMEM;
}
nvkm_vmm_free_insert(vmm, tmp);
}
/* Pre-allocate page tables and/or setup sparse mappings. */
if (sparse && getref)
ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
else if (sparse)
ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
else if (getref)
ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
else
ret = 0;
if (ret) {
nvkm_vmm_put_region(vmm, vma);
return ret;
}
vma->mapref = mapref && !getref;
vma->sparse = sparse;
vma->page = page - vmm->func->page;
vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
vma->used = true;
nvkm_vmm_node_insert(vmm, vma);
*pvma = vma;
return 0;
}
int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{
int ret;
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
void
nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
bool sparse, u8 refd)
{
const struct nvkm_vmm_page *page = &vmm->func->page[refd];
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
}
void
nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
if (unlikely(!size))
return -EINVAL;
return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{
int ret;
mutex_lock(&vmm->mutex.ref);
ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
mutex_unlock(&vmm->mutex.ref);
return ret;
}
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex.vmm);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex.vmm);
}
}
int
nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
int ret = 0;
if (vmm->func->join) {
mutex_lock(&vmm->mutex.vmm);
ret = vmm->func->join(vmm, inst);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}
static bool
nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
const struct nvkm_vmm_desc *desc = it->desc;
const int type = desc->type == SPT;
nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
return false;
}
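/* Reference PTEs covering the entire address-space at the smallest page
 * size, then "boot" the resulting page-table memory via nvkm_memory_boot().
 * Presumably this is what lets e.g. the BAR2 VMM be used before the rest
 * of the MMU infrastructure is ready.
 */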
int
nvkm_vmm_boot(struct nvkm_vmm *vmm)
{
const struct nvkm_vmm_page *page = vmm->func->page;
const u64 limit = vmm->limit - vmm->start;
int ret;
while (page[1].shift)
page++;
ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
if (ret)
return ret;
nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
nvkm_vmm_boot_ptes, NULL, NULL, NULL);
vmm->bootstrapped = true;
return 0;
}
static void
nvkm_vmm_del(struct kref *kref)
{
struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
nvkm_vmm_dtor(vmm);
kfree(vmm);
}
void
nvkm_vmm_unref(struct nvkm_vmm **pvmm)
{
struct nvkm_vmm *vmm = *pvmm;
if (vmm) {
kref_put(&vmm->kref, nvkm_vmm_del);
*pvmm = NULL;
}
}
struct nvkm_vmm *
nvkm_vmm_ref(struct nvkm_vmm *vmm)
{
if (vmm)
kref_get(&vmm->kref);
return vmm;
}
int
nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_vmm *vmm = NULL;
int ret;
ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
key, name, &vmm);
if (ret)
nvkm_vmm_unref(&vmm);
*pvmm = vmm;
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <core/memory.h>
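/* GK20A is a Tegra iGPU with no dedicated VRAM: everything lives in
 * (non-coherent) system memory, so NCOH is the only aperture accepted.
 */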
int
gk20a_vmm_aper(enum nvkm_memory_target target)
{
switch (target) {
case NVKM_MEM_TARGET_NCOH: return 0;
default:
return -EINVAL;
}
}
static const struct nvkm_vmm_func
gk20a_vmm_17 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
{}
}
};
static const struct nvkm_vmm_func
gk20a_vmm_16 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
{}
}
};
int
gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c |
/*
* Copyright 2018 Red Hat Inc.
* Copyright 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
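/* Turing exposes only a handful of generic storage kinds, in contrast to
 * the 256-entry tables of earlier GPUs (see gm200_mmu_kind); 0x07 doubles
 * as the invalid kind.
 */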
static const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[16] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00 */
0x06, 0x06, 0x02, 0x01, 0x03, 0x04, 0x05, 0x07,
};
*count = ARRAY_SIZE(kind);
*invalid = 0x07;
return kind;
}
static const struct nvkm_mmu_func
tu102_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu102_vmm_new },
.kind = tu102_mmu_kind,
.kind_sys = true,
};
int
tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c |
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
const u8 *
nv50_mmu_kind(struct nvkm_mmu *base, int *count, u8 *invalid)
{
/* 0x01: no bank swizzle
* 0x02: bank swizzled
* 0x7f: invalid
*
* 0x01/0x02 are values understood by the VRAM allocator,
* and are required to avoid mixing the two types within
* a certain range.
*/
static const u8
kind[128] = {
0x01, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x00 */
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
0x01, 0x01, 0x01, 0x01, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x10 */
0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f, 0x7f, 0x7f,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x20 */
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x30 */
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, /* 0x40 */
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f, 0x01, 0x01, 0x01, 0x7f, /* 0x50 */
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x60 */
0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
0x01, 0x7f, 0x02, 0x7f, 0x01, 0x7f, 0x02, 0x7f, /* 0x70 */
0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
};
*count = ARRAY_SIZE(kind);
*invalid = 0x7f;
return kind;
}
static const struct nvkm_mmu_func
nv50_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
.kind = nv50_mmu_kind,
};
int
nv50_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/gr.h>
#include <nvif/if500d.h>
#include <nvif/unpack.h>
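/* Write a run of PTEs, coalescing physically-contiguous pages into
 * power-of-two blocks of up to 128 PTEs.  The log2 block size is encoded
 * at bit 7 of each PTE, presumably allowing the MMU to cache a single
 * translation for the whole block.
 */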
static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u64 next = addr + map->type, data;
u32 pten;
int log2blk;
map->type += ptes * map->ctag;
while (ptes) {
for (log2blk = 7; log2blk >= 0; log2blk--) {
pten = 1 << log2blk;
if (ptes >= pten && IS_ALIGNED(ptei, pten))
break;
}
data = next | (log2blk << 7);
next += pten * map->next;
ptes -= pten;
while (pten--)
VMM_WO064(pt, vmm, ptei++ * 8, data);
}
}
static void
nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}
static void
nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
if (map->page->shift == PAGE_SHIFT) {
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
const u64 data = *map->dma++ + map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
nvkm_done(pt->memory);
return;
}
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}
static void
nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}
static void
nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}
static const struct nvkm_vmm_desc_func
nv50_vmm_pgt = {
.unmap = nv50_vmm_pgt_unmap,
.mem = nv50_vmm_pgt_mem,
.dma = nv50_vmm_pgt_dma,
.sgl = nv50_vmm_pgt_sgl,
};
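/* Build a PDE for the given page table.  From the encodings below: bit 0
 * appears to mark the PDE present, bit 1 selects small (4KiB) pages,
 * bits 2:3 the target aperture, bits 5:6 the (possibly partial) size of a
 * small-page table, and the upper bits carry the table's address.
 */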
static bool
nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
{
struct nvkm_mmu_pt *pt;
u64 data = 0xdeadcafe00000000ULL;
if (pgt && (pt = pgt->pt[0])) {
switch (pgt->page) {
case 16: data = 0x00000001; break;
case 12: data = 0x00000003;
switch (nvkm_memory_size(pt->memory)) {
case 0x100000: data |= 0x00000000; break;
case 0x040000: data |= 0x00000020; break;
case 0x020000: data |= 0x00000040; break;
case 0x010000: data |= 0x00000060; break;
default:
WARN_ON(1);
return false;
}
break;
default:
WARN_ON(1);
return false;
}
switch (nvkm_memory_target(pt->memory)) {
case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
default:
WARN_ON(1);
return false;
}
data |= pt->addr;
}
*pdata = data;
return true;
}
static void
nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
struct nvkm_vmm_join *join;
u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
u64 data;
if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
return;
list_for_each_entry(join, &vmm->join, head) {
nvkm_kmap(join->inst);
nvkm_wo64(join->inst, pdeo, data);
nvkm_done(join->inst);
}
}
static const struct nvkm_vmm_desc_func
nv50_vmm_pgd = {
.pde = nv50_vmm_pgd_pde,
};
const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
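/* Flush the TLBs of every engine currently holding a reference to this
 * VMM by poking the per-engine invalidate register (0x100c80) and polling
 * for completion.  GR is flushed through a dedicated method first to work
 * around a hardware bug.
 */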
void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
struct nvkm_device *device = subdev->device;
int i, id;
mutex_lock(&vmm->mmu->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
if (!atomic_read(&vmm->engref[i]))
continue;
/* unfortunate hw bug workaround... */
if (i == NVKM_ENGINE_GR && device->gr) {
int ret = nvkm_gr_tlb_flush(device->gr);
if (ret != -ENODEV)
continue;
}
switch (i) {
case NVKM_ENGINE_GR : id = 0x00; break;
case NVKM_ENGINE_VP :
case NVKM_ENGINE_MSPDEC: id = 0x01; break;
case NVKM_SUBDEV_BAR : id = 0x06; break;
case NVKM_ENGINE_MSPPP :
case NVKM_ENGINE_MPEG : id = 0x08; break;
case NVKM_ENGINE_BSP :
case NVKM_ENGINE_MSVLD : id = 0x09; break;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : id = 0x0a; break;
case NVKM_ENGINE_CE : id = 0x0d; break;
default:
continue;
}
nvkm_wr32(device, 0x100c80, (id << 16) | 1);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
break;
) < 0)
nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]);
}
mutex_unlock(&vmm->mmu->mutex);
}
int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
const struct nvkm_vmm_page *page = map->page;
union {
struct nv50_vmm_map_vn vn;
struct nv50_vmm_map_v0 v0;
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_ram *ram = device->fb->ram;
struct nvkm_memory *memory = map->memory;
u8 aper, kind, kind_inv, comp, priv, ro;
int kindn, ret = -ENOSYS;
const u8 *kindm;
map->type = map->ctag = 0;
map->next = 1 << page->shift;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
ro = !!args->v0.ro;
priv = !!args->v0.priv;
kind = args->v0.kind & 0x7f;
comp = args->v0.comp & 0x03;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
ro = 0;
priv = 0;
kind = 0x00;
comp = 0;
} else {
VMM_DEBUG(vmm, "args");
return ret;
}
switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_VRAM:
if (ram->stolen) {
map->type |= ram->stolen;
aper = 3;
} else {
aper = 0;
}
break;
case NVKM_MEM_TARGET_HOST:
aper = 2;
break;
case NVKM_MEM_TARGET_NCOH:
aper = 3;
break;
default:
WARN_ON(1);
return -EINVAL;
}
kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
if (map->mem && map->mem->type != kindm[kind]) {
VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
kindm[kind], map->mem->type);
return -EINVAL;
}
if (comp) {
u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
return -EINVAL;
}
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags, NULL,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
if (map->tags->mn) {
u32 tags = map->tags->mn->offset +
(map->offset >> 16);
map->ctag |= (u64)comp << 49;
map->type |= (u64)comp << 47;
map->type |= (u64)tags << 49;
map->next |= map->ctag;
}
}
}
map->type |= BIT(0); /* Valid. */
map->type |= (u64)ro << 3;
map->type |= (u64)aper << 4;
map->type |= (u64)priv << 6;
map->type |= (u64)kind << 40;
return 0;
}
void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
struct nvkm_vmm_join *join;
list_for_each_entry(join, &vmm->join, head) {
if (join->inst == inst) {
list_del(&join->head);
kfree(join);
break;
}
}
}
int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
struct nvkm_vmm_join *join;
int ret = 0;
u64 data;
u32 pdei;
if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
return -ENOMEM;
join->inst = inst;
list_add_tail(&join->head, &vmm->join);
nvkm_kmap(join->inst);
for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
ret = -EINVAL;
break;
}
nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
}
nvkm_done(join->inst);
return ret;
}
static const struct nvkm_vmm_func
nv50_vmm = {
.join = nv50_vmm_join,
.part = nv50_vmm_part,
.valid = nv50_vmm_valid,
.flush = nv50_vmm_flush,
.page_block = 1 << 29,
.page = {
{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
int
nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <nvif/class.h>
#include <nvif/if900b.h>
#include <nvif/if900d.h>
#include <nvif/unpack.h>
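/* Map a memory object through BAR1: allocate a region of the BAR1 VMM,
 * map the memory into it, and hand back the resulting BAR1 offset and
 * size for the caller to expose.
 */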
int
gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
{
struct gf100_vmm_map_v0 uvmm = {};
union {
struct gf100_mem_map_vn vn;
struct gf100_mem_map_v0 v0;
} *args = argv;
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
uvmm.ro = args->v0.ro;
uvmm.kind = args->v0.kind;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
} else
return ret;
ret = nvkm_vmm_get(bar, nvkm_memory_page(memory),
nvkm_memory_size(memory), pvma);
if (ret)
return ret;
ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
if (ret)
return ret;
*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
*psize = (*pvma)->size;
return 0;
}
int
gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
void *argv, u32 argc, struct nvkm_memory **pmemory)
{
union {
struct gf100_mem_vn vn;
struct gf100_mem_v0 v0;
} *args = argv;
int ret = -ENOSYS;
bool contig;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
contig = args->v0.contig;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
contig = false;
} else
return ret;
if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
type = NVKM_RAM_MM_NORMAL;
else
type = NVKM_RAM_MM_MIXED;
return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
size, contig, false, pmemory);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ummu.h"
#include "umem.h"
#include "uvmm.h"
#include <core/client.h>
#include <nvif/if0008.h>
#include <nvif/unpack.h>
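/* Userspace-visible MMU object: exposes the MEM and VMM child classes,
 * and answers HEAP/TYPE/KIND queries describing the available memory
 * types and storage kinds.
 */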
static int
nvkm_ummu_sclass(struct nvkm_object *object, int index,
struct nvkm_oclass *oclass)
{
struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
if (mmu->func->mem.user.oclass) {
if (index-- == 0) {
oclass->base = mmu->func->mem.user;
oclass->ctor = nvkm_umem_new;
return 0;
}
}
if (mmu->func->vmm.user.oclass) {
if (index-- == 0) {
oclass->base = mmu->func->vmm.user;
oclass->ctor = nvkm_uvmm_new;
return 0;
}
}
return -EINVAL;
}
static int
nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_heap_v0 v0;
} *args = argv;
int ret = -ENOSYS;
u8 index;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= mmu->heap_nr)
return -EINVAL;
args->v0.size = mmu->heap[index].size;
} else
return ret;
return 0;
}
static int
nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_type_v0 v0;
} *args = argv;
int ret = -ENOSYS;
u8 type, index;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= mmu->type_nr)
return -EINVAL;
type = mmu->type[index].type;
args->v0.heap = mmu->type[index].heap;
args->v0.vram = !!(type & NVKM_MEM_VRAM);
args->v0.host = !!(type & NVKM_MEM_HOST);
args->v0.comp = !!(type & NVKM_MEM_COMP);
args->v0.disp = !!(type & NVKM_MEM_DISP);
args->v0.kind = !!(type & NVKM_MEM_KIND);
args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
} else
return ret;
return 0;
}
static int
nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_kind_v0 v0;
} *args = argv;
const u8 *kind = NULL;
int ret = -ENOSYS, count = 0;
u8 kind_inv = 0;
if (mmu->func->kind)
kind = mmu->func->kind(mmu, &count, &kind_inv);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
if (argc != args->v0.count * sizeof(*args->v0.data))
return -EINVAL;
if (args->v0.count > count)
return -EINVAL;
args->v0.kind_inv = kind_inv;
memcpy(args->v0.data, kind, args->v0.count);
} else
return ret;
return 0;
}
static int
nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_ummu *ummu = nvkm_ummu(object);
switch (mthd) {
case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
default:
break;
}
return -EINVAL;
}
static const struct nvkm_object_func
nvkm_ummu = {
.mthd = nvkm_ummu_mthd,
.sclass = nvkm_ummu_sclass,
};
int
nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
union {
struct nvif_mmu_v0 v0;
} *args = argv;
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_ummu *ummu;
int ret = -ENOSYS, kinds = 0;
u8 unused = 0;
if (mmu->func->kind)
mmu->func->kind(mmu, &kinds, &unused);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.dmabits = mmu->dma_bits;
args->v0.heap_nr = mmu->heap_nr;
args->v0.type_nr = mmu->type_nr;
args->v0.kind_nr = kinds;
} else
return ret;
if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
ummu->mmu = mmu;
*pobject = &ummu->object;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
#include "mem.h"
#include <core/memory.h>
#include <nvif/if000a.h>
#include <nvif/unpack.h>
struct nvkm_mem {
struct nvkm_memory memory;
enum nvkm_memory_target target;
struct nvkm_mmu *mmu;
u64 pages;
struct page **mem;
union {
struct scatterlist *sgl;
dma_addr_t *dma;
};
};
static enum nvkm_memory_target
nvkm_mem_target(struct nvkm_memory *memory)
{
return nvkm_mem(memory)->target;
}
static u8
nvkm_mem_page(struct nvkm_memory *memory)
{
return PAGE_SHIFT;
}
static u64
nvkm_mem_addr(struct nvkm_memory *memory)
{
struct nvkm_mem *mem = nvkm_mem(memory);
if (mem->pages == 1 && mem->mem)
return mem->dma[0];
return ~0ULL;
}
static u64
nvkm_mem_size(struct nvkm_memory *memory)
{
return nvkm_mem(memory)->pages << PAGE_SHIFT;
}
static int
nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nvkm_mem *mem = nvkm_mem(memory);
struct nvkm_vmm_map map = {
.memory = &mem->memory,
.offset = offset,
.dma = mem->dma,
};
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static void *
nvkm_mem_dtor(struct nvkm_memory *memory)
{
struct nvkm_mem *mem = nvkm_mem(memory);
if (mem->mem) {
while (mem->pages--) {
dma_unmap_page(mem->mmu->subdev.device->dev,
mem->dma[mem->pages], PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(mem->mem[mem->pages]);
}
kvfree(mem->dma);
kvfree(mem->mem);
}
return mem;
}
static const struct nvkm_memory_func
nvkm_mem_dma = {
.dtor = nvkm_mem_dtor,
.target = nvkm_mem_target,
.page = nvkm_mem_page,
.addr = nvkm_mem_addr,
.size = nvkm_mem_size,
.map = nvkm_mem_map_dma,
};
static int
nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nvkm_mem *mem = nvkm_mem(memory);
struct nvkm_vmm_map map = {
.memory = &mem->memory,
.offset = offset,
.sgl = mem->sgl,
};
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static const struct nvkm_memory_func
nvkm_mem_sgl = {
.dtor = nvkm_mem_dtor,
.target = nvkm_mem_target,
.page = nvkm_mem_page,
.addr = nvkm_mem_addr,
.size = nvkm_mem_size,
.map = nvkm_mem_map_sgl,
};
int
nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
{
struct nvkm_mem *mem = nvkm_mem(memory);
if (mem->mem) {
*pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
return *pmap ? 0 : -EFAULT;
}
return -EINVAL;
}
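/* Create a host-memory object.  With v0 arguments the caller provides the
 * backing storage (either a DMA-address array or a scatterlist); otherwise
 * pages are allocated and DMA-mapped here, one page at a time.
 */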
static int
nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
void *argv, u32 argc, struct nvkm_memory **pmemory)
{
struct device *dev = mmu->subdev.device->dev;
union {
struct nvif_mem_ram_vn vn;
struct nvif_mem_ram_v0 v0;
} *args = argv;
int ret = -ENOSYS;
enum nvkm_memory_target target;
struct nvkm_mem *mem;
gfp_t gfp = GFP_USER | __GFP_ZERO;
if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
!(mmu->type[type].type & NVKM_MEM_UNCACHED))
target = NVKM_MEM_TARGET_HOST;
else
target = NVKM_MEM_TARGET_NCOH;
if (page != PAGE_SHIFT)
return -EINVAL;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
mem->target = target;
mem->mmu = mmu;
*pmemory = &mem->memory;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if (args->v0.dma) {
nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
mem->dma = args->v0.dma;
} else {
nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
mem->sgl = args->v0.sgl;
}
if (!IS_ALIGNED(size, PAGE_SIZE))
return -EINVAL;
mem->pages = size >> PAGE_SHIFT;
return 0;
} else
if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
kfree(mem);
return ret;
}
nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
return -ENOMEM;
if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
return -ENOMEM;
if (mmu->dma_bits > 32)
gfp |= GFP_HIGHUSER;
else
gfp |= GFP_DMA32;
for (mem->pages = 0; size; size--, mem->pages++) {
struct page *p = alloc_page(gfp);
if (!p)
return -ENOMEM;
mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
p, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, mem->dma[mem->pages])) {
__free_page(p);
return -ENOMEM;
}
mem->mem[mem->pages] = p;
}
return 0;
}
int
nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
void *argv, u32 argc, struct nvkm_memory **pmemory)
{
struct nvkm_memory *memory = NULL;
int ret;
if (mmu->type[type].type & NVKM_MEM_VRAM) {
ret = mmu->func->mem.vram(mmu, type, page, size,
argv, argc, &memory);
} else {
ret = nvkm_mem_new_host(mmu, type, page, size,
argv, argc, &memory);
}
if (ret)
nvkm_memory_unref(&memory);
*pmemory = memory;
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <subdev/fb.h>
#include <nvif/class.h>
const u8 *
gm200_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
kind[256] = {
0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
};
*count = ARRAY_SIZE(kind);
*invalid = 0xff;
return kind;
}
static const struct nvkm_mmu_func
gm200_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
static const struct nvkm_mmu_func
gm200_mmu_fixed = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
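/* When the FB forces a specific big-page size (device->fb->page), expose
 * the fixed-page VMM class variant instead of the default one.
 */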
int
gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (device->fb->page)
return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu);
return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <nvif/ifc00d.h>
#include <nvif/unpack.h>
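/* In addition to the GP100 setup, Volta instance blocks contain what looks
 * like an array of 64 per-subcontext PDB slots.  Replicate the PDB written
 * at 0x200/0x204 into slot 0 (the only one enabled by the mask) and mark
 * the remaining slots invalid.
 */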
int
gv100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
u64 data[2], mask;
int ret = gp100_vmm_join(vmm, inst), i;
if (ret)
return ret;
nvkm_kmap(inst);
data[0] = nvkm_ro32(inst, 0x200);
data[1] = nvkm_ro32(inst, 0x204);
mask = BIT_ULL(0);
nvkm_wo32(inst, 0x21c, 0x00000000);
for (i = 0; i < 64; i++) {
if (mask & BIT_ULL(i)) {
nvkm_wo32(inst, 0x2a4 + (i * 0x10), data[1]);
nvkm_wo32(inst, 0x2a0 + (i * 0x10), data[0]);
} else {
nvkm_wo32(inst, 0x2a4 + (i * 0x10), 0x00000001);
nvkm_wo32(inst, 0x2a0 + (i * 0x10), 0x00000001);
}
nvkm_wo32(inst, 0x2a8 + (i * 0x10), 0x00000000);
}
nvkm_wo32(inst, 0x298, lower_32_bits(mask));
nvkm_wo32(inst, 0x29c, upper_32_bits(mask));
nvkm_done(inst);
return 0;
}
static const struct nvkm_vmm_func
gv100_vmm = {
.join = gv100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
};
int
gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c |
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ummu.h"
#include "vmm.h"
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <nvif/if500d.h>
#include <nvif/if900d.h>
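/* Page-table allocation caching.  Freed page tables are kept on per-size
 * lists (nvkm_mmu_ptc) for quick reuse, and tables smaller than 4KiB are
 * sub-allocated from a shared 4KiB parent allocation (nvkm_mmu_ptp).
 */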
struct nvkm_mmu_ptp {
struct nvkm_mmu_pt *pt;
struct list_head head;
u8 shift;
u16 mask;
u16 free;
};
static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
const int slot = pt->base >> pt->ptp->shift;
struct nvkm_mmu_ptp *ptp = pt->ptp;
/* If there were no free slots in the parent allocation before,
* there will be now, so return PTP to the cache.
*/
if (!ptp->free)
list_add(&ptp->head, &mmu->ptp.list);
ptp->free |= BIT(slot);
/* If there's no more sub-allocations, destroy PTP. */
if (ptp->free == ptp->mask) {
nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
list_del(&ptp->head);
kfree(ptp);
}
kfree(pt);
}
static struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
struct nvkm_mmu_pt *pt;
struct nvkm_mmu_ptp *ptp;
int slot;
if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
return NULL;
ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
if (!ptp) {
/* Need to allocate a new parent to sub-allocate from. */
if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
kfree(pt);
return NULL;
}
ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
if (!ptp->pt) {
kfree(ptp);
kfree(pt);
return NULL;
}
ptp->shift = order_base_2(size);
slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
ptp->mask = (1 << slot) - 1;
ptp->free = ptp->mask;
list_add(&ptp->head, &mmu->ptp.list);
}
pt->ptp = ptp;
pt->sub = true;
/* Sub-allocate from parent object, removing PTP from cache
* if there's no more free slots left.
*/
slot = __ffs(ptp->free);
ptp->free &= ~BIT(slot);
if (!ptp->free)
list_del(&ptp->head);
pt->memory = pt->ptp->pt->memory;
pt->base = slot << ptp->shift;
pt->addr = pt->ptp->pt->addr + pt->base;
return pt;
}
struct nvkm_mmu_ptc {
struct list_head head;
struct list_head item;
u32 size;
u32 refs;
};
static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
struct nvkm_mmu_ptc *ptc;
list_for_each_entry(ptc, &mmu->ptc.list, head) {
if (ptc->size == size)
return ptc;
}
ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
if (ptc) {
INIT_LIST_HEAD(&ptc->item);
ptc->size = size;
ptc->refs = 0;
list_add(&ptc->head, &mmu->ptc.list);
}
return ptc;
}
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
struct nvkm_mmu_pt *pt = *ppt;
if (pt) {
/* Handle sub-allocated page tables. */
if (pt->sub) {
mutex_lock(&mmu->ptp.mutex);
nvkm_mmu_ptp_put(mmu, force, pt);
mutex_unlock(&mmu->ptp.mutex);
return;
}
/* Either cache or free the object. */
mutex_lock(&mmu->ptc.mutex);
if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
list_add_tail(&pt->head, &pt->ptc->item);
pt->ptc->refs++;
} else {
nvkm_memory_unref(&pt->memory);
kfree(pt);
}
mutex_unlock(&mmu->ptc.mutex);
}
}
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
struct nvkm_mmu_ptc *ptc;
struct nvkm_mmu_pt *pt;
int ret;
/* Sub-allocated page table (ie. GP100 LPT). */
if (align < 0x1000) {
mutex_lock(&mmu->ptp.mutex);
pt = nvkm_mmu_ptp_get(mmu, align, zero);
mutex_unlock(&mmu->ptp.mutex);
return pt;
}
/* Lookup cache for this page table size. */
mutex_lock(&mmu->ptc.mutex);
ptc = nvkm_mmu_ptc_find(mmu, size);
if (!ptc) {
mutex_unlock(&mmu->ptc.mutex);
return NULL;
}
/* If there's a free PT in the cache, reuse it. */
pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
if (pt) {
if (zero)
nvkm_fo64(pt->memory, 0, 0, size >> 3);
list_del(&pt->head);
ptc->refs--;
mutex_unlock(&mmu->ptc.mutex);
return pt;
}
mutex_unlock(&mmu->ptc.mutex);
/* No such luck, we need to allocate. */
if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
return NULL;
pt->ptc = ptc;
pt->sub = false;
ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
size, align, zero, &pt->memory);
if (ret) {
kfree(pt);
return NULL;
}
pt->base = 0;
pt->addr = nvkm_memory_addr(pt->memory);
return pt;
}
void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
struct nvkm_mmu_ptc *ptc;
list_for_each_entry(ptc, &mmu->ptc.list, head) {
struct nvkm_mmu_pt *pt, *tt;
list_for_each_entry_safe(pt, tt, &ptc->item, head) {
nvkm_memory_unref(&pt->memory);
list_del(&pt->head);
kfree(pt);
}
}
}
static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
struct nvkm_mmu_ptc *ptc, *ptct;
list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
WARN_ON(!list_empty(&ptc->item));
list_del(&ptc->head);
kfree(ptc);
}
}
static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
mutex_init(&mmu->ptc.mutex);
INIT_LIST_HEAD(&mmu->ptc.list);
mutex_init(&mmu->ptp.mutex);
INIT_LIST_HEAD(&mmu->ptp.list);
}
static void
nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
mmu->type[mmu->type_nr].heap = heap;
mmu->type_nr++;
}
}
static int
nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
{
if (size) {
if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
mmu->heap[mmu->heap_nr].type = type;
mmu->heap[mmu->heap_nr].size = size;
return mmu->heap_nr++;
}
}
return -EINVAL;
}
static void
nvkm_mmu_host(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
int heap;
/* Non-mappable system memory. */
heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
nvkm_mmu_type(mmu, heap, type);
/* Non-coherent, cached, system memory.
*
* Block-linear mappings of system memory must be done through
* BAR1, and cannot be supported on systems where we're unable
* to map BAR1 with write-combining.
*/
type |= NVKM_MEM_MAPPABLE;
if (!device->bar || device->bar->iomap_uncached)
nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
else
nvkm_mmu_type(mmu, heap, type);
/* Coherent, cached, system memory.
*
* Unsupported on systems that aren't able to support snooped
* mappings, and also for block-linear mappings which must be
* done through BAR1.
*/
type |= NVKM_MEM_COHERENT;
if (device->func->cpu_coherent)
nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
/* Uncached system memory. */
nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
}
static void
nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_mm *mm = &device->fb->ram->vram;
const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
u8 heap = NVKM_MEM_VRAM;
int heapM, heapN, heapU;
/* Mixed-memory doesn't support compression or display. */
heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
heap |= NVKM_MEM_COMP;
heap |= NVKM_MEM_DISP;
heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
/* Add non-mappable VRAM types first so that they're preferred
* over anything else. Mixed-memory will be slower than other
* heaps, it's prioritised last.
*/
nvkm_mmu_type(mmu, heapU, type);
nvkm_mmu_type(mmu, heapN, type);
nvkm_mmu_type(mmu, heapM, type);
/* Add host memory types next, under the assumption that users
* wanting mappable memory want to use them as staging buffers
* or the like.
*/
nvkm_mmu_host(mmu);
/* Mappable VRAM types go last, as they're basically the worst
* possible type to ask for unless there's no other choice.
*/
if (device->bar) {
/* Write-combined BAR1 access. */
type |= NVKM_MEM_MAPPABLE;
if (!device->bar->iomap_uncached) {
nvkm_mmu_type(mmu, heapN, type);
nvkm_mmu_type(mmu, heapM, type);
}
/* Uncached BAR1 access. */
type |= NVKM_MEM_COHERENT;
type |= NVKM_MEM_UNCACHED;
nvkm_mmu_type(mmu, heapN, type);
nvkm_mmu_type(mmu, heapM, type);
}
}
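/* For illustration: on a typical board with VRAM and a write-combinable
 * BAR1, the two helpers above end up populating mmu->type[] roughly in
 * this priority order (indices shift whenever a heap is empty or a type
 * is skipped):
 *
 *   non-mappable VRAM (nomap, normal, mixed heaps),
 *   non-mappable host,
 *   mappable host (non-coherent cached, coherent cached, uncached),
 *   mappable VRAM (write-combined normal/mixed, then uncached normal/mixed).
 */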
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
/* Determine available memory types. */
if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
nvkm_mmu_vram(mmu);
else
nvkm_mmu_host(mmu);
if (mmu->func->vmm.global) {
int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
"gart", &mmu->vmm);
if (ret)
return ret;
}
return 0;
}
static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
if (mmu->func->init)
mmu->func->init(mmu);
return 0;
}
static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
nvkm_vmm_unref(&mmu->vmm);
nvkm_mmu_ptc_fini(mmu);
mutex_destroy(&mmu->mutex);
return mmu;
}
static const struct nvkm_subdev_func
nvkm_mmu = {
.dtor = nvkm_mmu_dtor,
.oneinit = nvkm_mmu_oneinit,
.init = nvkm_mmu_init,
};
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu)
{
nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev);
mmu->func = func;
mmu->dma_bits = func->dma_bits;
nvkm_mmu_ptc_init(mmu);
mutex_init(&mmu->mutex);
mmu->user.ctor = nvkm_ummu_new;
mmu->user.base = func->mmu.user;
}
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu)
{
if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
return -ENOMEM;
nvkm_mmu_ctor(func, device, type, inst, *pmmu);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
static const struct nvkm_mmu_func
gp10b_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
int
gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
return gm20b_mmu_new(device, type, inst, pmmu);
return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu);
}
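/* The "GP100MmuLayout" knob above is parsed from the device's cfgopt
 * string (typically fed through nouveau's config= module option, e.g.
 * config=GP100MmuLayout=0); turning it off makes GP10B fall back to the
 * GM20B MMU and its older VMM layout instead of the GP100-style one.
 */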
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "umem.h"
#include "ummu.h"
#include <core/client.h>
#include <core/memory.h>
#include <subdev/bar.h>
#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/unpack.h>
static const struct nvkm_object_func nvkm_umem;
struct nvkm_memory *
nvkm_umem_search(struct nvkm_client *client, u64 handle)
{
struct nvkm_client *master = client->object.client;
struct nvkm_memory *memory = NULL;
struct nvkm_object *object;
struct nvkm_umem *umem;
object = nvkm_object_search(client, handle, &nvkm_umem);
if (IS_ERR(object)) {
if (client != master) {
spin_lock(&master->lock);
list_for_each_entry(umem, &master->umem, head) {
if (umem->object.object == handle) {
memory = nvkm_memory_ref(umem->memory);
break;
}
}
spin_unlock(&master->lock);
}
} else {
umem = nvkm_umem(object);
memory = nvkm_memory_ref(umem->memory);
}
return memory ? memory : ERR_PTR(-ENOENT);
}
static int
nvkm_umem_unmap(struct nvkm_object *object)
{
struct nvkm_umem *umem = nvkm_umem(object);
if (!umem->map)
return -EEXIST;
if (umem->io) {
if (!IS_ERR(umem->bar)) {
struct nvkm_device *device = umem->mmu->subdev.device;
nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
} else {
umem->bar = NULL;
}
} else {
vunmap(umem->map);
umem->map = NULL;
}
return 0;
}
static int
nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *handle, u64 *length)
{
struct nvkm_umem *umem = nvkm_umem(object);
struct nvkm_mmu *mmu = umem->mmu;
if (!umem->mappable)
return -EINVAL;
if (umem->map)
return -EEXIST;
if ((umem->type & NVKM_MEM_HOST) && !argc) {
int ret = nvkm_mem_map_host(umem->memory, &umem->map);
if (ret)
return ret;
*handle = (unsigned long)(void *)umem->map;
*length = nvkm_memory_size(umem->memory);
*type = NVKM_OBJECT_MAP_VA;
return 0;
} else
if ((umem->type & NVKM_MEM_VRAM) ||
(umem->type & NVKM_MEM_KIND)) {
int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
handle, length, &umem->bar);
if (ret)
return ret;
*type = NVKM_OBJECT_MAP_IO;
} else {
return -EINVAL;
}
umem->io = (*type == NVKM_OBJECT_MAP_IO);
return 0;
}
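/* nvkm_umem_map() above supports two paths: plain host memory mapped with
 * no extra arguments is vmap()'d kernel-side and returned as an
 * NVKM_OBJECT_MAP_VA pointer, while VRAM or kind-annotated memory goes
 * through mmu->func->mem.umap, which hands back an NVKM_OBJECT_MAP_IO
 * physical address/size pair (a BAR window on dGPUs).
 */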
static void *
nvkm_umem_dtor(struct nvkm_object *object)
{
struct nvkm_umem *umem = nvkm_umem(object);
spin_lock(&umem->object.client->lock);
list_del_init(&umem->head);
spin_unlock(&umem->object.client->lock);
nvkm_memory_unref(&umem->memory);
return umem;
}
static const struct nvkm_object_func
nvkm_umem = {
.dtor = nvkm_umem_dtor,
.map = nvkm_umem_map,
.unmap = nvkm_umem_unmap,
};
int
nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
union {
struct nvif_mem_v0 v0;
} *args = argv;
struct nvkm_umem *umem;
int type, ret = -ENOSYS;
u8 page;
u64 size;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
type = args->v0.type;
page = args->v0.page;
size = args->v0.size;
} else
return ret;
if (type >= mmu->type_nr)
return -EINVAL;
if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
umem->mmu = mmu;
umem->type = mmu->type[type].type;
INIT_LIST_HEAD(&umem->head);
*pobject = &umem->object;
if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
page = max_t(u8, page, PAGE_SHIFT);
umem->mappable = true;
}
ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
&umem->memory);
if (ret)
return ret;
spin_lock(&umem->object.client->lock);
list_add(&umem->head, &umem->object.client->umem);
spin_unlock(&umem->object.client->lock);
args->v0.page = nvkm_memory_page(umem->memory);
args->v0.addr = nvkm_memory_addr(umem->memory);
args->v0.size = nvkm_memory_size(umem->memory);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
static const struct nvkm_mmu_func
gv100_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
int
gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <nvif/ifb00d.h>
#include <nvif/unpack.h>
static void
gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
}
static const struct nvkm_vmm_desc_func
gm200_vmm_spt = {
.unmap = gf100_vmm_pgt_unmap,
.sparse = gm200_vmm_pgt_sparse,
.mem = gf100_vmm_pgt_mem,
.dma = gf100_vmm_pgt_dma,
.sgl = gf100_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc_func
gm200_vmm_lpt = {
.invalid = gk104_vmm_lpt_invalid,
.unmap = gf100_vmm_pgt_unmap,
.sparse = gm200_vmm_pgt_sparse,
.mem = gf100_vmm_pgt_mem,
};
static void
gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
}
static const struct nvkm_vmm_desc_func
gm200_vmm_pgd = {
.unmap = gf100_vmm_pgt_unmap,
.sparse = gm200_vmm_pgd_sparse,
.pde = gf100_vmm_pgd_pde,
};
const struct nvkm_vmm_desc
gm200_vmm_desc_17_12[] = {
{ SPT, 15, 8, 0x1000, &gm200_vmm_spt },
{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gm200_vmm_desc_17_17[] = {
{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
{ PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gm200_vmm_desc_16_12[] = {
{ SPT, 14, 8, 0x1000, &gm200_vmm_spt },
{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gm200_vmm_desc_16_16[] = {
{ LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
{ PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
{}
};
int
gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
if (vmm->func->page[1].shift == 16)
base |= BIT_ULL(11);
return gf100_vmm_join_(vmm, inst, base);
}
int
gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
return gm200_vmm_join_(vmm, inst, 0);
}
static const struct nvkm_vmm_func
gm200_vmm_17 = {
.join = gm200_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
{ 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
};
static const struct nvkm_vmm_func
gm200_vmm_16 = {
.join = gm200_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
{ 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
};
int
gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
const struct nvkm_vmm_func *func_17,
struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
const struct nvkm_vmm_func *func;
union {
struct gm200_vmm_vn vn;
struct gm200_vmm_v0 v0;
} *args = argv;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
switch (args->v0.bigpage) {
case 16: func = func_16; break;
case 17: func = func_17; break;
default:
return -EINVAL;
}
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
func = func_17;
} else
return ret;
return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
}
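/* The bigpage argument unpacked above selects the big-page size paired
 * with 4KiB small pages: 16 selects gm200_vmm_16 (64KiB big pages), 17
 * selects gm200_vmm_17 (128KiB), and the versionless form defaults to 17.
 */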
int
gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
int
gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
static const struct nvkm_mmu_func
g84_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
.kind = nv50_mmu_kind,
.kind_sys = true,
};
int
g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
static const struct nvkm_vmm_func
gp10b_vmm = {
.join = gp100_vmm_join,
.part = gf100_vmm_part,
.aper = gk20a_vmm_aper,
.valid = gp100_vmm_valid,
.flush = gp100_vmm_flush,
.mthd = gp100_vmm_mthd,
.invalidate_pdb = gp100_vmm_invalidate_pdb,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
{}
}
};
int
gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
static void
nv41_mmu_init(struct nvkm_mmu *mmu)
{
struct nvkm_device *device = mmu->subdev.device;
nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000);
}
static const struct nvkm_mmu_func
nv41_mmu = {
.init = nv41_mmu_init,
.dma_bits = 39,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
};
int
nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, type, inst, pmmu);
return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
void
gk104_vmm_lpt_invalid(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
}
static const struct nvkm_vmm_desc_func
gk104_vmm_lpt = {
.invalid = gk104_vmm_lpt_invalid,
.unmap = gf100_vmm_pgt_unmap,
.mem = gf100_vmm_pgt_mem,
};
const struct nvkm_vmm_desc
gk104_vmm_desc_17_12[] = {
{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gk104_vmm_desc_17_17[] = {
{ LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gk104_vmm_desc_16_12[] = {
{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
{}
};
const struct nvkm_vmm_desc
gk104_vmm_desc_16_16[] = {
{ LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
{}
};
static const struct nvkm_vmm_func
gk104_vmm_17 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
static const struct nvkm_vmm_func
gk104_vmm_16 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
int
gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/timer.h>
static void
nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u32 data = (addr >> 7) | 0x00000001; /* VALID. */
while (ptes--) {
VMM_WO032(pt, vmm, ptei++ * 4, data);
data += 0x00000020;
}
}
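/* Worked example of the encoding above: a page at bus address 0x10000000
 * becomes PTE (0x10000000 >> 7) | 1 = 0x00200001, and since a 4KiB step
 * is 0x1000 >> 7 = 0x20 after the shift, the loop simply adds 0x20 for
 * each successive PTE of a contiguous mapping.
 */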
static void
nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
}
static void
nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
nvkm_kmap(pt->memory);
while (ptes--) {
const u32 data = (*map->dma++ >> 7) | 0x00000001;
VMM_WO032(pt, vmm, ptei++ * 4, data);
}
nvkm_done(pt->memory);
#else
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
#endif
}
static void
nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
}
static const struct nvkm_vmm_desc_func
nv41_vmm_desc_pgt = {
.unmap = nv41_vmm_pgt_unmap,
.dma = nv41_vmm_pgt_dma,
.sgl = nv41_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc
nv41_vmm_desc_12[] = {
{ PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
{}
};
static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0x100810, 0x00000022);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100810) & 0x00000020)
break;
);
nvkm_wr32(device, 0x100810, 0x00000000);
mutex_unlock(&vmm->mmu->mutex);
}
static const struct nvkm_vmm_func
nv41_vmm = {
.valid = nv04_vmm_valid,
.flush = nv41_vmm_flush,
.page = {
{ 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}
}
};
int
nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if500d.h>
#include <nvif/unpack.h>
int
nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
{
struct nv50_vmm_map_v0 uvmm = {};
union {
struct nv50_mem_map_vn vn;
struct nv50_mem_map_v0 v0;
} *args = argv;
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
u64 size = nvkm_memory_size(memory);
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
uvmm.ro = args->v0.ro;
uvmm.kind = args->v0.kind;
uvmm.comp = args->v0.comp;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
} else
return ret;
ret = nvkm_vmm_get(bar, 12, size, pvma);
if (ret)
return ret;
*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
*psize = (*pvma)->size;
return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
}
int
nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
void *argv, u32 argc, struct nvkm_memory **pmemory)
{
union {
struct nv50_mem_vn vn;
struct nv50_mem_v0 v0;
} *args = argv;
int ret = -ENOSYS;
bool contig;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
type = args->v0.bankswz ? 0x02 : 0x01;
contig = args->v0.contig;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
type = 0x01;
contig = false;
} else
return -ENOSYS;
return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
page, size, contig, false, pmemory);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <core/option.h>
#include <nvif/class.h>
static const struct nvkm_mmu_func
gp100_mmu = {
.dma_bits = 47,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
int
gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
return gm200_mmu_new(device, type, inst, pmmu);
return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
static const struct nvkm_mmu_func
gk20a_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
.kind = gf100_mmu_kind,
.kind_sys = true,
};
int
gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/timer.h>
static void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
mutex_lock(&vmm->mmu->mutex);
nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
nvkm_wr32(device, 0xb830a4, 0x00000000);
nvkm_wr32(device, 0x100e68, 0x00000000);
nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
break;
);
mutex_unlock(&vmm->mmu->mutex);
}
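/* For illustration: a flush at depth 0 encodes type = (5 - 0) << 24 | 0x1
 * = 0x05000001 (plus 0x4 when only the HUB copy needs invalidating), and
 * the 0x80000000 | type write to 0xb830b0 both kicks the flush and serves
 * as the busy bit that the poll loop waits to clear.
 */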
static const struct nvkm_vmm_func
tu102_vmm = {
.join = gv100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gp100_vmm_valid,
.flush = tu102_vmm_flush,
.mthd = gp100_vmm_mthd,
.page = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
};
int
tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"
#include <core/client.h>
#include <core/memory.h>
#include <nvif/if000c.h>
#include <nvif/unpack.h>
static const struct nvkm_object_func nvkm_uvmm;
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
struct nvkm_object *object;
object = nvkm_object_search(client, handle, &nvkm_uvmm);
if (IS_ERR(object))
return (void *)object;
return nvkm_vmm_ref(nvkm_uvmm(object)->vmm);
}
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_pfnclr_v0 v0;
} *args = argv;
struct nvkm_vmm *vmm = uvmm->vmm;
int ret = -ENOSYS;
u64 addr, size;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
addr = args->v0.addr;
size = args->v0.size;
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) {
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_pfnmap_v0 v0;
} *args = argv;
struct nvkm_vmm *vmm = uvmm->vmm;
int ret = -ENOSYS;
u64 addr, size, *phys;
u8 page;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
page = args->v0.page;
addr = args->v0.addr;
size = args->v0.size;
phys = args->v0.phys;
if (argc != (size >> page) * sizeof(args->v0.phys[0]))
return -EINVAL;
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) {
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_unmap_v0 v0;
} *args = argv;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma *vma;
int ret = -ENOSYS;
u64 addr;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
addr = args->v0.addr;
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, addr);
if (ret = -ENOENT, !vma || vma->addr != addr) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx",
addr, vma ? vma->addr : ~0ULL);
goto done;
}
if (ret = -ENOENT, vma->busy) {
VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
if (ret = -EINVAL, !vma->memory) {
VMM_DEBUG(vmm, "unmapped");
goto done;
}
nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0;
done:
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
struct nvkm_client *client = uvmm->object.client;
union {
struct nvif_vmm_map_v0 v0;
} *args = argv;
u64 addr, size, handle, offset;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma *vma;
struct nvkm_memory *memory;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
addr = args->v0.addr;
size = args->v0.size;
handle = args->v0.memory;
offset = args->v0.offset;
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
memory = nvkm_umem_search(client, handle);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
mutex_lock(&vmm->mutex.vmm);
if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
VMM_DEBUG(vmm, "lookup %016llx", addr);
goto fail;
}
if (ret = -ENOENT, vma->busy) {
VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto fail;
}
if (ret = -EINVAL, vma->mapped && !vma->memory) {
VMM_DEBUG(vmm, "pfnmap %016llx", addr);
goto fail;
}
if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
if (addr + size > vma->addr + vma->size || vma->memory ||
(vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
VMM_DEBUG(vmm, "split %d %d %d "
"%016llx %016llx %016llx %016llx",
!!vma->memory, vma->refd, vma->mapref,
addr, size, vma->addr, (u64)vma->size);
goto fail;
}
vma = nvkm_vmm_node_split(vmm, vma, addr, size);
if (!vma) {
ret = -ENOMEM;
goto fail;
}
}
vma->busy = true;
mutex_unlock(&vmm->mutex.vmm);
ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
if (ret == 0) {
/* Successful map will clear vma->busy. */
nvkm_memory_unref(&memory);
return 0;
}
mutex_lock(&vmm->mutex.vmm);
vma->busy = false;
nvkm_vmm_unmap_region(vmm, vma);
fail:
mutex_unlock(&vmm->mutex.vmm);
nvkm_memory_unref(&memory);
return ret;
}
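/* Note that the map path above drops vmm->mutex around nvkm_memory_map():
 * the region is reserved by setting vma->busy first, a successful map
 * clears it again, and a failure re-takes the lock to clear busy and
 * return the region to its unmapped state via nvkm_vmm_unmap_region().
 */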
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_put_v0 v0;
} *args = argv;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma *vma;
int ret = -ENOSYS;
u64 addr;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
addr = args->v0.addr;
} else
return ret;
mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, args->v0.addr);
if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
goto done;
}
if (ret = -ENOENT, vma->busy) {
VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
goto done;
}
nvkm_vmm_put_locked(vmm, vma);
ret = 0;
done:
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_get_v0 v0;
} *args = argv;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma *vma;
int ret = -ENOSYS;
bool getref, mapref, sparse;
u8 page, align;
u64 size;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
sparse = args->v0.sparse;
page = args->v0.page;
align = args->v0.align;
size = args->v0.size;
} else
return ret;
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
page, align, size, &vma);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
args->v0.addr = vma->addr;
return ret;
}
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_page_v0 v0;
} *args = argv;
const struct nvkm_vmm_page *page;
int ret = -ENOSYS;
u8 type, index, nr;
page = uvmm->vmm->func->page;
for (nr = 0; page[nr].shift; nr++);
if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= nr)
return -EINVAL;
type = page[index].type;
args->v0.shift = page[index].shift;
args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
} else
return -ENOSYS;
return 0;
}
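/* A client can enumerate the supported page sizes by calling the
 * NVIF_VMM_V0_PAGE method above with index 0..page_nr-1 (page_nr is
 * reported by VMM creation below); each reply carries the page shift plus
 * the sparse/vram/host/comp capability flags for that size.
 */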
static inline int
nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
{
struct nvkm_vmm *vmm = uvmm->vmm;
const struct nvkm_vmm_page *page;
if (likely(shift)) {
for (page = vmm->func->page; page->shift; page++) {
if (shift == page->shift)
break;
}
if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
VMM_DEBUG(vmm, "page %d %016llx", shift, size);
return -EINVAL;
}
} else {
return -EINVAL;
}
*refd = page - vmm->func->page;
return 0;
}
static int
nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
}
static int
nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_client *client = uvmm->object.client;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma vma = {
.addr = args->addr,
.size = args->size,
.used = true,
.mapref = false,
.no_comp = true,
};
struct nvkm_memory *memory;
void *argv = (void *)(uintptr_t)args->argv;
unsigned int argc = args->argc;
u64 handle = args->memory;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
vma.page = vma.refd = refd;
memory = nvkm_umem_search(client, args->memory);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
nvkm_memory_unref(&vma.memory);
nvkm_memory_unref(&memory);
return ret;
}
static int
nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
args->sparse, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
}
static int
nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_raw_v0 v0;
} *args = argv;
int ret = -ENOSYS;
if (!uvmm->vmm->managed.raw)
return -EINVAL;
if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
return ret;
switch (args->v0.op) {
case NVIF_VMM_RAW_V0_GET:
return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_PUT:
return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_MAP:
return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_UNMAP:
return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_SPARSE:
return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
default:
return -EINVAL;
}
}
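/* Summary of the RAW sub-ops dispatched above (all require a VMM created
 * as NVIF_VMM_V0_TYPE_RAW and a range inside its managed window): GET/PUT
 * explicitly reference and release page tables for addr/size at a given
 * page shift, MAP/UNMAP install or tear down PTEs for a memory handle at
 * a caller-chosen page size, and SPARSE toggles sparse state on a range.
 */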
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
switch (mthd) {
case NVIF_VMM_V0_PAGE : return nvkm_uvmm_mthd_page (uvmm, argv, argc);
case NVIF_VMM_V0_GET : return nvkm_uvmm_mthd_get (uvmm, argv, argc);
case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
if (uvmm->vmm->func->mthd) {
return uvmm->vmm->func->mthd(uvmm->vmm,
uvmm->object.client,
mthd, argv, argc);
}
break;
default:
break;
}
return -EINVAL;
}
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
nvkm_vmm_unref(&uvmm->vmm);
return uvmm;
}
static const struct nvkm_object_func
nvkm_uvmm = {
.dtor = nvkm_uvmm_dtor,
.mthd = nvkm_uvmm_mthd,
};
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
const bool more = oclass->base.maxver >= 0;
union {
struct nvif_vmm_v0 v0;
} *args = argv;
const struct nvkm_vmm_page *page;
struct nvkm_uvmm *uvmm;
int ret = -ENOSYS;
u64 addr, size;
bool managed, raw;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
addr = args->v0.addr;
size = args->v0.size;
} else
return ret;
if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
*pobject = &uvmm->object;
if (!mmu->vmm) {
ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
argv, argc, NULL, "user", &uvmm->vmm);
if (ret)
return ret;
uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
} else {
if (size)
return -EINVAL;
uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
}
uvmm->vmm->managed.raw = raw;
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;
while (page && (page++)->shift)
args->v0.page_nr++;
args->v0.addr = uvmm->vmm->start;
args->v0.size = uvmm->vmm->limit;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include <core/memory.h>
#include <subdev/fb.h>
#include <nvif/if000b.h>
#include <nvif/unpack.h>
int
nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
{
union {
struct nv04_mem_map_vn vn;
} *args = argv;
struct nvkm_device *device = mmu->subdev.device;
const u64 addr = nvkm_memory_addr(memory);
int ret = -ENOSYS;
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
*paddr = device->func->resource_addr(device, 1) + addr;
*psize = nvkm_memory_size(memory);
*pvma = ERR_PTR(-ENODEV);
return 0;
}
int
nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
void *argv, u32 argc, struct nvkm_memory **pmemory)
{
union {
struct nv04_mem_vn vn;
} *args = argv;
int ret = -ENOSYS;
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
type = NVKM_RAM_MM_NORMAL;
else
type = NVKM_RAM_MM_NOMAP;
return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
size, true, false, pmemory);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <nvif/if900d.h>
#include <nvif/unpack.h>
static inline void
gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u64 base = (addr >> 8) | map->type;
u64 data = base;
if (map->ctag && !(map->next & (1ULL << 44))) {
while (ptes--) {
data = base | ((map->ctag >> 1) << 44);
if (!(map->ctag++ & 1))
data |= BIT_ULL(60);
VMM_WO064(pt, vmm, ptei++ * 8, data);
base += map->next;
}
} else {
map->type += ptes * map->ctag;
while (ptes--) {
VMM_WO064(pt, vmm, ptei++ * 8, data);
data += map->next;
}
}
}
void
gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
void
gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
if (map->page->shift == PAGE_SHIFT) {
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
const u64 data = (*map->dma++ >> 8) | map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
nvkm_done(pt->memory);
return;
}
VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
void
gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
void
gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}
const struct nvkm_vmm_desc_func
gf100_vmm_pgt = {
.unmap = gf100_vmm_pgt_unmap,
.mem = gf100_vmm_pgt_mem,
.dma = gf100_vmm_pgt_dma,
.sgl = gf100_vmm_pgt_sgl,
};
void
gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
struct nvkm_mmu_pt *pd = pgd->pt[0];
struct nvkm_mmu_pt *pt;
u64 data = 0;
if ((pt = pgt->pt[0])) {
switch (nvkm_memory_target(pt->memory)) {
case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
data |= BIT_ULL(35); /* VOL */
break;
case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
default:
WARN_ON(1);
return;
}
data |= pt->addr >> 8;
}
if ((pt = pgt->pt[1])) {
switch (nvkm_memory_target(pt->memory)) {
case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
data |= BIT_ULL(34); /* VOL */
break;
case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
default:
WARN_ON(1);
return;
}
data |= pt->addr << 24;
}
nvkm_kmap(pd->memory);
VMM_WO064(pd, vmm, pdei * 8, data);
nvkm_done(pd->memory);
}
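/* The PDE written above packs both per-page-size table pointers into one
 * 64-bit entry: each pointer carries a 2-bit target code (1 = VRAM,
 * 2 = host, 3 = non-coherent) plus a VOL bit (35/34) for host-backed
 * tables, with one table address stored >> 8 in the low word and the
 * other shifted into the high word.
 */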
const struct nvkm_vmm_desc_func
gf100_vmm_pgd = {
.unmap = gf100_vmm_pgt_unmap,
.pde = gf100_vmm_pgd_pde,
};
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_12[] = {
{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
{}
};
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_17[] = {
{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
{}
};
static const struct nvkm_vmm_desc
gf100_vmm_desc_16_12[] = {
{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
{}
};
static const struct nvkm_vmm_desc
gf100_vmm_desc_16_16[] = {
{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
{}
};
void
gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
nvkm_wr32(device, 0x100cb8, addr);
}
void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
u64 addr = 0;
mutex_lock(&vmm->mmu->mutex);
/* Looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases.
*/
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
break;
);
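/* Unless flushing every PDB, aim the flush at this VMM's page
 * directory: the low bits select its memory aperture, and the PDB
 * address (shifted right by 12) lands in bits 4 and up.
 */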
if (!(type & 0x00000002) /* ALL_PDB. */) {
switch (nvkm_memory_target(pd->memory)) {
case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
default:
WARN_ON(1);
break;
}
addr |= (vmm->pd->pt[0]->addr >> 12) << 4;
vmm->func->invalidate_pdb(vmm, addr);
}
nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* Wait for flush to be queued? */
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100c80) & 0x00008000)
break;
);
mutex_unlock(&vmm->mmu->mutex);
}
void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
u32 type = 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
gf100_vmm_invalidate(vmm, type);
}
int
gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
const struct nvkm_vmm_page *page = map->page;
const bool gm20x = page->desc->func->sparse != NULL;
union {
struct gf100_vmm_map_vn vn;
struct gf100_vmm_map_v0 v0;
} *args = argv;
struct nvkm_device *device = vmm->mmu->subdev.device;
struct nvkm_memory *memory = map->memory;
u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
map->next = (1 << page->shift) >> 8;
map->type = map->ctag = 0;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
vol = !!args->v0.vol;
ro = !!args->v0.ro;
priv = !!args->v0.priv;
kind = args->v0.kind;
} else
if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
vol = target == NVKM_MEM_TARGET_HOST;
ro = 0;
priv = 0;
kind = 0x00;
} else {
VMM_DEBUG(vmm, "args");
return ret;
}
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
if (kind >= kindn || kindm[kind] == kind_inv) {
VMM_DEBUG(vmm, "kind %02x", kind);
return -EINVAL;
}
if (kindm[kind] != kind) {
u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
return -EINVAL;
}
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
}
if (!map->no_comp && map->tags->mn) {
u64 tags = map->tags->mn->offset + (map->offset >> 17);
if (page->shift == 17 || !gm20x) {
map->type |= tags << 44;
map->ctag |= 1ULL << 44;
map->next |= 1ULL << 44;
} else {
map->ctag |= tags << 1 | 1;
}
} else {
kind = kindm[kind];
}
}
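/* Build the final PTE template: valid bit, then the privileged,
 * read-only, volatile, aperture and kind fields at the shifts below.
 * Field names here are descriptive rather than taken from hardware
 * documentation.
 */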
map->type |= BIT(0);
map->type |= (u64)priv << 1;
map->type |= (u64) ro << 2;
map->type |= (u64) vol << 32;
map->type |= (u64)aper << 33;
map->type |= (u64)kind << 36;
return 0;
}
int
gf100_vmm_aper(enum nvkm_memory_target target)
{
switch (target) {
case NVKM_MEM_TARGET_VRAM: return 0;
case NVKM_MEM_TARGET_HOST: return 2;
case NVKM_MEM_TARGET_NCOH: return 3;
default:
return -EINVAL;
}
}
void
gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
nvkm_fo64(inst, 0x0200, 0x00000000, 2);
}
int
gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
switch (nvkm_memory_target(pd->memory)) {
case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
base |= BIT_ULL(2) /* VOL. */;
break;
case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
default:
WARN_ON(1);
return -EINVAL;
}
base |= pd->addr;
nvkm_kmap(inst);
nvkm_wo64(inst, 0x0200, base);
nvkm_wo64(inst, 0x0208, vmm->limit - 1);
nvkm_done(inst);
return 0;
}
int
gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
return gf100_vmm_join_(vmm, inst, 0);
}
static const struct nvkm_vmm_func
gf100_vmm_17 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
static const struct nvkm_vmm_func
gf100_vmm_16 = {
.join = gf100_vmm_join,
.part = gf100_vmm_part,
.aper = gf100_vmm_aper,
.valid = gf100_vmm_valid,
.flush = gf100_vmm_flush,
.invalidate_pdb = gf100_vmm_invalidate_pdb,
.page = {
{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
{}
}
};
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
const struct nvkm_vmm_func *func_17,
struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
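/* The FB subdev decides whether VRAM uses 64KiB (page == 16) or
 * 128KiB (page == 17) big pages; pick the matching set of page-table
 * descriptors accordingly.
 */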
switch (mmu->subdev.device->fb->page) {
case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
argv, argc, key, name, pvmm);
default:
WARN_ON(1);
return -EINVAL;
}
}
int
gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *key,
const char *name, struct nvkm_vmm **pvmm)
{
return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
size, argv, argc, key, name, pvmm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <subdev/fb.h>
#include <nvif/class.h>
static const struct nvkm_mmu_func
gm20b_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
static const struct nvkm_mmu_func
gm20b_mmu_fixed = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
.kind = gm200_mmu_kind,
.kind_sys = true,
};
int
gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
if (device->fb->page)
return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu);
return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "mem.h"
#include "vmm.h"
#include <nvif/class.h>
static const struct nvkm_mmu_func
mcp77_mmu = {
.dma_bits = 40,
.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
.kind = nv50_mmu_kind,
.kind_sys = true,
};
int
mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mmu **pmmu)
{
return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/falcon.h>
#include <core/firmware.h>
#include <subdev/acr.h>
#include <subdev/top.h>
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
nvkm_falcon_dtor(&gsp->falcon);
return gsp;
}
static const struct nvkm_subdev_func
nvkm_gsp = {
.dtor = nvkm_gsp_dtor,
};
int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
struct nvkm_gsp *gsp;
if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
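/* nvkm_firmware_load() walks the fwif table for an entry matching this
 * device (loading any firmware it names); the selected entry then
 * supplies the falcon function pointers used to construct gsp->falcon.
 */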
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
if (IS_ERR(fwif))
return PTR_ERR(fwif);
gsp->func = fwif->func;
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static const struct nvkm_falcon_func
gv100_gsp_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.reset_eng = gp102_flcn_reset_eng,
.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
.bind_inst = gm200_flcn_bind_inst,
.bind_stat = gm200_flcn_bind_stat,
.bind_intr = true,
.imem_pio = &gm200_flcn_imem_pio,
.dmem_pio = &gm200_flcn_dmem_pio,
};
static const struct nvkm_gsp_func
gv100_gsp = {
.flcn = &gv100_gsp_flcn,
};
static int
gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
return 0;
}
static struct nvkm_gsp_fwif
gv100_gsps[] = {
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
int
gv100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gsp **pgsp)
{
return nvkm_gsp_new_(gv100_gsps, device, type, inst, pgsp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static const struct nvkm_falcon_func
ga102_gsp_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.select = ga102_flcn_select,
.addr2 = 0x1000,
.reset_eng = gp102_flcn_reset_eng,
.reset_prep = ga102_flcn_reset_prep,
.reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
.imem_dma = &ga102_flcn_dma,
.dmem_dma = &ga102_flcn_dma,
};
static const struct nvkm_gsp_func
ga102_gsp = {
.flcn = &ga102_gsp_flcn,
};
static int
ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
return 0;
}
static struct nvkm_gsp_fwif
ga102_gsps[] = {
{ -1, ga102_gsp_nofw, &ga102_gsp },
{}
};
int
ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gsp **pgsp)
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_mc_map
g98_mc_reset[] = {
{ 0x04008000, NVKM_ENGINE_MSVLD },
{ 0x02004000, NVKM_ENGINE_SEC },
{ 0x01020000, NVKM_ENGINE_MSPDEC },
{ 0x00400002, NVKM_ENGINE_MSPPP },
{ 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{}
};
static const struct nvkm_intr_data
g98_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_MSPDEC, 0, 0, 0x00020000, true },
{ NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
{ NVKM_ENGINE_SEC , 0, 0, 0x00004000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x0002d101, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{},
};
static const struct nvkm_mc_func
g98_mc = {
.init = nv50_mc_init,
.intr = &nv04_mc_intr,
.intrs = g98_mc_intrs,
.device = &nv04_mc_device,
.reset = g98_mc_reset,
};
int
g98_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&g98_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
const struct nvkm_mc_map
nv04_mc_reset[] = {
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{}
};
static void
nv04_mc_device_disable(struct nvkm_mc *mc, u32 mask)
{
nvkm_mask(mc->subdev.device, 0x000200, mask, 0x00000000);
}
static void
nv04_mc_device_enable(struct nvkm_mc *mc, u32 mask)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_mask(device, 0x000200, mask, mask);
nvkm_rd32(device, 0x000200);
}
static bool
nv04_mc_device_enabled(struct nvkm_mc *mc, u32 mask)
{
return (nvkm_rd32(mc->subdev.device, 0x000200) & mask) == mask;
}
const struct nvkm_mc_device_func
nv04_mc_device = {
.enabled = nv04_mc_device_enabled,
.enable = nv04_mc_device_enable,
.disable = nv04_mc_device_disable,
};
static const struct nvkm_intr_data
nv04_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x01010000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
{}
};
void
nv04_mc_intr_rearm(struct nvkm_intr *intr)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
int leaf;
for (leaf = 0; leaf < intr->leaves; leaf++)
nvkm_wr32(mc->subdev.device, 0x000140 + (leaf * 4), 0x00000001);
}
void
nv04_mc_intr_unarm(struct nvkm_intr *intr)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
int leaf;
for (leaf = 0; leaf < intr->leaves; leaf++)
nvkm_wr32(mc->subdev.device, 0x000140 + (leaf * 4), 0x00000000);
nvkm_rd32(mc->subdev.device, 0x000140);
}
bool
nv04_mc_intr_pending(struct nvkm_intr *intr)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
bool pending = false;
int leaf;
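/* One 32-bit status/enable register pair per leaf, at 4-byte strides
 * from 0x000100/0x000140.  Latch each status word into intr->stat[],
 * presumably so the core can dispatch handlers without re-reading the
 * hardware.
 */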
for (leaf = 0; leaf < intr->leaves; leaf++) {
intr->stat[leaf] = nvkm_rd32(mc->subdev.device, 0x000100 + (leaf * 4));
if (intr->stat[leaf])
pending = true;
}
return pending;
}
const struct nvkm_intr_func
nv04_mc_intr = {
.pending = nv04_mc_intr_pending,
.unarm = nv04_mc_intr_unarm,
.rearm = nv04_mc_intr_rearm,
};
void
nv04_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
}
static const struct nvkm_mc_func
nv04_mc = {
.init = nv04_mc_init,
.intr = &nv04_mc_intr,
.intrs = nv04_mc_intrs,
.device = &nv04_mc_device,
.reset = nv04_mc_reset,
};
int
nv04_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&nv04_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{}
};
const struct nvkm_intr_data
gk104_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_SUBDEV_PRIVRING, 0, 0, 0x40000000, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x08002000, true },
{ NVKM_SUBDEV_LTC , 0, 0, 0x02000000, true },
{ NVKM_SUBDEV_PMU , 0, 0, 0x01000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{ NVKM_SUBDEV_THERM , 0, 0, 0x00040000, true },
{ NVKM_SUBDEV_TOP , 0, 0, 0x00001000 },
{ NVKM_SUBDEV_TOP , 0, 0, 0xffffefff, true },
{},
};
static const struct nvkm_mc_func
gk104_mc = {
.init = nv50_mc_init,
.intr = &gt215_mc_intr,
.intrs = gk104_mc_intrs,
.intr_nonstall = true,
.reset = gk104_mc_reset,
.device = &nv04_mc_device,
.unk260 = gf100_mc_unk260,
};
int
gk104_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gk104_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
void
nv44_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
u32 tmp = nvkm_rd32(device, 0x10020c);
nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
nvkm_wr32(device, 0x001700, tmp);
nvkm_wr32(device, 0x001704, 0);
nvkm_wr32(device, 0x001708, 0);
nvkm_wr32(device, 0x00170c, tmp);
}
static const struct nvkm_mc_func
nv44_mc = {
.init = nv44_mc_init,
.intr = &nv04_mc_intr,
.intrs = nv17_mc_intrs,
.device = &nv04_mc_device,
.reset = nv17_mc_reset,
};
int
nv44_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&nv44_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_mc_map
gf100_mc_reset[] = {
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
{ 0x00002000, NVKM_SUBDEV_PMU, 0, true },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000080, NVKM_ENGINE_CE, 1 },
{ 0x00000040, NVKM_ENGINE_CE, 0 },
{ 0x00000002, NVKM_ENGINE_MSPPP },
{}
};
static const struct nvkm_intr_data
gf100_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_MSPDEC , 0, 0, 0x00020000, true },
{ NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000 },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_CE , 1, 0, 0x00000040, true },
{ NVKM_ENGINE_CE , 0, 0, 0x00000020, true },
{ NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_PRIVRING, 0, 0, 0x40000000, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x08002000, true },
{ NVKM_SUBDEV_LTC , 0, 0, 0x02000000, true },
{ NVKM_SUBDEV_PMU , 0, 0, 0x01000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{ NVKM_SUBDEV_THERM , 0, 0, 0x00040000, true },
{},
};
void
gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
{
nvkm_wr32(mc->subdev.device, 0x000260, data);
}
static const struct nvkm_mc_func
gf100_mc = {
.init = nv50_mc_init,
.intr = &gt215_mc_intr,
.intrs = gf100_mc_intrs,
.intr_nonstall = true,
.reset = gf100_mc_reset,
.device = &nv04_mc_device,
.unk260 = gf100_mc_unk260,
};
int
gf100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gf100_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_intr_data
nv50_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_MPEG , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x00001101, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
{},
};
void
nv50_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
}
static const struct nvkm_mc_func
nv50_mc = {
.init = nv50_mc_init,
.intr = &nv04_mc_intr,
.intrs = nv50_mc_intrs,
.device = &nv04_mc_device,
.reset = nv17_mc_reset,
};
int
nv50_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&nv50_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static void
ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_mask(device, 0x000600, mask, 0x00000000);
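/* The two back-to-back reads below appear to flush the posted write
 * and let the engine-enable change settle; the enable path does the
 * same.
 */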
nvkm_rd32(device, 0x000600);
nvkm_rd32(device, 0x000600);
}
static void
ga100_mc_device_enable(struct nvkm_mc *mc, u32 mask)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_mask(device, 0x000600, mask, mask);
nvkm_rd32(device, 0x000600);
nvkm_rd32(device, 0x000600);
}
static bool
ga100_mc_device_enabled(struct nvkm_mc *mc, u32 mask)
{
return (nvkm_rd32(mc->subdev.device, 0x000600) & mask) == mask;
}
static const struct nvkm_mc_device_func
ga100_mc_device = {
.enabled = ga100_mc_device_enabled,
.enable = ga100_mc_device_enable,
.disable = ga100_mc_device_disable,
};
static void
ga100_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_wr32(device, 0x000200, 0xffffffff);
nvkm_wr32(device, 0x000600, 0xffffffff);
}
static const struct nvkm_mc_func
ga100_mc = {
.init = ga100_mc_init,
.device = &ga100_mc_device,
};
int
ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/option.h>
#include <subdev/top.h>
void
nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
struct nvkm_mc *mc = device->mc;
if (likely(mc) && mc->func->unk260)
mc->func->unk260(mc, data);
}
void
nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en)
{
struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst);
if (subdev) {
if (en)
nvkm_intr_allow(subdev, NVKM_INTR_SUBDEV);
else
nvkm_intr_block(subdev, NVKM_INTR_SUBDEV);
}
}
static u32
nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst)
{
struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
u64 pmc_enable = 0;
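/* Prefer the per-device reset mask from the TOP table when one exists;
 * otherwise fall back to the chipset's static reset map, skipping
 * noauto entries when this is an automatic reset.
 */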
if (likely(mc)) {
if (!(pmc_enable = nvkm_top_reset(device, type, inst))) {
for (map = mc->func->reset; map && map->stat; map++) {
if (!isauto || !map->noauto) {
if (map->type == type && map->inst == inst) {
pmc_enable = map->stat;
break;
}
}
}
}
}
return pmc_enable;
}
void
nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst);
if (pmc_enable) {
device->mc->func->device->disable(device->mc, pmc_enable);
device->mc->func->device->enable(device->mc, pmc_enable);
}
}
void
nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable)
device->mc->func->device->disable(device->mc, pmc_enable);
}
void
nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
if (pmc_enable)
device->mc->func->device->enable(device->mc, pmc_enable);
}
bool
nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst);
return (pmc_enable != 0) && device->mc->func->device->enabled(device->mc, pmc_enable);
}
static int
nvkm_mc_init(struct nvkm_subdev *subdev)
{
struct nvkm_mc *mc = nvkm_mc(subdev);
if (mc->func->init)
mc->func->init(mc);
return 0;
}
static void *
nvkm_mc_dtor(struct nvkm_subdev *subdev)
{
return nvkm_mc(subdev);
}
static const struct nvkm_subdev_func
nvkm_mc = {
.dtor = nvkm_mc_dtor,
.init = nvkm_mc_init,
};
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
int ret;
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev);
mc->func = func;
if (mc->func->intr) {
ret = nvkm_intr_add(mc->func->intr, mc->func->intrs, &mc->subdev,
mc->func->intr_nonstall ? 2 : 1, &mc->intr);
if (ret)
return ret;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c |
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
static void
gp10b_mc_init(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
nvkm_wr32(device, 0x00020c, 0xffffffff); /* everything out of ELPG */
}
static const struct nvkm_mc_func
gp10b_mc = {
.init = gp10b_mc_init,
.intr = &gp100_mc_intr,
.intrs = gp100_mc_intrs,
.intr_nonstall = true,
.device = &nv04_mc_device,
.reset = gk104_mc_reset,
};
int
gp10b_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gp10b_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
const struct nvkm_mc_map
nv17_mc_reset[] = {
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000002, NVKM_ENGINE_MPEG },
{}
};
const struct nvkm_intr_data
nv17_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x03010000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_MPEG , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
{}
};
static const struct nvkm_mc_func
nv17_mc = {
.init = nv04_mc_init,
.intr = &nv04_mc_intr,
.intrs = nv17_mc_intrs,
.device = &nv04_mc_device,
.reset = nv17_mc_reset,
};
int
nv17_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&nv17_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_mc_map
g84_mc_reset[] = {
{ 0x04008000, NVKM_ENGINE_BSP },
{ 0x02004000, NVKM_ENGINE_CIPHER },
{ 0x01020000, NVKM_ENGINE_VP },
{ 0x00400002, NVKM_ENGINE_MPEG },
{ 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{}
};
static const struct nvkm_intr_data
g84_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_VP , 0, 0, 0x00020000, true },
{ NVKM_ENGINE_BSP , 0, 0, 0x00008000, true },
{ NVKM_ENGINE_CIPHER, 0, 0, 0x00004000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_MPEG , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x0002d101, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{},
};
static const struct nvkm_mc_func
g84_mc = {
.init = nv50_mc_init,
.intr = &nv04_mc_intr,
.intrs = g84_mc_intrs,
.device = &nv04_mc_device,
.reset = g84_mc_reset,
};
int
g84_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&g84_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_mc_map
gt215_mc_reset[] = {
{ 0x04008000, NVKM_ENGINE_MSVLD },
{ 0x01020000, NVKM_ENGINE_MSPDEC },
{ 0x00802000, NVKM_ENGINE_CE, 0 },
{ 0x00400002, NVKM_ENGINE_MSPPP },
{ 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{}
};
static const struct nvkm_intr_data
gt215_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_CE , 0, 0, 0x00400000, true },
{ NVKM_ENGINE_MSPDEC, 0, 0, 0x00020000, true },
{ NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x00429101, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{ NVKM_SUBDEV_THERM , 0, 0, 0x00080000, true },
{ NVKM_SUBDEV_PMU , 0, 0, 0x00040000, true },
{},
};
static void
gt215_mc_intr_allow(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
nvkm_mask(mc->subdev.device, 0x000640 + (leaf * 4), mask, mask);
}
static void
gt215_mc_intr_block(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
nvkm_mask(mc->subdev.device, 0x000640 + (leaf * 4), mask, 0);
}
const struct nvkm_intr_func
gt215_mc_intr = {
.pending = nv04_mc_intr_pending,
.unarm = nv04_mc_intr_unarm,
.rearm = nv04_mc_intr_rearm,
.block = gt215_mc_intr_block,
.allow = gt215_mc_intr_allow,
};
static const struct nvkm_mc_func
gt215_mc = {
.init = nv50_mc_init,
.intr = &nv04_mc_intr,
.intrs = gt215_mc_intrs,
.device = &nv04_mc_device,
.reset = gt215_mc_reset,
};
int
gt215_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gt215_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
const struct nvkm_intr_data
gp100_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_SUBDEV_FAULT , 0, 0, 0x00000200, true },
{ NVKM_SUBDEV_PRIVRING, 0, 0, 0x40000000, true },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_FB , 0, 0, 0x08002000, true },
{ NVKM_SUBDEV_LTC , 0, 0, 0x02000000, true },
{ NVKM_SUBDEV_PMU , 0, 0, 0x01000000, true },
{ NVKM_SUBDEV_GPIO , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 0, 0x00200000, true },
{ NVKM_SUBDEV_TIMER , 0, 0, 0x00100000, true },
{ NVKM_SUBDEV_THERM , 0, 0, 0x00040000, true },
{ NVKM_SUBDEV_TOP , 0, 0, 0x00009000 },
{ NVKM_SUBDEV_TOP , 0, 0, 0xffff6fff, true },
{},
};
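/* GP100 appears to expose separate set (0x000160) and clear (0x000180)
 * interrupt-enable registers per leaf, so allow/block below are plain
 * writes rather than read-modify-write masks.
 */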
static void
gp100_mc_intr_allow(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
nvkm_wr32(mc->subdev.device, 0x000160 + (leaf * 4), mask);
}
static void
gp100_mc_intr_block(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_mc *mc = container_of(intr, typeof(*mc), intr);
nvkm_wr32(mc->subdev.device, 0x000180 + (leaf * 4), mask);
}
static void
gp100_mc_intr_rearm(struct nvkm_intr *intr)
{
int i;
for (i = 0; i < intr->leaves; i++)
intr->func->allow(intr, i, intr->mask[i]);
}
static void
gp100_mc_intr_unarm(struct nvkm_intr *intr)
{
int i;
for (i = 0; i < intr->leaves; i++)
intr->func->block(intr, i, 0xffffffff);
}
const struct nvkm_intr_func
gp100_mc_intr = {
.pending = nv04_mc_intr_pending,
.unarm = gp100_mc_intr_unarm,
.rearm = gp100_mc_intr_rearm,
.block = gp100_mc_intr_block,
.allow = gp100_mc_intr_allow,
};
static const struct nvkm_mc_func
gp100_mc = {
.init = nv50_mc_init,
.intr = &gp100_mc_intr,
.intrs = gp100_mc_intrs,
.intr_nonstall = true,
.device = &nv04_mc_device,
.reset = gk104_mc_reset,
};
int
gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_mc_func
gk20a_mc = {
.init = nv50_mc_init,
.intr = &gt215_mc_intr,
.intrs = gk104_mc_intrs,
.intr_nonstall = true,
.device = &nv04_mc_device,
.reset = gk104_mc_reset,
};
int
gk20a_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&gk20a_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static const struct nvkm_intr_data
nv11_mc_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 0, 0x03010000, true },
{ NVKM_ENGINE_GR , 0, 0, 0x00001000, true },
{ NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
{ NVKM_SUBDEV_BUS , 0, 0, 0x10000000, true },
{ NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
{}
};
static const struct nvkm_mc_func
nv11_mc = {
.init = nv04_mc_init,
.intr = &nv04_mc_intr,
.intrs = nv11_mc_intrs,
.device = &nv04_mc_device,
.reset = nv04_mc_reset,
};
int
nv11_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
{
return nvkm_mc_new_(&nv11_mc, device, type, inst, pmc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_volt_func
nv40_volt = {
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
};
int
nv40_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
ret = nvkm_volt_new_(&nv40_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
return nvkm_voltgpio_init(volt);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c |
/*
* Copyright 2015 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
#include <subdev/volt.h>
#include <subdev/gpio.h>
#include <subdev/bios.h>
#include <subdev/bios/volt.h>
#include <subdev/fuse.h>
#define gk104_volt(p) container_of((p), struct gk104_volt, base)
struct gk104_volt {
struct nvkm_volt base;
struct nvbios_volt bios;
};
static int
gk104_volt_get(struct nvkm_volt *base)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
struct nvkm_device *device = base->subdev.device;
u32 div, duty;
div = nvkm_rd32(device, 0x20340);
duty = nvkm_rd32(device, 0x20344);
return bios->base + bios->pwm_range * duty / div;
}
static int
gk104_volt_set(struct nvkm_volt *base, u32 uv)
{
struct nvbios_volt *bios = &gk104_volt(base)->bios;
struct nvkm_device *device = base->subdev.device;
u32 div, duty;
/* the blob uses this crystal frequency, let's use it too. */
div = 27648000 / bios->pwm_freq;
duty = DIV_ROUND_UP((uv - bios->base) * div, bios->pwm_range);
nvkm_wr32(device, 0x20340, div);
nvkm_wr32(device, 0x20344, 0x80000000 | duty);
return 0;
}
static int
gk104_volt_speedo_read(struct nvkm_volt *volt)
{
struct nvkm_device *device = volt->subdev.device;
struct nvkm_fuse *fuse = device->fuse;
int ret;
if (!fuse)
return -EINVAL;
nvkm_wr32(device, 0x122634, 0x0);
ret = nvkm_fuse_read(fuse, 0x3a8);
nvkm_wr32(device, 0x122634, 0x41);
return ret;
}
static const struct nvkm_volt_func
gk104_volt_pwm = {
.oneinit = gf100_volt_oneinit,
.volt_get = gk104_volt_get,
.volt_set = gk104_volt_set,
.speedo_read = gk104_volt_speedo_read,
}, gk104_volt_gpio = {
.oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
.speedo_read = gk104_volt_speedo_read,
};
int
gk104_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_volt **pvolt)
{
const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
struct dcb_gpio_func gpio;
struct nvbios_volt bios;
struct gk104_volt *volt;
u8 ver, hdr, cnt, len;
const char *mode;
if (!nvbios_volt_parse(device->bios, &ver, &hdr, &cnt, &len, &bios))
return 0;
if (!nvkm_gpio_find(device->gpio, 0, DCB_GPIO_VID_PWM, 0xff, &gpio) &&
bios.type == NVBIOS_VOLT_PWM) {
volt_func = &gk104_volt_pwm;
}
if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
return -ENOMEM;
nvkm_volt_ctor(volt_func, device, type, inst, &volt->base);
*pvolt = &volt->base;
volt->bios = bios;
	/* now that we have a subdev, we can log an error if we found through
	 * the voltage table that we were supposed to use PWM mode but we
	 * did not find the right GPIO for it.
*/
if (bios.type == NVBIOS_VOLT_PWM && volt_func != &gk104_volt_pwm) {
nvkm_error(&volt->base.subdev,
"Type mismatch between the voltage table type and "
"the GPIO table. Fallback to GPIO mode.\n");
}
if (volt_func == &gk104_volt_gpio) {
nvkm_voltgpio_init(&volt->base);
mode = "GPIO";
} else
mode = "PWM";
nvkm_debug(&volt->base.subdev, "Using %s mode\n", mode);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c |
/*
* Copyright 2016 Karol Herbst
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst
*/
#include "priv.h"
#include <subdev/fuse.h>
static int
gf100_volt_speedo_read(struct nvkm_volt *volt)
{
struct nvkm_device *device = volt->subdev.device;
struct nvkm_fuse *fuse = device->fuse;
if (!fuse)
return -EINVAL;
return nvkm_fuse_read(fuse, 0x1cc);
}
int
gf100_volt_oneinit(struct nvkm_volt *volt)
{
struct nvkm_subdev *subdev = &volt->subdev;
if (volt->speedo <= 0)
nvkm_error(subdev, "couldn't find speedo value, volting not "
"possible\n");
return 0;
}
static const struct nvkm_volt_func
gf100_volt = {
.oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
.speedo_read = gf100_volt_speedo_read,
};
int
gf100_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
ret = nvkm_volt_new_(&gf100_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
return nvkm_voltgpio_init(volt);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/volt.h>
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
#include "priv.h"
static const u8 tags[] = {
DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
};
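/* Read the current VID by sampling each wired VIDn GPIO and assembling the
 * individual bits into a single value.
 */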
int
nvkm_voltgpio_get(struct nvkm_volt *volt)
{
struct nvkm_gpio *gpio = volt->subdev.device->gpio;
u8 vid = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tags); i++) {
if (volt->vid_mask & (1 << i)) {
int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
if (ret < 0)
return ret;
vid |= ret << i;
}
}
return vid;
}
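/* Drive each wired VIDn GPIO from the corresponding bit of the requested VID. */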
int
nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
{
struct nvkm_gpio *gpio = volt->subdev.device->gpio;
int i;
for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
if (volt->vid_mask & (1 << i)) {
int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
if (ret < 0)
return ret;
}
}
return 0;
}
int
nvkm_voltgpio_init(struct nvkm_volt *volt)
{
struct nvkm_subdev *subdev = &volt->subdev;
struct nvkm_gpio *gpio = subdev->device->gpio;
struct dcb_gpio_func func;
int i;
	/* Check we have GPIO function info for each VID bit. On some
	 * boards (ie. nvs295) the VID mask has more bits than there
	 * are valid GPIO functions... from traces, NVIDIA appears to
	 * just touch the existing ones, so mask off the invalid
	 * bits and continue.
*/
for (i = 0; i < ARRAY_SIZE(tags); i++) {
if (volt->vid_mask & (1 << i)) {
int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
if (ret) {
if (ret != -ENOENT)
return ret;
nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
volt->vid_mask &= ~(1 << i);
}
}
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c |
/*
* Copyright 2019 Ilia Mirkin
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ilia Mirkin
*/
#include "priv.h"
#include <subdev/fuse.h>
static int
gf117_volt_speedo_read(struct nvkm_volt *volt)
{
struct nvkm_device *device = volt->subdev.device;
struct nvkm_fuse *fuse = device->fuse;
if (!fuse)
return -EINVAL;
return nvkm_fuse_read(fuse, 0x3a8);
}
static const struct nvkm_volt_func
gf117_volt = {
.oneinit = gf100_volt_oneinit,
.vid_get = nvkm_voltgpio_get,
.vid_set = nvkm_voltgpio_set,
.speedo_read = gf117_volt_speedo_read,
};
int
gf117_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_volt **pvolt)
{
struct nvkm_volt *volt;
int ret;
ret = nvkm_volt_new_(&gf117_volt, device, type, inst, &volt);
*pvolt = volt;
if (ret)
return ret;
return nvkm_voltgpio_init(volt);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/vmap.h>
#include <subdev/bios/volt.h>
#include <subdev/therm.h>
int
nvkm_volt_get(struct nvkm_volt *volt)
{
int ret, i;
if (volt->func->volt_get)
return volt->func->volt_get(volt);
ret = volt->func->vid_get(volt);
if (ret >= 0) {
for (i = 0; i < volt->vid_nr; i++) {
if (volt->vid[i].vid == ret)
return volt->vid[i].uv;
}
ret = -EINVAL;
}
return ret;
}
static int
nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
{
struct nvkm_subdev *subdev = &volt->subdev;
int i, ret = -EINVAL, best_err = volt->max_uv, best = -1;
if (volt->func->volt_set)
return volt->func->volt_set(volt, uv);
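	/* No direct voltage control: pick the table entry with the lowest
	 * voltage that is still greater than or equal to the request.
	 */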
for (i = 0; i < volt->vid_nr; i++) {
int err = volt->vid[i].uv - uv;
if (err < 0 || err > best_err)
continue;
best_err = err;
best = i;
if (best_err == 0)
break;
}
if (best == -1) {
nvkm_error(subdev, "couldn't set %iuv\n", uv);
return ret;
}
ret = volt->func->vid_set(volt, volt->vid[best].vid);
nvkm_debug(subdev, "set req %duv to %duv: %d\n", uv,
volt->vid[best].uv, ret);
return ret;
}
int
nvkm_volt_map_min(struct nvkm_volt *volt, u8 id)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
u32 vmap;
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
if (info.link != 0xff) {
int ret = nvkm_volt_map_min(volt, info.link);
if (ret < 0)
return ret;
info.min += ret;
}
return info.min;
}
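	/* No vmap entry: a non-zero ID encodes the voltage directly,
	 * in units of 10000uV (10mV).
	 */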
return id ? id * 10000 : -ENODEV;
}
int
nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
{
struct nvkm_bios *bios = volt->subdev.device->bios;
struct nvbios_vmap_entry info;
u8 ver, len;
u32 vmap;
vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
if (vmap) {
s64 result;
if (volt->speedo < 0)
return volt->speedo;
if (ver == 0x10 || (ver == 0x20 && info.mode == 0)) {
result = div64_s64((s64)info.arg[0], 10);
result += div64_s64((s64)info.arg[1] * volt->speedo, 10);
result += div64_s64((s64)info.arg[2] * volt->speedo * volt->speedo, 100000);
} else if (ver == 0x20) {
switch (info.mode) {
/* 0x0 handled above! */
case 0x1:
result = ((s64)info.arg[0] * 15625) >> 18;
result += ((s64)info.arg[1] * volt->speedo * 15625) >> 18;
result += ((s64)info.arg[2] * temp * 15625) >> 10;
result += ((s64)info.arg[3] * volt->speedo * temp * 15625) >> 18;
result += ((s64)info.arg[4] * volt->speedo * volt->speedo * 15625) >> 30;
result += ((s64)info.arg[5] * temp * temp * 15625) >> 18;
break;
case 0x3:
result = (info.min + info.max) / 2;
break;
case 0x2:
default:
result = info.min;
break;
}
} else {
return -ENODEV;
}
result = min(max(result, (s64)info.min), (s64)info.max);
if (info.link != 0xff) {
int ret = nvkm_volt_map(volt, info.link, temp);
if (ret < 0)
return ret;
result += ret;
}
return result;
}
return id ? id * 10000 : -ENODEV;
}
int
nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, u8 min_id, u8 temp,
int condition)
{
int ret;
if (volt->func->set_id)
return volt->func->set_id(volt, id, condition);
ret = nvkm_volt_map(volt, id, temp);
if (ret >= 0) {
int prev = nvkm_volt_get(volt);
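		/* Switch only if the change is unconditional, the current
		 * voltage is unknown, or the new voltage moves in the
		 * requested direction (condition < 0: lower, > 0: raise).
		 */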
if (!condition || prev < 0 ||
(condition < 0 && ret < prev) ||
(condition > 0 && ret > prev)) {
int min = nvkm_volt_map(volt, min_id, temp);
if (min >= 0)
ret = max(min, ret);
ret = nvkm_volt_set(volt, ret);
} else {
ret = 0;
}
}
return ret;
}
static void
nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
{
struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_volt_entry ivid;
struct nvbios_volt info;
u8 ver, hdr, cnt, len;
u32 data;
int i;
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
if (data && info.vidmask && info.base && info.step && info.ranged) {
nvkm_debug(subdev, "found ranged based VIDs\n");
volt->min_uv = info.min;
volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
if (info.base >= info.min &&
info.base <= info.max) {
volt->vid[volt->vid_nr].uv = info.base;
volt->vid[volt->vid_nr].vid = i;
volt->vid_nr++;
}
info.base += info.step;
}
volt->vid_mask = info.vidmask;
} else if (data && info.vidmask && !info.ranged) {
nvkm_debug(subdev, "found entry based VIDs\n");
volt->min_uv = 0xffffffff;
volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
&ivid);
if (data) {
volt->vid[volt->vid_nr].uv = ivid.voltage;
volt->vid[volt->vid_nr].vid = ivid.vid;
volt->vid_nr++;
volt->min_uv = min(volt->min_uv, ivid.voltage);
volt->max_uv = max(volt->max_uv, ivid.voltage);
}
}
volt->vid_mask = info.vidmask;
} else if (data && info.type == NVBIOS_VOLT_PWM) {
volt->min_uv = info.base;
volt->max_uv = info.base + info.pwm_range;
}
}
static int
nvkm_volt_speedo_read(struct nvkm_volt *volt)
{
if (volt->func->speedo_read)
return volt->func->speedo_read(volt);
return -EINVAL;
}
static int
nvkm_volt_init(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
int ret = nvkm_volt_get(volt);
if (ret < 0) {
if (ret != -ENODEV)
nvkm_debug(subdev, "current voltage unknown\n");
return 0;
}
nvkm_debug(subdev, "current voltage: %duv\n", ret);
return 0;
}
static int
nvkm_volt_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_volt *volt = nvkm_volt(subdev);
volt->speedo = nvkm_volt_speedo_read(volt);
if (volt->speedo > 0)
nvkm_debug(&volt->subdev, "speedo %x\n", volt->speedo);
if (volt->func->oneinit)
return volt->func->oneinit(volt);
return 0;
}
static void *
nvkm_volt_dtor(struct nvkm_subdev *subdev)
{
return nvkm_volt(subdev);
}
static const struct nvkm_subdev_func
nvkm_volt = {
.dtor = nvkm_volt_dtor,
.init = nvkm_volt_init,
.oneinit = nvkm_volt_oneinit,
};
void
nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_volt *volt)
{
struct nvkm_bios *bios = device->bios;
int i;
nvkm_subdev_ctor(&nvkm_volt, device, type, inst, &volt->subdev);
volt->func = func;
	/* Devices without a BIOS are assumed to build the voltage table later. */
if (bios) {
u8 ver, hdr, cnt, len;
struct nvbios_vmap vmap;
nvkm_volt_parse_bios(bios, volt);
nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
volt->min_uv, volt->max_uv);
if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {
volt->max0_id = vmap.max0;
volt->max1_id = vmap.max1;
volt->max2_id = vmap.max2;
} else {
volt->max0_id = 0xff;
volt->max1_id = 0xff;
volt->max2_id = 0xff;
}
}
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
volt->vid[i].vid, volt->vid[i].uv);
}
}
}
int
nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
return -ENOMEM;
nvkm_volt_ctor(func, device, type, inst, *pvolt);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c |
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
#include "priv.h"
#include <core/tegra.h>
#include "gk20a.h"
static const struct cvb_coef gk20a_cvb_coef[] = {
/* MHz, c0, c1, c2, c3, c4, c5 */
/* 72 */ { 1209886, -36468, 515, 417, -13123, 203},
/* 108 */ { 1130804, -27659, 296, 298, -10834, 221},
/* 180 */ { 1162871, -27110, 247, 238, -10681, 268},
/* 252 */ { 1220458, -28654, 247, 179, -10376, 298},
/* 324 */ { 1280953, -30204, 247, 119, -9766, 304},
/* 396 */ { 1344547, -31777, 247, 119, -8545, 292},
/* 468 */ { 1420168, -34227, 269, 60, -7172, 256},
/* 540 */ { 1490757, -35955, 274, 60, -5188, 197},
/* 612 */ { 1599112, -42583, 398, 0, -1831, 119},
/* 648 */ { 1366986, -16459, -274, 0, -3204, 72},
/* 684 */ { 1391884, -17078, -274, -60, -1526, 30},
/* 708 */ { 1415522, -17497, -274, -60, -458, 0},
/* 756 */ { 1464061, -18331, -274, -119, 1831, -72},
/* 804 */ { 1524225, -20064, -254, -119, 4272, -155},
/* 852 */ { 1608418, -21643, -269, 0, 763, -48},
};
/**
* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
*/
static inline int
gk20a_volt_get_cvb_voltage(int speedo, int s_scale, const struct cvb_coef *coef)
{
int mv;
mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale) + coef->c0;
return mv;
}
/**
* cvb_t_mv =
* ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
* ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
*/
static inline int
gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
const struct cvb_coef *coef)
{
int cvb_mv, mv;
cvb_mv = gk20a_volt_get_cvb_voltage(speedo, s_scale, coef);
mv = DIV_ROUND_CLOSEST(coef->c3 * speedo, s_scale) + coef->c4 +
DIV_ROUND_CLOSEST(coef->c5 * temp, t_scale);
mv = DIV_ROUND_CLOSEST(mv * temp, t_scale) + cvb_mv;
return mv;
}
static int
gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
{
static const int v_scale = 1000;
int mv;
mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
mv = DIV_ROUND_UP(mv, v_scale);
return mv * 1000;
}
static int
gk20a_volt_vid_get(struct nvkm_volt *base)
{
struct gk20a_volt *volt = gk20a_volt(base);
int i, uv;
uv = regulator_get_voltage(volt->vdd);
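	/* Report the first VID whose table voltage is at least the
	 * regulator's current output.
	 */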
for (i = 0; i < volt->base.vid_nr; i++)
if (volt->base.vid[i].uv >= uv)
return i;
return -EINVAL;
}
static int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
struct gk20a_volt *volt = gk20a_volt(base);
struct nvkm_subdev *subdev = &volt->base.subdev;
nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
static int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
struct gk20a_volt *volt = gk20a_volt(base);
struct nvkm_subdev *subdev = &volt->base.subdev;
int prev_uv = regulator_get_voltage(volt->vdd);
int target_uv = volt->base.vid[id].uv;
int ret;
nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
prev_uv, target_uv, condition);
if (!condition ||
(condition < 0 && target_uv < prev_uv) ||
(condition > 0 && target_uv > prev_uv)) {
ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
} else {
ret = 0;
}
return ret;
}
static const struct nvkm_volt_func
gk20a_volt = {
.vid_get = gk20a_volt_vid_get,
.vid_set = gk20a_volt_vid_set,
.set_id = gk20a_volt_set_id,
};
int
gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
const struct cvb_coef *coefs, int nb_coefs,
int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
nvkm_volt_ctor(&gk20a_volt, device, type, inst, &volt->base);
uv = regulator_get_voltage(tdev->vdd);
nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
volt->vdd = tdev->vdd;
volt->base.vid_nr = nb_coefs;
for (i = 0; i < volt->base.vid_nr; i++) {
volt->base.vid[i].vid = i;
volt->base.vid[i].uv = max(
gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
vmin);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
}
return 0;
}
int
gk20a_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt)
{
struct gk20a_volt *volt;
volt = kzalloc(sizeof(*volt), GFP_KERNEL);
if (!volt)
return -ENOMEM;
*pvolt = &volt->base;
return gk20a_volt_ctor(device, type, inst, gk20a_cvb_coef,
ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "gk20a.h"
#include <core/tegra.h>
static const struct cvb_coef gm20b_cvb_coef[] = {
/* KHz, c0, c1, c2 */
/* 76800 */ { 1786666, -85625, 1632 },
/* 153600 */ { 1846729, -87525, 1632 },
/* 230400 */ { 1910480, -89425, 1632 },
/* 307200 */ { 1977920, -91325, 1632 },
/* 384000 */ { 2049049, -93215, 1632 },
/* 460800 */ { 2122872, -95095, 1632 },
/* 537600 */ { 2201331, -96985, 1632 },
/* 614400 */ { 2283479, -98885, 1632 },
/* 691200 */ { 2369315, -100785, 1632 },
/* 768000 */ { 2458841, -102685, 1632 },
/* 844800 */ { 2550821, -104555, 1632 },
/* 921600 */ { 2647676, -106455, 1632 },
};
static const struct cvb_coef gm20b_na_cvb_coef[] = {
/* KHz, c0, c1, c2, c3, c4, c5 */
/* 76800 */ { 814294, 8144, -940, 808, -21583, 226 },
/* 153600 */ { 856185, 8144, -940, 808, -21583, 226 },
/* 230400 */ { 898077, 8144, -940, 808, -21583, 226 },
/* 307200 */ { 939968, 8144, -940, 808, -21583, 226 },
/* 384000 */ { 981860, 8144, -940, 808, -21583, 226 },
/* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
/* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
/* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
/* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
/* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
/* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
/* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
/* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
};
static const u32 speedo_to_vmin[] = {
/* 0, 1, 2, 3, 4, */
950000, 840000, 818750, 840000, 810000,
};
int
gm20b_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_volt **pvolt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
u32 vmin;
if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
nvdev_error(device, "unsupported speedo %d\n",
tdev->gpu_speedo_id);
return -EINVAL;
}
volt = kzalloc(sizeof(*volt), GFP_KERNEL);
if (!volt)
return -ENOMEM;
*pvolt = &volt->base;
vmin = speedo_to_vmin[tdev->gpu_speedo_id];
if (tdev->gpu_speedo_id >= 1)
return gk20a_volt_ctor(device, type, inst, gm20b_na_cvb_coef,
ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
else
return gk20a_volt_ctor(device, type, inst, gm20b_cvb_coef,
ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"
#include <core/ramht.h>
#include <engine/gr/nv40.h>
struct nv40_instmem {
struct nvkm_instmem base;
struct nvkm_mm heap;
void __iomem *iomem;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
struct nv40_instobj {
struct nvkm_instobj base;
struct nv40_instmem *imem;
struct nvkm_mm_node *node;
};
static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}
static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}
static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
.rd32 = nv40_instobj_rd32,
.wr32 = nv40_instobj_wr32,
};
static void
nv40_instobj_release(struct nvkm_memory *memory)
{
wmb();
}
static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
return iobj->imem->iomem + iobj->node->offset;
}
static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
return nv40_instobj(memory)->node->length;
}
static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
return nv40_instobj(memory)->node->offset;
}
static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
return NVKM_MEM_TARGET_INST;
}
static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
struct nv40_instobj *iobj = nv40_instobj(memory);
mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
static const struct nvkm_memory_func
nv40_instobj_func = {
.dtor = nv40_instobj_dtor,
.target = nv40_instobj_target,
.size = nv40_instobj_size,
.addr = nv40_instobj_addr,
.acquire = nv40_instobj_acquire,
.release = nv40_instobj_release,
};
static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
struct nv40_instmem *imem = nv40_instmem(base);
struct nv40_instobj *iobj;
int ret;
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
*pmemory = &iobj->base.memory;
nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.mutex);
ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
mutex_unlock(&imem->base.mutex);
return ret;
}
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static u32
nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
{
return ioread32_native(nv40_instmem(base)->iomem + addr);
}
static void
nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
{
iowrite32_native(data, nv40_instmem(base)->iomem + addr);
}
static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
struct nv40_instmem *imem = nv40_instmem(base);
struct nvkm_device *device = imem->base.subdev.device;
int ret, vs;
/* PRAMIN aperture maps over the end of vram, reserve enough space
* to fit graphics contexts for every channel, the magics come
* from engine/gr/nv40.c
*/
vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
else if (nv44_gr_class(device)) imem->base.reserved = 0x4980 * vs;
else imem->base.reserved = 0x4a40 * vs;
imem->base.reserved += 16 * 1024;
imem->base.reserved *= 32; /* per-channel */
imem->base.reserved += 512 * 1024; /* pci(e)gart table */
imem->base.reserved += 512 * 1024; /* object storage */
imem->base.reserved = round_up(imem->base.reserved, 4096);
ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
&imem->base.vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
&imem->base.ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
&imem->base.ramfc);
if (ret)
return ret;
return 0;
}
static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
struct nv40_instmem *imem = nv40_instmem(base);
nvkm_memory_unref(&imem->base.ramfc);
nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
if (imem->iomem)
iounmap(imem->iomem);
return imem;
}
static const struct nvkm_instmem_func
nv40_instmem = {
.dtor = nv40_instmem_dtor,
.oneinit = nv40_instmem_oneinit,
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
.memory_new = nv40_instobj_new,
.zero = false,
};
int
nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
int bar;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
/* map bar */
if (device->func->resource_size(device, 2))
bar = 2;
else
bar = 3;
imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
device->func->resource_size(device, bar));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
#include "priv.h"
#include <core/ramht.h>
struct nv04_instmem {
struct nvkm_instmem base;
struct nvkm_mm heap;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
struct nv04_instobj {
struct nvkm_instobj base;
struct nv04_instmem *imem;
struct nvkm_mm_node *node;
};
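/* Objects live in the PRAMIN aperture, which is accessible through BAR0 at
 * offset 0x700000.
 */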
static void
nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
struct nvkm_device *device = iobj->imem->base.subdev.device;
nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}
static u32
nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
struct nvkm_device *device = iobj->imem->base.subdev.device;
return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}
static const struct nvkm_memory_ptrs
nv04_instobj_ptrs = {
.rd32 = nv04_instobj_rd32,
.wr32 = nv04_instobj_wr32,
};
static void
nv04_instobj_release(struct nvkm_memory *memory)
{
}
static void __iomem *
nv04_instobj_acquire(struct nvkm_memory *memory)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
struct nvkm_device *device = iobj->imem->base.subdev.device;
return device->pri + 0x700000 + iobj->node->offset;
}
static u64
nv04_instobj_size(struct nvkm_memory *memory)
{
return nv04_instobj(memory)->node->length;
}
static u64
nv04_instobj_addr(struct nvkm_memory *memory)
{
return nv04_instobj(memory)->node->offset;
}
static enum nvkm_memory_target
nv04_instobj_target(struct nvkm_memory *memory)
{
return NVKM_MEM_TARGET_INST;
}
static void *
nv04_instobj_dtor(struct nvkm_memory *memory)
{
struct nv04_instobj *iobj = nv04_instobj(memory);
mutex_lock(&iobj->imem->base.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.mutex);
nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
static const struct nvkm_memory_func
nv04_instobj_func = {
.dtor = nv04_instobj_dtor,
.target = nv04_instobj_target,
.size = nv04_instobj_size,
.addr = nv04_instobj_addr,
.acquire = nv04_instobj_acquire,
.release = nv04_instobj_release,
};
static int
nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
struct nv04_instmem *imem = nv04_instmem(base);
struct nv04_instobj *iobj;
int ret;
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
*pmemory = &iobj->base.memory;
nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base);
iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.mutex);
ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node);
mutex_unlock(&imem->base.mutex);
return ret;
}
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static u32
nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
}
static void
nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}
static int
nv04_instmem_oneinit(struct nvkm_instmem *base)
{
struct nv04_instmem *imem = nv04_instmem(base);
struct nvkm_device *device = imem->base.subdev.device;
int ret;
/* PRAMIN aperture maps over the end of VRAM, reserve it */
imem->base.reserved = 512 * 1024;
ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
&imem->base.vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
if (ret)
return ret;
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
&imem->base.ramfc);
if (ret)
return ret;
/* 0x18800-0x18a00: reserve for RAMRO */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
&imem->base.ramro);
if (ret)
return ret;
return 0;
}
static void *
nv04_instmem_dtor(struct nvkm_instmem *base)
{
struct nv04_instmem *imem = nv04_instmem(base);
nvkm_memory_unref(&imem->base.ramfc);
nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
return imem;
}
static const struct nvkm_instmem_func
nv04_instmem = {
.dtor = nv04_instmem_dtor,
.oneinit = nv04_instmem_oneinit,
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
.memory_new = nv04_instobj_new,
.zero = false,
};
int
nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv04_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base);
*pimem = &imem->base;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
#include "priv.h"
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
struct nv50_instmem {
struct nvkm_instmem base;
u64 addr;
/* Mappings that can be evicted when BAR2 space has been exhausted. */
struct list_head lru;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
struct nv50_instobj {
struct nvkm_instobj base;
struct nv50_instmem *imem;
struct nvkm_memory *ram;
struct nvkm_vma *bar;
refcount_t maps;
void *map;
struct list_head lru;
};
static void
nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nv50_instmem *imem = iobj->imem;
struct nvkm_device *device = imem->base.subdev.device;
u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
unsigned long flags;
spin_lock_irqsave(&imem->base.lock, flags);
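	/* Slide the 1MiB PRAMIN window (programmed via 0x001700) onto the
	 * object's VRAM page before accessing it through BAR0.
	 */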
if (unlikely(imem->addr != base)) {
nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
spin_unlock_irqrestore(&imem->base.lock, flags);
}
static u32
nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nv50_instmem *imem = iobj->imem;
struct nvkm_device *device = imem->base.subdev.device;
u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
u32 data;
unsigned long flags;
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
spin_unlock_irqrestore(&imem->base.lock, flags);
return data;
}
static const struct nvkm_memory_ptrs
nv50_instobj_slow = {
.rd32 = nv50_instobj_rd32_slow,
.wr32 = nv50_instobj_wr32_slow,
};
static void
nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
iowrite32_native(data, nv50_instobj(memory)->map + offset);
}
static u32
nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
return ioread32_native(nv50_instobj(memory)->map + offset);
}
static const struct nvkm_memory_ptrs
nv50_instobj_fast = {
.rd32 = nv50_instobj_rd32,
.wr32 = nv50_instobj_wr32,
};
static void
nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
{
struct nv50_instmem *imem = iobj->imem;
struct nv50_instobj *eobj;
struct nvkm_memory *memory = &iobj->base.memory;
struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_vma *bar = NULL, *ebar;
u64 size = nvkm_memory_size(memory);
void *emap;
int ret;
/* Attempt to allocate BAR2 address-space and map the object
* into it. The lock has to be dropped while doing this due
* to the possibility of recursion for page table allocation.
*/
mutex_unlock(&imem->base.mutex);
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
/* Evict unused mappings, and keep retrying until we either
		 * succeed, or there are no more objects left on the LRU.
*/
mutex_lock(&imem->base.mutex);
eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
if (eobj) {
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
nvkm_memory_addr(&eobj->base.memory),
nvkm_memory_size(&eobj->base.memory),
eobj->bar->addr);
list_del_init(&eobj->lru);
ebar = eobj->bar;
eobj->bar = NULL;
emap = eobj->map;
eobj->map = NULL;
}
mutex_unlock(&imem->base.mutex);
if (!eobj)
break;
iounmap(emap);
nvkm_vmm_put(vmm, &ebar);
}
if (ret == 0)
ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
mutex_unlock(&imem->base.mutex);
nvkm_vmm_put(vmm, &bar);
mutex_lock(&imem->base.mutex);
return;
}
/* Make the mapping visible to the host. */
iobj->bar = bar;
iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
nvkm_vmm_put(vmm, &iobj->bar);
}
}
static int
nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
memory = nv50_instobj(memory)->ram;
return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
}
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nv50_instmem *imem = iobj->imem;
struct nvkm_subdev *subdev = &imem->base.subdev;
wmb();
nvkm_bar_flush(subdev->device->bar);
if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
/* Add the now-unused mapping to the LRU instead of directly
* unmapping it here, in case we need to map it again later.
*/
if (likely(iobj->lru.next) && iobj->map) {
BUG_ON(!list_empty(&iobj->lru));
list_add_tail(&iobj->lru, &imem->lru);
}
/* Switch back to NULL accessors when last map is gone. */
iobj->base.memory.ptrs = NULL;
mutex_unlock(&imem->base.mutex);
}
}
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nvkm_instmem *imem = &iobj->imem->base;
struct nvkm_vmm *vmm;
void __iomem *map = NULL;
/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
return iobj->map;
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
*/
mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
mutex_unlock(&imem->mutex);
return iobj->map;
}
/* Attempt to get a direct CPU mapping of the object. */
if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
if (!iobj->map)
nv50_instobj_kmap(iobj, vmm);
map = iobj->map;
}
if (!refcount_inc_not_zero(&iobj->maps)) {
/* Exclude object from eviction while it's being accessed. */
if (likely(iobj->lru.next))
list_del_init(&iobj->lru);
if (map)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
refcount_set(&iobj->maps, 1);
}
mutex_unlock(&imem->mutex);
return map;
}
static void
nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nvkm_instmem *imem = &iobj->imem->base;
/* Exclude bootstrapped objects (ie. the page tables for the
* instmem BAR itself) from eviction.
*/
mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
list_del_init(&iobj->lru);
iobj->lru.next = NULL;
}
nv50_instobj_kmap(iobj, vmm);
nvkm_instmem_boot(imem);
mutex_unlock(&imem->mutex);
}
static u64
nv50_instobj_size(struct nvkm_memory *memory)
{
return nvkm_memory_size(nv50_instobj(memory)->ram);
}
static u64
nv50_instobj_addr(struct nvkm_memory *memory)
{
return nvkm_memory_addr(nv50_instobj(memory)->ram);
}
static u64
nv50_instobj_bar2(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
u64 addr = ~0ULL;
if (nv50_instobj_acquire(&iobj->base.memory)) {
iobj->lru.next = NULL; /* Exclude from eviction. */
addr = iobj->bar->addr;
}
nv50_instobj_release(&iobj->base.memory);
return addr;
}
static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
return nvkm_memory_target(nv50_instobj(memory)->ram);
}
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nvkm_instmem *imem = &iobj->imem->base;
struct nvkm_vma *bar;
void *map;
mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
mutex_unlock(&imem->mutex);
if (map) {
struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
iounmap(map);
if (likely(vmm)) /* Can be NULL during BAR destructor. */
nvkm_vmm_put(vmm, &bar);
}
nvkm_memory_unref(&iobj->ram);
nvkm_instobj_dtor(imem, &iobj->base);
return iobj;
}
static const struct nvkm_memory_func
nv50_instobj_func = {
.dtor = nv50_instobj_dtor,
.target = nv50_instobj_target,
.bar2 = nv50_instobj_bar2,
.addr = nv50_instobj_addr,
.size = nv50_instobj_size,
.boot = nv50_instobj_boot,
.acquire = nv50_instobj_acquire,
.release = nv50_instobj_release,
.map = nv50_instobj_map,
};
static int
nv50_instobj_wrap(struct nvkm_instmem *base,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
struct nv50_instmem *imem = nv50_instmem(base);
struct nv50_instobj *iobj;
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
*pmemory = &iobj->base.memory;
nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
iobj->imem = imem;
refcount_set(&iobj->maps, 0);
INIT_LIST_HEAD(&iobj->lru);
iobj->ram = nvkm_memory_ref(memory);
return 0;
}
static int
nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
u8 page = max(order_base_2(align), 12);
struct nvkm_memory *ram;
int ret;
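	/* Back the object with VRAM, using at least 4KiB (2^12) pages. */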
ret = nvkm_ram_get(imem->subdev.device, 0, 1, page, size, true, true, &ram);
if (ret)
return ret;
ret = nv50_instobj_wrap(imem, ram, pmemory);
nvkm_memory_unref(&ram);
return ret;
}
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
}
static const struct nvkm_instmem_func
nv50_instmem = {
.fini = nv50_instmem_fini,
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
};
int
nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nv50_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/bar.h>
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
struct nvkm_memory *memory = &iobj->memory;
const u64 size = nvkm_memory_size(memory);
void __iomem *map;
int i;
if (!(map = nvkm_kmap(memory))) {
for (i = 0; i < size; i += 4)
nvkm_wo32(memory, i, iobj->suspend[i / 4]);
} else {
memcpy_toio(map, iobj->suspend, size);
}
nvkm_done(memory);
kvfree(iobj->suspend);
iobj->suspend = NULL;
}
static int
nvkm_instobj_save(struct nvkm_instobj *iobj)
{
struct nvkm_memory *memory = &iobj->memory;
const u64 size = nvkm_memory_size(memory);
void __iomem *map;
int i;
iobj->suspend = kvmalloc(size, GFP_KERNEL);
if (!iobj->suspend)
return -ENOMEM;
if (!(map = nvkm_kmap(memory))) {
for (i = 0; i < size; i += 4)
iobj->suspend[i / 4] = nvkm_ro32(memory, i);
} else {
memcpy_fromio(iobj->suspend, map, size);
}
nvkm_done(memory);
return 0;
}
void
nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
spin_lock(&imem->lock);
list_del(&iobj->head);
spin_unlock(&imem->lock);
}
void
nvkm_instobj_ctor(const struct nvkm_memory_func *func,
struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
nvkm_memory_ctor(func, &iobj->memory);
iobj->suspend = NULL;
spin_lock(&imem->lock);
list_add_tail(&iobj->head, &imem->list);
spin_unlock(&imem->lock);
}
int
nvkm_instobj_wrap(struct nvkm_device *device,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
struct nvkm_instmem *imem = device->imem;
if (!imem->func->memory_wrap)
return -ENOSYS;
return imem->func->memory_wrap(imem, memory, pmemory);
}
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
struct nvkm_subdev *subdev = &imem->subdev;
struct nvkm_memory *memory = NULL;
u32 offset;
int ret;
ret = imem->func->memory_new(imem, size, align, zero, &memory);
if (ret) {
nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
goto done;
}
nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
if (!imem->func->zero && zero) {
void __iomem *map = nvkm_kmap(memory);
if (unlikely(!map)) {
for (offset = 0; offset < size; offset += 4)
nvkm_wo32(memory, offset, 0x00000000);
} else {
memset_io(map, 0x00, size);
}
nvkm_done(memory);
}
done:
if (ret)
nvkm_memory_unref(&memory);
*pmemory = memory;
return ret;
}
/******************************************************************************
* instmem subdev base implementation
*****************************************************************************/
u32
nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
{
return imem->func->rd32(imem, addr);
}
void
nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
{
return imem->func->wr32(imem, addr, data);
}
void
nvkm_instmem_boot(struct nvkm_instmem *imem)
{
/* Separate bootstrapped objects from normal list, as we need
* to make sure they're accessed with the slowpath on suspend
* and resume.
*/
struct nvkm_instobj *iobj, *itmp;
spin_lock(&imem->lock);
list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
list_move_tail(&iobj->head, &imem->boot);
}
spin_unlock(&imem->lock);
}
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
if (suspend) {
list_for_each_entry(iobj, &imem->list, head) {
int ret = nvkm_instobj_save(iobj);
if (ret)
return ret;
}
nvkm_bar_bar2_fini(subdev->device);
list_for_each_entry(iobj, &imem->boot, head) {
int ret = nvkm_instobj_save(iobj);
if (ret)
return ret;
}
}
if (imem->func->fini)
imem->func->fini(imem);
return 0;
}
static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
list_for_each_entry(iobj, &imem->boot, head) {
if (iobj->suspend)
nvkm_instobj_load(iobj);
}
nvkm_bar_bar2_init(subdev->device);
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend)
nvkm_instobj_load(iobj);
}
return 0;
}
static int
nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
if (imem->func->oneinit)
return imem->func->oneinit(imem);
return 0;
}
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
void *data = imem;
if (imem->func->dtor)
data = imem->func->dtor(imem);
mutex_destroy(&imem->mutex);
return data;
}
static const struct nvkm_subdev_func
nvkm_instmem = {
.dtor = nvkm_instmem_dtor,
.oneinit = nvkm_instmem_oneinit,
.init = nvkm_instmem_init,
.fini = nvkm_instmem_fini,
};
void
nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem)
{
nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev);
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
INIT_LIST_HEAD(&imem->boot);
mutex_init(&imem->mutex);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c |
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* GK20A does not have dedicated video memory, and to accurately represent this
* fact Nouveau will not create a RAM device for it. Therefore its instmem
* implementation must be done directly on top of system memory, while
* preserving coherency for read and write operations.
*
* Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make the
 *    memory pages appear contiguous to the GPU. This is the preferred way.
* 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
* contiguous memory.
*
 * In both cases CPU reads and writes are performed through a write-combined
 * mapping. The GPU L2 cache must therefore be flushed/invalidated when required. To
* be conservative we do this every time we acquire or release an instobj, but
* ideally L2 management should be handled at a higher level.
*
* To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
* goes beyond a certain threshold. At the moment this limit is 1MB.
*/
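/*
 * Worked example of the recycling threshold (illustrative numbers): with
 * vaddr_max = 0x100000 and vaddr_use = 0xe0000, acquiring a 0x40000-byte
 * instobj makes gk20a_instmem_vaddr_gc() unmap least-recently-used
 * mappings until the new mapping fits under the limit, or until the LRU
 * list is empty.
 */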
#include "priv.h"
#include <core/memory.h>
#include <core/tegra.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
struct gk20a_instobj {
struct nvkm_memory memory;
struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
/*
* Used for objects allocated using the DMA API
*/
struct gk20a_instobj_dma {
struct gk20a_instobj base;
dma_addr_t handle;
struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
/*
* Used for objects flattened using the IOMMU API
*/
struct gk20a_instobj_iommu {
struct gk20a_instobj base;
/* to link into gk20a_instmem::vaddr_lru */
struct list_head vaddr_node;
/* how many clients are using vaddr? */
u32 use_cpt;
/* will point to the higher half of pages */
dma_addr_t *dma_addrs;
	/* array of base.mn->length pages (+ dma_addr_ts) */
struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
struct gk20a_instmem {
struct nvkm_instmem base;
/* protects vaddr_* and gk20a_instobj::vaddr* */
struct mutex lock;
/* CPU mappings LRU */
unsigned int vaddr_use;
unsigned int vaddr_max;
struct list_head vaddr_lru;
	/* Only used if an IOMMU is present */
struct mutex *mm_mutex;
struct nvkm_mm *mm;
struct iommu_domain *domain;
unsigned long iommu_pgshift;
u16 iommu_bit;
/* Only used by DMA API */
unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
return NVKM_MEM_TARGET_NCOH;
}
static u8
gk20a_instobj_page(struct nvkm_memory *memory)
{
return 12;
}
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
return (u64)gk20a_instobj(memory)->mn->offset << 12;
}
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
return (u64)gk20a_instobj(memory)->mn->length << 12;
}
/*
* Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
*/
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
struct gk20a_instmem *imem = obj->base.imem;
/* there should not be any user left... */
WARN_ON(obj->use_cpt);
list_del(&obj->vaddr_node);
vunmap(obj->base.vaddr);
obj->base.vaddr = NULL;
imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
imem->vaddr_max);
}
/*
* Must be called while holding gk20a_instmem::lock
*/
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
while (imem->vaddr_use + size > imem->vaddr_max) {
/* no candidate that can be unmapped, abort... */
if (list_empty(&imem->vaddr_lru))
break;
gk20a_instobj_iommu_recycle_vaddr(
list_first_entry(&imem->vaddr_lru,
struct gk20a_instobj_iommu, vaddr_node));
}
}
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct gk20a_instmem *imem = node->imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
nvkm_ltc_flush(ltc);
return node->vaddr;
}
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
const u64 size = nvkm_memory_size(memory);
nvkm_ltc_flush(ltc);
mutex_lock(&imem->lock);
if (node->base.vaddr) {
if (!node->use_cpt) {
/* remove from LRU list since mapping in use again */
list_del(&node->vaddr_node);
}
goto out;
}
/* try to free some address space if we reached the limit */
gk20a_instmem_vaddr_gc(imem, size);
/* map the pages */
node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!node->base.vaddr) {
nvkm_error(&imem->base.subdev, "cannot map instobj - "
"this is not going to end well...\n");
goto out;
}
imem->vaddr_use += size;
nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
imem->vaddr_use, imem->vaddr_max);
out:
node->use_cpt++;
mutex_unlock(&imem->lock);
return node->base.vaddr;
}
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct gk20a_instmem *imem = node->imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
/* in case we got a write-combined mapping */
wmb();
nvkm_ltc_invalidate(ltc);
}
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
mutex_lock(&imem->lock);
/* we should at least have one user to release... */
if (WARN_ON(node->use_cpt == 0))
goto out;
/* add unused objs to the LRU list to recycle their mapping */
if (--node->use_cpt == 0)
list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
out:
mutex_unlock(&imem->lock);
wmb();
nvkm_ltc_invalidate(ltc);
}
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
return node->vaddr[offset / 4];
}
static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
node->vaddr[offset / 4] = data;
}
static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
struct nvkm_vmm_map map = {
.memory = &node->memory,
.offset = offset,
.mem = node->mn,
};
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
if (unlikely(!node->base.vaddr))
goto out;
dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
node->base.vaddr, node->handle, imem->attrs);
out:
return node;
}
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
struct nvkm_mm_node *r = node->base.mn;
int i;
if (unlikely(!r))
goto out;
mutex_lock(&imem->lock);
/* vaddr has already been recycled */
if (node->base.vaddr)
gk20a_instobj_iommu_recycle_vaddr(node);
mutex_unlock(&imem->lock);
/* clear IOMMU bit to unmap pages */
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
for (i = 0; i < node->base.mn->length; i++) {
iommu_unmap(imem->domain,
(r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(node->pages[i]);
}
/* Release area from GPU address space */
mutex_lock(imem->mm_mutex);
nvkm_mm_free(imem->mm, &r);
mutex_unlock(imem->mm_mutex);
out:
return node;
}
static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
.dtor = gk20a_instobj_dtor_dma,
.target = gk20a_instobj_target,
.page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_dma,
.release = gk20a_instobj_release_dma,
.map = gk20a_instobj_map,
};
static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
.dtor = gk20a_instobj_dtor_iommu,
.target = gk20a_instobj_target,
.page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_iommu,
.release = gk20a_instobj_release_iommu,
.map = gk20a_instobj_map,
};
static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
.rd32 = gk20a_instobj_rd32,
.wr32 = gk20a_instobj_wr32,
};
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
struct gk20a_instobj **_node)
{
struct gk20a_instobj_dma *node;
struct nvkm_subdev *subdev = &imem->base.subdev;
struct device *dev = subdev->device->dev;
if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
return -ENOMEM;
*_node = &node->base;
nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
node->base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
imem->attrs);
if (!node->base.vaddr) {
nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
}
/* alignment check */
if (unlikely(node->handle & (align - 1)))
nvkm_warn(subdev,
"memory not aligned as requested: %pad (0x%x)\n",
&node->handle, align);
	/* present the memory so it can be mapped using small pages */
node->r.type = 12;
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
node->base.mn = &node->r;
return 0;
}
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
struct gk20a_instobj **_node)
{
struct gk20a_instobj_iommu *node;
struct nvkm_subdev *subdev = &imem->base.subdev;
struct device *dev = subdev->device->dev;
struct nvkm_mm_node *r;
int ret;
int i;
/*
* despite their variable size, instmem allocations are small enough
* (< 1 page) to be handled by kzalloc
*/
if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
return -ENOMEM;
*_node = &node->base;
node->dma_addrs = (void *)(node->pages + npages);
nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
node->base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
struct page *p = alloc_page(GFP_KERNEL);
dma_addr_t dma_adr;
if (p == NULL) {
ret = -ENOMEM;
goto free_pages;
}
node->pages[i] = p;
dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_adr)) {
nvkm_error(subdev, "DMA mapping error!\n");
ret = -ENOMEM;
goto free_pages;
}
node->dma_addrs[i] = dma_adr;
}
mutex_lock(imem->mm_mutex);
/* Reserve area from GPU address space */
ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
align >> imem->iommu_pgshift, &r);
mutex_unlock(imem->mm_mutex);
if (ret) {
nvkm_error(subdev, "IOMMU space is full!\n");
goto free_pages;
}
/* Map into GPU address space */
for (i = 0; i < npages; i++) {
u32 offset = (r->offset + i) << imem->iommu_pgshift;
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
GFP_KERNEL);
if (ret < 0) {
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
while (i-- > 0) {
offset -= PAGE_SIZE;
iommu_unmap(imem->domain, offset, PAGE_SIZE);
}
goto release_area;
}
}
/* IOMMU bit tells that an address is to be resolved through the IOMMU */
r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
node->base.mn = r;
return 0;
release_area:
mutex_lock(imem->mm_mutex);
nvkm_mm_free(imem->mm, &r);
mutex_unlock(imem->mm_mutex);
free_pages:
for (i = 0; i < npages && node->pages[i] != NULL; i++) {
dma_addr_t dma_addr = node->dma_addrs[i];
if (dma_addr)
dma_unmap_page(dev, dma_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(node->pages[i]);
}
return ret;
}
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
struct gk20a_instmem *imem = gk20a_instmem(base);
struct nvkm_subdev *subdev = &imem->base.subdev;
struct gk20a_instobj *node = NULL;
int ret;
nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
imem->domain ? "IOMMU" : "DMA", size, align);
/* Round size and align to page bounds */
size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
if (imem->domain)
ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
align, &node);
else
ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
align, &node);
*pmemory = node ? &node->memory : NULL;
if (ret)
return ret;
node->imem = imem;
nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
size, align, (u64)node->mn->offset << 12);
return 0;
}
static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
struct gk20a_instmem *imem = gk20a_instmem(base);
/* perform some sanity checks... */
if (!list_empty(&imem->vaddr_lru))
nvkm_warn(&base->subdev, "instobj LRU not empty!\n");
if (imem->vaddr_use != 0)
nvkm_warn(&base->subdev, "instobj vmap area not empty! "
"0x%x bytes still mapped\n", imem->vaddr_use);
return imem;
}
static const struct nvkm_instmem_func
gk20a_instmem = {
.dtor = gk20a_instmem_dtor,
.memory_new = gk20a_instobj_new,
.zero = false,
};
int
gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_instmem **pimem)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_instmem *imem;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base);
mutex_init(&imem->lock);
*pimem = &imem->base;
/* do not allow more than 1MB of CPU-mapped instmem */
imem->vaddr_use = 0;
imem->vaddr_max = 0x100000;
INIT_LIST_HEAD(&imem->vaddr_lru);
if (tdev->iommu.domain) {
imem->mm_mutex = &tdev->iommu.mutex;
imem->mm = &tdev->iommu.mm;
imem->domain = tdev->iommu.domain;
imem->iommu_pgshift = tdev->iommu.pgshift;
imem->iommu_bit = tdev->func->iommu_bit;
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
imem->attrs = DMA_ATTR_WEAK_ORDERING |
DMA_ATTR_WRITE_COMBINE;
nvkm_info(&imem->base.subdev, "using DMA API\n");
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
struct priv {
struct nvkm_bios *bios;
u32 bar0;
};
static u32
pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
struct nvkm_device *device = bios->subdev.device;
u32 i;
if (offset + length <= 0x00100000) {
for (i = offset; i < offset + length; i += 4)
*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
return length;
}
return 0;
}
static void
pramin_fini(void *data)
{
struct priv *priv = data;
if (priv) {
struct nvkm_device *device = priv->bios->subdev.device;
nvkm_wr32(device, 0x001700, priv->bar0);
kfree(priv);
}
}
static void *
pramin_init(struct nvkm_bios *bios, const char *name)
{
struct nvkm_subdev *subdev = &bios->subdev;
struct nvkm_device *device = subdev->device;
struct priv *priv = NULL;
u64 addr = 0;
/* PRAMIN always potentially available prior to nv50 */
if (device->card_type < NV_50)
return NULL;
/* we can't get the bios image pointer without PDISP */
if (device->card_type >= GA100)
addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
else
if (device->card_type >= GM100)
addr = nvkm_rd32(device, 0x021c04);
else
if (device->card_type >= NV_C0)
addr = nvkm_rd32(device, 0x022500);
if (addr & 0x00000001) {
nvkm_debug(subdev, "... display disabled\n");
return ERR_PTR(-ENODEV);
}
/* check that the window is enabled and in vram, particularly
* important as we don't want to be touching vram on an
* uninitialised board
*/
if (device->card_type >= GV100)
addr = nvkm_rd32(device, 0x625f04);
else
addr = nvkm_rd32(device, 0x619f04);
if (!(addr & 0x00000008)) {
nvkm_debug(subdev, "... not enabled\n");
return ERR_PTR(-ENODEV);
}
	if ((addr & 0x00000003) != 1) {
nvkm_debug(subdev, "... not in vram\n");
return ERR_PTR(-ENODEV);
}
/* some alternate method inherited from xf86-video-nv... */
addr = (addr & 0xffffff00) << 8;
if (!addr) {
addr = (u64)nvkm_rd32(device, 0x001700) << 16;
addr += 0xf0000;
}
/* modify bar0 PRAMIN window to cover the bios image */
if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
nvkm_error(subdev, "... out of memory\n");
return ERR_PTR(-ENOMEM);
}
priv->bios = bios;
priv->bar0 = nvkm_rd32(device, 0x001700);
nvkm_wr32(device, 0x001700, addr >> 16);
return priv;
}
const struct nvbios_source
nvbios_ramin = {
.name = "PRAMIN",
.init = pramin_init,
.fini = pramin_fini,
.read = pramin_read,
.rw = true,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c |
/*
* Copyright 2016 Karol Herbst
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/power_budget.h>
static u32
nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
u8 *len)
{
struct bit_entry bit_P;
u32 power_budget;
if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
bit_P.length < 0x30)
return 0;
power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c);
if (!power_budget)
return 0;
*ver = nvbios_rd08(bios, power_budget);
switch (*ver) {
case 0x20:
case 0x30:
*hdr = nvbios_rd08(bios, power_budget + 0x1);
*len = nvbios_rd08(bios, power_budget + 0x2);
*cnt = nvbios_rd08(bios, power_budget + 0x3);
return power_budget;
default:
break;
}
return 0;
}
int
nvbios_power_budget_header(struct nvkm_bios *bios,
struct nvbios_power_budget *budget)
{
u8 ver, hdr, cnt, len, cap_entry;
u32 header;
if (!bios || !budget)
return -EINVAL;
header = nvbios_power_budget_table(bios, &ver, &hdr, &cnt, &len);
if (!header || !cnt)
return -ENODEV;
switch (ver) {
case 0x20:
cap_entry = nvbios_rd08(bios, header + 0x9);
break;
case 0x30:
cap_entry = nvbios_rd08(bios, header + 0xa);
break;
default:
cap_entry = 0xff;
}
if (cap_entry >= cnt && cap_entry != 0xff) {
nvkm_warn(&bios->subdev,
"invalid cap_entry in power budget table found\n");
budget->cap_entry = 0xff;
return -EINVAL;
}
budget->offset = header;
budget->ver = ver;
budget->hlen = hdr;
budget->elen = len;
budget->ecount = cnt;
budget->cap_entry = cap_entry;
return 0;
}
int
nvbios_power_budget_entry(struct nvkm_bios *bios,
struct nvbios_power_budget *budget,
u8 idx, struct nvbios_power_budget_entry *entry)
{
u32 entry_offset;
if (!bios || !budget || !budget->offset || idx >= budget->ecount
|| !entry)
return -EINVAL;
entry_offset = budget->offset + budget->hlen + idx * budget->elen;
if (budget->ver >= 0x20) {
entry->min_w = nvbios_rd32(bios, entry_offset + 0x2);
entry->avg_w = nvbios_rd32(bios, entry_offset + 0x6);
entry->max_w = nvbios_rd32(bios, entry_offset + 0xa);
} else {
entry->min_w = 0;
entry->max_w = nvbios_rd32(bios, entry_offset + 0x2);
entry->avg_w = entry->max_w;
}
return 0;
}
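/*
 * Illustrative sketch (kept under #if 0, never compiled): reading the entry
 * the table flags as the board power cap.  The wrapper name is made up for
 * the example; values are returned as stored in the VBIOS table.
 */
#if 0
static int
example_read_power_cap(struct nvkm_bios *bios, u32 *max_w)
{
	struct nvbios_power_budget budget;
	struct nvbios_power_budget_entry entry;
	int ret;

	ret = nvbios_power_budget_header(bios, &budget);
	if (ret)
		return ret;
	if (budget.cap_entry == 0xff)
		return -ENODEV;

	ret = nvbios_power_budget_entry(bios, &budget, budget.cap_entry,
					&entry);
	if (ret)
		return ret;

	*max_w = entry.max_w;
	return 0;
}
#endif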
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/ramcfg.h>
#include <subdev/bios/M0203.h>
static u8
nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
{
return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
}
u8
nvbios_ramcfg_count(struct nvkm_bios *bios)
{
struct bit_entry bit_M;
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
return nvbios_rd08(bios, bit_M.offset + 2);
if (bit_M.version == 2 && bit_M.length >= 3)
return nvbios_rd08(bios, bit_M.offset + 0);
}
return 0x00;
}
u8
nvbios_ramcfg_index(struct nvkm_subdev *subdev)
{
struct nvkm_bios *bios = subdev->device->bios;
u8 strap = nvbios_ramcfg_strap(subdev);
u32 xlat = 0x00000000;
struct bit_entry bit_M;
struct nvbios_M0203E M0203E;
u8 ver, hdr;
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 1 && bit_M.length >= 5)
xlat = nvbios_rd16(bios, bit_M.offset + 3);
if (bit_M.version == 2 && bit_M.length >= 3) {
/*XXX: is M ever shorter than this?
* if not - what is xlat used for now?
* also - sigh..
*/
if (bit_M.length >= 7 &&
nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
return M0203E.group;
xlat = nvbios_rd16(bios, bit_M.offset + 1);
}
}
if (xlat)
strap = nvbios_rd08(bios, xlat + strap);
return strap;
}
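/*
 * Illustrative sketch (kept under #if 0, never compiled): callers usually
 * pair the two helpers, validating the strap-derived index against the
 * table size before using it.  The function name and 0xff sentinel are
 * made up for the example.
 */
#if 0
static u8
example_ramcfg_lookup(struct nvkm_subdev *subdev)
{
	struct nvkm_bios *bios = subdev->device->bios;
	u8 ramcfg = nvbios_ramcfg_index(subdev);

	if (ramcfg >= nvbios_ramcfg_count(bios))
		return 0xff;	/* strap points outside the table */
	return ramcfg;
}
#endif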
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
static int
acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
acpi_status status;
union acpi_object rom_arg_elements[2], *obj;
struct acpi_object_list rom_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
rom_arg.count = 2;
rom_arg.pointer = &rom_arg_elements[0];
rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
rom_arg_elements[0].integer.value = offset;
rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
rom_arg_elements[1].integer.value = length;
status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
if (ACPI_FAILURE(status)) {
pr_info("failed to evaluate ROM got %s\n",
acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
length = min(length, obj->buffer.length);
memcpy(bios+offset, obj->buffer.pointer, length);
kfree(buffer.pointer);
return length;
#else
return -EINVAL;
#endif
}
/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
* on some systems, such as Lenovo W530.
*/
static u32
acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0x00000fff;
u32 fetch = limit - start;
if (nvbios_extend(bios, limit) >= 0) {
int ret = acpi_read_bios(data, bios->data, start, fetch);
if (ret == fetch)
return fetch;
}
return 0;
}
/* Other systems, such as the one in fdo#55948, will report success
* but only return 4KiB of data. The common bios fetching logic will
* detect an invalid image, and fall back to this version of the read
* function.
*/
static u32
acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0xfff;
u32 fetch = 0;
if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
int ret = acpi_read_bios(data, bios->data,
start + fetch, 0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
}
}
return fetch;
}
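/*
 * Worked example of the alignment above (illustrative numbers): a request
 * for offset 0x100, length 0xe00 gives start = 0x0 and limit = 0x1000, so
 * the fast path issues a single 4KiB _ROM call, while the slow path issues
 * 4KiB calls one at a time until a call comes back short.
 */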
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
acpi_status status;
acpi_handle dhandle, rom_handle;
dhandle = ACPI_HANDLE(bios->subdev.device->dev);
if (!dhandle)
return ERR_PTR(-ENODEV);
status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
if (ACPI_FAILURE(status))
return ERR_PTR(-ENODEV);
return rom_handle;
#else
return ERR_PTR(-ENODEV);
#endif
}
const struct nvbios_source
nvbios_acpi_fast = {
.name = "ACPI",
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
.require_checksum = true,
};
const struct nvbios_source
nvbios_acpi_slow = {
.name = "ACPI",
.init = acpi_init,
.read = acpi_read_slow,
.rw = false,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0205.h>
u32
nvbios_M0205Te(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_M;
u32 data = 0x00000000;
if (!bit_entry(bios, 'M', &bit_M)) {
if (bit_M.version == 2 && bit_M.length > 0x08)
data = nvbios_rd32(bios, bit_M.offset + 0x05);
if (data) {
*ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
case 0x10:
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
*ssz = nvbios_rd08(bios, data + 0x03);
*snr = nvbios_rd08(bios, data + 0x04);
*cnt = nvbios_rd08(bios, data + 0x05);
return data;
default:
break;
}
}
}
return 0x00000000;
}
u32
nvbios_M0205Tp(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz,
struct nvbios_M0205T *info)
{
u32 data = nvbios_M0205Te(bios, ver, hdr, cnt, len, snr, ssz);
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
info->freq = nvbios_rd16(bios, data + 0x06);
break;
default:
break;
}
return data;
}
u32
nvbios_M0205Ee(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
u32 data = nvbios_M0205Te(bios, ver, hdr, cnt, len, &snr, &ssz);
if (data && idx < *cnt) {
data = data + *hdr + idx * (*len + (snr * ssz));
*hdr = *len;
*cnt = snr;
*len = ssz;
return data;
}
return 0x00000000;
}
u32
nvbios_M0205Ep(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0205E *info)
{
u32 data = nvbios_M0205Ee(bios, idx, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
return data;
default:
break;
}
return 0x00000000;
}
u32
nvbios_M0205Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
u32 data = nvbios_M0205Ee(bios, ent, ver, hdr, &cnt, &len);
if (data && idx < cnt) {
data = data + *hdr + idx * len;
*hdr = len;
return data;
}
return 0x00000000;
}
u32
nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
struct nvbios_M0205S *info)
{
u32 data = nvbios_M0205Se(bios, ent, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
info->data = nvbios_rd08(bios, data + 0x00);
return data;
default:
break;
}
return 0x00000000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/extdev.h>
static u16
extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
u8 dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
u16 dcb, extdev = 0;
dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40 && dcb_ver != 0x41))
return 0x0000;
extdev = nvbios_rd16(bios, dcb + 18);
if (!extdev)
return 0x0000;
*ver = nvbios_rd08(bios, extdev + 0);
*hdr = nvbios_rd08(bios, extdev + 1);
*cnt = nvbios_rd08(bios, extdev + 2);
*len = nvbios_rd08(bios, extdev + 3);
return extdev + *hdr;
}
bool
nvbios_extdev_skip_probe(struct nvkm_bios *bios)
{
u8 ver, hdr, len, cnt;
u16 data = extdev_table(bios, &ver, &hdr, &len, &cnt);
if (data && ver == 0x40 && hdr >= 5) {
u8 flags = nvbios_rd08(bios, data - hdr + 4);
if (flags & 1)
return true;
}
return false;
}
static u16
nvbios_extdev_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
if (extdev && idx < cnt)
return extdev + idx * *len;
return 0x0000;
}
static void
extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
struct nvbios_extdev_func *entry)
{
entry->type = nvbios_rd08(bios, offset + 0);
entry->addr = nvbios_rd08(bios, offset + 1);
entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
}
int
nvbios_extdev_parse(struct nvkm_bios *bios, int idx,
struct nvbios_extdev_func *func)
{
u8 ver, len;
u16 entry;
if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len)))
return -EINVAL;
extdev_parse_entry(bios, entry, func);
return 0;
}
int
nvbios_extdev_find(struct nvkm_bios *bios, enum nvbios_extdev_type type,
struct nvbios_extdev_func *func)
{
u8 ver, len, i;
u16 entry;
i = 0;
while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
extdev_parse_entry(bios, entry, func);
if (func->type == type)
return 0;
}
return -EINVAL;
}
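/*
 * Illustrative sketch (kept under #if 0, never compiled): probing for an
 * external device of a known type.  NVBIOS_EXTDEV_SOMETYPE stands in for a
 * constant from subdev/bios/extdev.h and example_probe_i2c() is a
 * hypothetical helper; bus and addr come straight from the table entry.
 */
#if 0
static int
example_extdev_probe(struct nvkm_bios *bios)
{
	struct nvbios_extdev_func extdev;

	if (nvbios_extdev_find(bios, NVBIOS_EXTDEV_SOMETYPE, &extdev))
		return -ENODEV;

	return example_probe_i2c(extdev.bus, extdev.addr);
}
#endif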
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/P0260.h>
u32
nvbios_P0260Te(struct nvkm_bios *bios,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
struct bit_entry bit_P;
u32 data = 0x00000000;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2 && bit_P.length > 0x63)
data = nvbios_rd32(bios, bit_P.offset + 0x60);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
switch (*ver) {
case 0x10:
*hdr = nvbios_rd08(bios, data + 1);
*cnt = nvbios_rd08(bios, data + 2);
*len = 4;
*xnr = nvbios_rd08(bios, data + 3);
*xsz = 4;
return data;
default:
break;
}
}
}
return 0x00000000;
}
u32
nvbios_P0260Ee(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt, xnr, xsz;
u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, len, &xnr, &xsz);
if (data && idx < cnt)
return data + hdr + (idx * *len);
return 0x00000000;
}
u32
nvbios_P0260Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_P0260E *info)
{
u32 data = nvbios_P0260Ee(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
info->data = nvbios_rd32(bios, data);
return data;
default:
break;
}
return 0x00000000;
}
u32
nvbios_P0260Xe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *xsz)
{
u8 hdr, cnt, len, xnr;
u32 data = nvbios_P0260Te(bios, ver, &hdr, &cnt, &len, &xnr, xsz);
if (data && idx < xnr)
return data + hdr + (cnt * len) + (idx * *xsz);
return 0x00000000;
}
u32
nvbios_P0260Xp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_P0260X *info)
{
u32 data = nvbios_P0260Xe(bios, idx, ver, hdr);
memset(info, 0x00, sizeof(*info));
switch (!!data * *ver) {
case 0x10:
info->data = nvbios_rd32(bios, data);
return data;
default:
break;
}
return 0x00000000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/image.h>
#include <subdev/bios/pmu.h>
u32
nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_p;
u32 data = 0;
if (!bit_entry(bios, 'p', &bit_p)) {
if (bit_p.version == 2 && bit_p.length >= 4)
data = nvbios_rd32(bios, bit_p.offset + 0x00);
if (data) {
*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
*cnt = nvbios_rd08(bios, data + 0x03);
}
}
return data;
}
u32
nvbios_pmuEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
u8 cnt, len;
u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
if (data && idx < cnt) {
data = data + *hdr + (idx * len);
*hdr = len;
return data;
}
return 0;
}
u32
nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_pmuE *info)
{
u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
if (data) {
info->type = nvbios_rd08(bios, data + 0x00);
info->data = nvbios_rd32(bios, data + 0x02);
}
return data;
}
bool
nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
{
struct nvbios_pmuE pmuE;
u8 ver, hdr, idx = 0;
u32 data;
memset(info, 0x00, sizeof(*info));
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
if (pmuE.type == type && (data = pmuE.data)) {
info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
info->boot_addr = data + 0x30;
info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
nvbios_rd32(bios, data + 0x18);
info->boot_size = nvbios_rd32(bios, data + 0x1c) -
nvbios_rd32(bios, data + 0x18);
info->code_addr = info->boot_addr + info->boot_size;
info->code_addr_pmu = info->boot_addr_pmu +
info->boot_size;
info->code_size = nvbios_rd32(bios, data + 0x20);
info->data_addr = data + 0x30 +
nvbios_rd32(bios, data + 0x24);
info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
info->data_size = nvbios_rd32(bios, data + 0x2c);
return true;
}
}
return false;
}
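/*
 * Illustrative sketch (kept under #if 0, never compiled): locating the
 * image descriptor for a given application type before uploading it.
 * example_copy() is a hypothetical stand-in for the real upload path.
 */
#if 0
static int
example_pmu_load(struct nvkm_bios *bios, u8 type)
{
	struct nvbios_pmuR desc;

	if (!nvbios_pmuRm(bios, type, &desc))
		return -ENODEV;

	example_copy(desc.boot_addr, desc.boot_addr_pmu, desc.boot_size);
	example_copy(desc.code_addr, desc.code_addr_pmu, desc.code_size);
	example_copy(desc.data_addr, desc.data_addr_pmu, desc.data_size);
	return 0;
}
#endif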
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/image.h>
struct shadow {
u32 skip;
const struct nvbios_source *func;
void *data;
u32 size;
int score;
};
static bool
shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
{
const u32 limit = (upto + 3) & ~3;
const u32 start = bios->size;
void *data = mthd->data;
if (nvbios_extend(bios, limit) > 0) {
u32 read = mthd->func->read(data, start, limit - start, bios);
bios->size = start + read;
}
return bios->size >= upto;
}
static int
shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
{
struct nvkm_subdev *subdev = &bios->subdev;
struct nvbios_image image;
int score = 1;
if (mthd->func->no_pcir) {
image.base = 0;
image.type = 0;
image.size = mthd->func->size(mthd->data);
image.last = 1;
} else {
if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
nvkm_debug(subdev, "%08x: header fetch failed\n",
offset);
return 0;
}
if (!nvbios_image(bios, idx, &image)) {
nvkm_debug(subdev, "image %d invalid\n", idx);
return 0;
}
}
nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
image.base, image.type, image.size);
if (!shadow_fetch(bios, mthd, image.base + image.size)) {
nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
return 0;
}
switch (image.type) {
case 0x00:
if (!mthd->func->ignore_checksum &&
nvbios_checksum(&bios->data[image.base], image.size)) {
nvkm_debug(subdev, "%08x: checksum failed\n",
image.base);
if (!mthd->func->require_checksum) {
if (mthd->func->rw)
score += 1;
score += 1;
} else
return 0;
} else {
score += 3;
}
break;
default:
score += 3;
break;
}
if (!image.last)
score += shadow_image(bios, idx + 1, offset + image.size, mthd);
return score;
}
static int
shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
{
const struct nvbios_source *func = mthd->func;
struct nvkm_subdev *subdev = &bios->subdev;
if (func->name) {
nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
if (func->init) {
mthd->data = func->init(bios, name);
if (IS_ERR(mthd->data)) {
mthd->data = NULL;
return 0;
}
}
mthd->score = shadow_image(bios, 0, 0, mthd);
if (func->fini)
func->fini(mthd->data);
nvkm_debug(subdev, "scored %d\n", mthd->score);
mthd->data = bios->data;
mthd->size = bios->size;
bios->data = NULL;
bios->size = 0;
}
return mthd->score;
}
static u32
shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
const struct firmware *fw = data;
if (offset + length <= fw->size) {
memcpy(bios->data + offset, fw->data + offset, length);
return length;
}
return 0;
}
static void *
shadow_fw_init(struct nvkm_bios *bios, const char *name)
{
struct device *dev = bios->subdev.device->dev;
const struct firmware *fw;
int ret = request_firmware(&fw, name, dev);
if (ret)
return ERR_PTR(-ENOENT);
return (void *)fw;
}
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
.fini = (void(*)(void *))release_firmware,
.read = shadow_fw_read,
.rw = false,
};
int
nvbios_shadow(struct nvkm_bios *bios)
{
struct nvkm_subdev *subdev = &bios->subdev;
struct nvkm_device *device = subdev->device;
struct shadow mthds[] = {
{ 0, &nvbios_of },
{ 0, &nvbios_ramin },
{ 0, &nvbios_prom },
{ 0, &nvbios_acpi_fast },
{ 4, &nvbios_acpi_slow },
{ 1, &nvbios_pcirom },
{ 1, &nvbios_platform },
{}
}, *mthd, *best = NULL;
const char *optarg;
char *source;
int optlen;
/* handle user-specified bios source */
optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
if (source) {
/* try to match one of the built-in methods */
for (mthd = mthds; mthd->func; mthd++) {
if (mthd->func->name &&
!strcasecmp(source, mthd->func->name)) {
best = mthd;
if (shadow_method(bios, mthd, NULL))
break;
}
}
/* otherwise, attempt to load as firmware */
if (!best && (best = mthd)) {
mthd->func = &shadow_fw;
shadow_method(bios, mthd, source);
mthd->func = NULL;
}
if (!best->score) {
nvkm_error(subdev, "%s invalid\n", source);
kfree(source);
source = NULL;
}
}
/* scan all potential bios sources, looking for best image */
if (!best || !best->score) {
for (mthd = mthds, best = mthd; mthd->func; mthd++) {
if (!mthd->skip || best->score < mthd->skip) {
if (shadow_method(bios, mthd, NULL)) {
if (mthd->score > best->score)
best = mthd;
}
}
}
}
/* cleanup the ones we didn't use */
for (mthd = mthds; mthd->func; mthd++) {
if (mthd != best)
kfree(mthd->data);
}
if (!best->score) {
nvkm_error(subdev, "unable to locate usable image\n");
return -EINVAL;
}
nvkm_debug(subdev, "using image from %s\n", best->func ?
best->func->name : source);
bios->data = best->data;
bios->size = best->size;
kfree(source);
return 0;
}
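/*
 * Usage note: the "NvBios" config option parsed above may name one of the
 * built-in methods (e.g. "PRAMIN" or "ACPI", matched case-insensitively);
 * any other string is treated as a firmware filename and loaded through
 * request_firmware().
 */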
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/volt.h>
u32
nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_P;
u32 volt = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
volt = nvbios_rd32(bios, bit_P.offset + 0x0c);
else
if (bit_P.version == 1)
volt = nvbios_rd32(bios, bit_P.offset + 0x10);
if (volt) {
*ver = nvbios_rd08(bios, volt + 0);
switch (*ver) {
case 0x12:
*hdr = 5;
*cnt = nvbios_rd08(bios, volt + 2);
*len = nvbios_rd08(bios, volt + 1);
return volt;
case 0x20:
*hdr = nvbios_rd08(bios, volt + 1);
*cnt = nvbios_rd08(bios, volt + 2);
*len = nvbios_rd08(bios, volt + 3);
return volt;
case 0x30:
case 0x40:
case 0x50:
*hdr = nvbios_rd08(bios, volt + 1);
*cnt = nvbios_rd08(bios, volt + 3);
*len = nvbios_rd08(bios, volt + 2);
return volt;
}
}
}
return 0;
}
u32
nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_volt *info)
{
u32 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
info->ranged = false;
break;
case 0x20:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x05);
info->ranged = false;
break;
case 0x30:
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x04);
info->ranged = false;
break;
case 0x40:
info->type = NVBIOS_VOLT_GPIO;
info->base = nvbios_rd32(bios, volt + 0x04);
info->step = nvbios_rd16(bios, volt + 0x08);
info->vidmask = nvbios_rd08(bios, volt + 0x0b);
info->ranged = true; /* XXX: find the flag byte */
info->min = min(info->base,
info->base + info->step * info->vidmask);
info->max = nvbios_rd32(bios, volt + 0x0e);
if (!info->max)
info->max = max(info->base, info->base + info->step * info->vidmask);
break;
case 0x50:
info->min = nvbios_rd32(bios, volt + 0x0a);
info->max = nvbios_rd32(bios, volt + 0x0e);
info->base = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
/* offset 4 seems to be a flag byte */
if (nvbios_rd32(bios, volt + 0x4) & 1) {
info->type = NVBIOS_VOLT_PWM;
info->pwm_freq = nvbios_rd32(bios, volt + 0x5) / 1000;
info->pwm_range = nvbios_rd32(bios, volt + 0x16);
} else {
info->type = NVBIOS_VOLT_GPIO;
info->vidmask = nvbios_rd08(bios, volt + 0x06);
info->step = nvbios_rd16(bios, volt + 0x16);
info->ranged =
!!(nvbios_rd08(bios, volt + 0x4) & 0x2);
}
break;
}
return volt;
}
u32
nvbios_volt_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
u8 hdr, cnt;
u32 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
if (volt && idx < cnt) {
volt = volt + hdr + (idx * *len);
return volt;
}
return 0;
}
u32
nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
struct nvbios_volt_entry *info)
{
u32 volt = nvbios_volt_entry(bios, idx, ver, len);
memset(info, 0x00, sizeof(*info));
switch (!!volt * *ver) {
case 0x12:
case 0x20:
info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
info->vid = nvbios_rd08(bios, volt + 0x01);
break;
case 0x30:
info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
info->vid = nvbios_rd08(bios, volt + 0x01) >> 2;
break;
case 0x40:
break;
case 0x50:
info->voltage = nvbios_rd32(bios, volt) & 0x001fffff;
info->vid = (nvbios_rd32(bios, volt) >> 23) & 0xff;
break;
}
return volt;
}
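/*
 * Illustrative sketch (kept under #if 0, never compiled): building a vid ->
 * voltage mapping from an entry-based table.  Range-based tables
 * (info.ranged) would instead be expanded from base/step, which is omitted
 * here; example_add_vid() is a hypothetical helper.
 */
#if 0
static void
example_volt_enumerate(struct nvkm_bios *bios)
{
	struct nvbios_volt info;
	struct nvbios_volt_entry ivid;
	u8 ver, hdr, cnt, len;
	int i;

	if (!nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info))
		return;
	if (info.ranged)
		return;

	for (i = 0; i < cnt; i++) {
		nvbios_volt_entry_parse(bios, i, &ver, &len, &ivid);
		example_add_vid(ivid.vid, ivid.voltage);
	}
}
#endif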
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c |