/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
#include "priv.h"
#include <nvif/class.h>
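/* MSVLD is the falcon-based variable-length (bitstream) decoder stage of
 * the video decode pipeline.  Each chipset variant only describes its
 * falcon hooks and the object class it exposes; MCP89 reuses the G98
 * initialisation but advertises the IGT21A class.
 */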
static const struct nvkm_falcon_func
mcp89_msvld = {
.init = g98_msvld_init,
.sclass = {
{ -1, -1, IGT21A_MSVLD },
{}
}
};
int
mcp89_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_msvld_new_(&mcp89_msvld, device, type, inst, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_falcon_func
gt215_msvld = {
.init = g98_msvld_init,
.sclass = {
{ -1, -1, GT212_MSVLD },
{}
}
};
int
gt215_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_msvld_new_(&gt215_msvld, device, type, inst, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static const struct nvkm_falcon_func
gm107_nvenc_flcn = {
};
static const struct nvkm_nvenc_func
gm107_nvenc = {
.flcn = &gm107_nvenc_flcn,
};
static int
gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
const struct nvkm_nvenc_fwif *fwif)
{
return 0;
}
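/* Firmware interface table: a version of -1 matches unconditionally, and
 * the "nofw" constructor loads nothing, so GM107 NVENC is driven without
 * any external firmware image.
 */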
static const struct nvkm_nvenc_fwif
gm107_nvenc_fwif[] = {
{ -1, gm107_nvenc_nofw, &gm107_nvenc },
{}
};
int
gm107_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/firmware.h>
static void *
nvkm_nvenc_dtor(struct nvkm_engine *engine)
{
struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
nvkm_falcon_dtor(&nvenc->falcon);
return nvenc;
}
static const struct nvkm_engine_func
nvkm_nvenc = {
.dtor = nvkm_nvenc_dtor,
};
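/* Common NVENC constructor: allocate the engine, select a firmware
 * interface from the chipset's fwif table, then construct the falcon with
 * the hooks that interface provides.
 */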
int
nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
{
struct nvkm_nvenc *nvenc;
int ret;
if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL)))
return -ENOMEM;
ret = nvkm_engine_ctor(&nvkm_nvenc, device, type, inst, true,
&nvenc->engine);
if (ret)
return ret;
fwif = nvkm_firmware_load(&nvenc->engine.subdev, fwif, "Nvenc", nvenc);
if (IS_ERR(fwif))
return -ENODEV;
nvenc->func = fwif->func;
return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
nvenc->engine.subdev.name, 0, &nvenc->falcon);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "head.h"
#include <nvif/class.h>
static void
nv04_head_vblank_put(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_wr32(device, 0x600140 + (head->id * 0x2000), 0x00000000);
}
static void
nv04_head_vblank_get(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_wr32(device, 0x600140 + (head->id * 0x2000), 0x00000001);
}
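/* Raster generator position: one register packs the current horizontal
 * line into the high 16 bits and the vertical line into the low 16 bits.
 */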
static void
nv04_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
u32 data = nvkm_rd32(device, 0x600868 + (head->id * 0x2000));
*hline = (data & 0xffff0000) >> 16;
*vline = (data & 0x0000ffff);
}
static void
nv04_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x0200;
state->vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0x0000ffff;
state->vtotal = nvkm_rd32(device, 0x680804 + hoff) & 0x0000ffff;
state->vblanke = state->vtotal - 1;
state->hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0x0000ffff;
state->htotal = nvkm_rd32(device, 0x680824 + hoff) & 0x0000ffff;
state->hblanke = state->htotal - 1;
}
static const struct nvkm_head_func
nv04_head = {
.state = nv04_head_state,
.rgpos = nv04_head_rgpos,
.vblank_get = nv04_head_vblank_get,
.vblank_put = nv04_head_vblank_put,
};
static int
nv04_head_new(struct nvkm_disp *disp, int id)
{
return nvkm_head_new_(&nv04_head, disp, id);
}
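/* Top-level display interrupt: acknowledge and forward the per-CRTC
 * vblank bits, and drain PVIDEO (overlay) interrupts on the NV10..NV40
 * chipsets that have the unit.
 */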
static void
nv04_disp_intr(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 crtc0 = nvkm_rd32(device, 0x600100);
u32 crtc1 = nvkm_rd32(device, 0x602100);
u32 pvideo;
if (crtc0 & 0x00000001) {
nvkm_disp_vblank(disp, 0);
nvkm_wr32(device, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
nvkm_disp_vblank(disp, 1);
nvkm_wr32(device, 0x602100, 0x00000001);
}
if (device->chipset >= 0x10 && device->chipset <= 0x40) {
pvideo = nvkm_rd32(device, 0x8100);
if (pvideo & ~0x11)
nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
nvkm_wr32(device, 0x8100, pvideo);
}
}
static const struct nvkm_disp_func
nv04_disp = {
.intr = nv04_disp_intr,
.root = { 0, 0, NV04_DISP },
.user = { {} },
};
int
nv04_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
int ret, i;
ret = nvkm_disp_new_(&nv04_disp, device, type, inst, pdisp);
if (ret)
return ret;
for (i = 0; i < 2; i++) {
ret = nv04_head_new(*pdisp, i);
if (ret)
return ret;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
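/* Program a DP training pattern on all four lanes at once: each byte of
 * the register appears to hold one lane's 5-bit pattern field, hence the
 * replicated data values and the 0x1f1f1f1f mask.  Which register is
 * written depends on which link the SOR is currently driving.
 */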
void
gm107_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
u32 mask = 0x1f1f1f1f, data;
switch (pattern) {
case 0: data = 0x10101010; break;
case 1: data = 0x01010101; break;
case 2: data = 0x02020202; break;
case 3: data = 0x03030303; break;
case 4: data = 0x1b1b1b1b; break;
default:
WARN_ON(1);
return;
}
if (sor->asy.link & 1)
nvkm_mask(device, 0x61c110 + soff, mask, data);
else
nvkm_mask(device, 0x61c12c + soff, mask, data);
}
static const struct nvkm_ior_func_dp
gm107_sor_dp = {
.lanes = { 0, 1, 2, 3 },
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gf119_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
.watermark = gf119_sor_dp_watermark,
};
static const struct nvkm_ior_func
gm107_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gk104_sor_hdmi,
.dp = &gm107_sor_dp,
.hda = &gf119_sor_hda,
};
static int
gm107_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&gm107_sor, disp, SOR, id, true);
}
static const struct nvkm_disp_func
gm107_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
.sor = { .cnt = gf119_sor_cnt, .new = gm107_sor_new },
.root = { 0,0,GM107_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GK110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GM107_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gk104_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gk104_disp_ovly },
{}
},
};
int
gm107_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gm107_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
void
gk104_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe vsi;
const u32 hoff = head * 0x400;
pack_hdmi_infoframe(&vsi, data, size);
/* GENERIC(?) / Vendor InfoFrame? */
nvkm_mask(device, 0x690100 + hoff, 0x00010001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x690108 + hoff, vsi.header);
nvkm_wr32(device, 0x69010c + hoff, vsi.subpack0_low);
nvkm_wr32(device, 0x690110 + hoff, vsi.subpack0_high);
/* Is there a second (or further?) set of subpack registers here? */
nvkm_mask(device, 0x690100 + hoff, 0x00000001, 0x00000001);
}
void
gk104_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe avi;
const u32 hoff = head * 0x400;
pack_hdmi_infoframe(&avi, data, size);
/* AVI InfoFrame */
nvkm_mask(device, 0x690000 + hoff, 0x00000001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x690008 + hoff, avi.header);
nvkm_wr32(device, 0x69000c + hoff, avi.subpack0_low);
nvkm_wr32(device, 0x690010 + hoff, avi.subpack0_high);
nvkm_wr32(device, 0x690014 + hoff, avi.subpack1_low);
nvkm_wr32(device, 0x690018 + hoff, avi.subpack1_high);
nvkm_mask(device, 0x690000 + hoff, 0x00000001, 0x00000001);
}
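/* HDMI control: bit 30 enables HDMI, bits 20:16 carry max_ac_packet and
 * bits 6:0 the rekey value (hence the 0x401f007f mask written below).
 * Disabling also shuts off the vendor/AVI infoframes and the unidentified
 * 0x6900c0 packet block.
 */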
void
gk104_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 ctrl = 0x40000000 * enable |
max_ac_packet << 16 |
rekey;
const u32 hoff = head * 0x800;
const u32 hdmi = head * 0x400;
if (!(ctrl & 0x40000000)) {
nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
nvkm_mask(device, 0x690100 + hdmi, 0x00000001, 0x00000000);
nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
return;
}
/* ??? InfoFrame? */
nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
/* ??? */
nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
/* HDMI_CTRL */
nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
}
const struct nvkm_ior_func_hdmi
gk104_sor_hdmi = {
.ctrl = gk104_sor_hdmi_ctrl,
.infoframe_avi = gk104_sor_hdmi_infoframe_avi,
.infoframe_vsi = gk104_sor_hdmi_infoframe_vsi,
};
static const struct nvkm_ior_func
gk104_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gk104_sor_hdmi,
.dp = &gf119_sor_dp,
.hda = &gf119_sor_hda,
};
int
gk104_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&gk104_sor, disp, SOR, id, true);
}
static const struct nvkm_disp_mthd_list
gk104_disp_ovly_mthd_base = {
.mthd = 0x0000,
.data = {
{ 0x0080, 0x665080 },
{ 0x0084, 0x665084 },
{ 0x0088, 0x665088 },
{ 0x008c, 0x66508c },
{ 0x0090, 0x665090 },
{ 0x0094, 0x665094 },
{ 0x00a0, 0x6650a0 },
{ 0x00a4, 0x6650a4 },
{ 0x00b0, 0x6650b0 },
{ 0x00b4, 0x6650b4 },
{ 0x00b8, 0x6650b8 },
{ 0x00c0, 0x6650c0 },
{ 0x00c4, 0x6650c4 },
{ 0x00e0, 0x6650e0 },
{ 0x00e4, 0x6650e4 },
{ 0x00e8, 0x6650e8 },
{ 0x0100, 0x665100 },
{ 0x0104, 0x665104 },
{ 0x0108, 0x665108 },
{ 0x010c, 0x66510c },
{ 0x0110, 0x665110 },
{ 0x0118, 0x665118 },
{ 0x011c, 0x66511c },
{ 0x0120, 0x665120 },
{ 0x0124, 0x665124 },
{ 0x0130, 0x665130 },
{ 0x0134, 0x665134 },
{ 0x0138, 0x665138 },
{ 0x013c, 0x66513c },
{ 0x0140, 0x665140 },
{ 0x0144, 0x665144 },
{ 0x0148, 0x665148 },
{ 0x014c, 0x66514c },
{ 0x0150, 0x665150 },
{ 0x0154, 0x665154 },
{ 0x0158, 0x665158 },
{ 0x015c, 0x66515c },
{ 0x0160, 0x665160 },
{ 0x0164, 0x665164 },
{ 0x0168, 0x665168 },
{ 0x016c, 0x66516c },
{ 0x0400, 0x665400 },
{ 0x0404, 0x665404 },
{ 0x0408, 0x665408 },
{ 0x040c, 0x66540c },
{ 0x0410, 0x665410 },
{}
}
};
const struct nvkm_disp_chan_mthd
gk104_disp_ovly_mthd = {
.name = "Overlay",
.addr = 0x001000,
.prev = -0x020000,
.data = {
{ "Global", 1, &gk104_disp_ovly_mthd_base },
{}
}
};
const struct nvkm_disp_chan_user
gk104_disp_ovly = {
.func = &gf119_disp_dmac_func,
.ctrl = 5,
.user = 5,
.mthd = &gk104_disp_ovly_mthd,
};
static const struct nvkm_disp_mthd_list
gk104_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
{ 0x0400, 0x660400 },
{ 0x0404, 0x660404 },
{ 0x0408, 0x660408 },
{ 0x040c, 0x66040c },
{ 0x0410, 0x660410 },
{ 0x0414, 0x660414 },
{ 0x0418, 0x660418 },
{ 0x041c, 0x66041c },
{ 0x0420, 0x660420 },
{ 0x0424, 0x660424 },
{ 0x0428, 0x660428 },
{ 0x042c, 0x66042c },
{ 0x0430, 0x660430 },
{ 0x0434, 0x660434 },
{ 0x0438, 0x660438 },
{ 0x0440, 0x660440 },
{ 0x0444, 0x660444 },
{ 0x0448, 0x660448 },
{ 0x044c, 0x66044c },
{ 0x0450, 0x660450 },
{ 0x0454, 0x660454 },
{ 0x0458, 0x660458 },
{ 0x045c, 0x66045c },
{ 0x0460, 0x660460 },
{ 0x0468, 0x660468 },
{ 0x046c, 0x66046c },
{ 0x0470, 0x660470 },
{ 0x0474, 0x660474 },
{ 0x047c, 0x66047c },
{ 0x0480, 0x660480 },
{ 0x0484, 0x660484 },
{ 0x0488, 0x660488 },
{ 0x048c, 0x66048c },
{ 0x0490, 0x660490 },
{ 0x0494, 0x660494 },
{ 0x0498, 0x660498 },
{ 0x04a0, 0x6604a0 },
{ 0x04b0, 0x6604b0 },
{ 0x04b8, 0x6604b8 },
{ 0x04bc, 0x6604bc },
{ 0x04c0, 0x6604c0 },
{ 0x04c4, 0x6604c4 },
{ 0x04c8, 0x6604c8 },
{ 0x04d0, 0x6604d0 },
{ 0x04d4, 0x6604d4 },
{ 0x04e0, 0x6604e0 },
{ 0x04e4, 0x6604e4 },
{ 0x04e8, 0x6604e8 },
{ 0x04ec, 0x6604ec },
{ 0x04f0, 0x6604f0 },
{ 0x04f4, 0x6604f4 },
{ 0x04f8, 0x6604f8 },
{ 0x04fc, 0x6604fc },
{ 0x0500, 0x660500 },
{ 0x0504, 0x660504 },
{ 0x0508, 0x660508 },
{ 0x050c, 0x66050c },
{ 0x0510, 0x660510 },
{ 0x0514, 0x660514 },
{ 0x0518, 0x660518 },
{ 0x051c, 0x66051c },
{ 0x0520, 0x660520 },
{ 0x0524, 0x660524 },
{ 0x052c, 0x66052c },
{ 0x0530, 0x660530 },
{ 0x054c, 0x66054c },
{ 0x0550, 0x660550 },
{ 0x0554, 0x660554 },
{ 0x0558, 0x660558 },
{ 0x055c, 0x66055c },
{}
}
};
const struct nvkm_disp_chan_mthd
gk104_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = -0x020000,
.data = {
{ "Global", 1, &gf119_disp_core_mthd_base },
{ "DAC", 3, &gf119_disp_core_mthd_dac },
{ "SOR", 8, &gf119_disp_core_mthd_sor },
{ "PIOR", 4, &gf119_disp_core_mthd_pior },
{ "HEAD", 4, &gk104_disp_core_mthd_head },
{}
}
};
const struct nvkm_disp_chan_user
gk104_disp_core = {
.func = &gf119_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &gk104_disp_core_mthd,
};
static const struct nvkm_disp_func
gk104_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
.sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
.root = { 0,0,GK104_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GK104_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GK104_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gk104_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gk104_disp_ovly },
{}
},
};
int
gk104_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gk104_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "chan.h"
#include <core/oproxy.h>
#include <core/ramht.h>
#include <nvif/if0014.h>
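/* Every display channel exposes a per-channel USER register window; the
 * rd32/wr32/map implementations below all resolve it through the
 * channel's ->user() hook, so clients can drive a channel by PIO or by
 * mapping the window directly.
 */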
static int
nvkm_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
u64 size, base = chan->func->user(chan, &size);
*data = nvkm_rd32(device, base + addr);
return 0;
}
static int
nvkm_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
u64 size, base = chan->func->user(chan, &size);
nvkm_wr32(device, base + addr, data);
return 0;
}
static int
nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_disp *disp = chan->disp;
switch (type) {
case 0:
*pevent = &disp->uevent;
return 0;
default:
break;
}
return -EINVAL;
}
static int
nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u64 base = device->func->resource_addr(device, 0);
*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
return 0;
}
struct nvkm_disp_chan_object {
struct nvkm_oproxy oproxy;
struct nvkm_disp *disp;
int hash;
};
static void
nvkm_disp_chan_child_del_(struct nvkm_oproxy *base)
{
struct nvkm_disp_chan_object *object = container_of(base, typeof(*object), oproxy);
nvkm_ramht_remove(object->disp->ramht, object->hash);
}
static const struct nvkm_oproxy_func
nvkm_disp_chan_child_func_ = {
.dtor[0] = nvkm_disp_chan_child_del_,
};
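/* Objects created on a display channel (e.g. DMA objects) are wrapped in
 * an oproxy and bound into the display RAMHT so the hardware can resolve
 * their handles; the oproxy destructor removes the hash entry again.
 */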
static int
nvkm_disp_chan_child_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(oclass->parent);
struct nvkm_disp *disp = chan->disp;
struct nvkm_device *device = disp->engine.subdev.device;
const struct nvkm_device_oclass *sclass = oclass->priv;
struct nvkm_disp_chan_object *object;
int ret;
if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
return -ENOMEM;
nvkm_oproxy_ctor(&nvkm_disp_chan_child_func_, oclass, &object->oproxy);
object->disp = disp;
*pobject = &object->oproxy.base;
ret = sclass->ctor(device, oclass, argv, argc, &object->oproxy.object);
if (ret)
return ret;
object->hash = chan->func->bind(chan, object->oproxy.object, oclass->handle);
if (object->hash < 0)
return object->hash;
return 0;
}
static int
nvkm_disp_chan_child_get(struct nvkm_object *object, int index, struct nvkm_oclass *sclass)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
const struct nvkm_device_oclass *oclass = NULL;
if (chan->func->bind)
sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0);
else
sclass->engine = NULL;
if (sclass->engine && sclass->engine->func->base.sclass) {
sclass->engine->func->base.sclass(sclass, index, &oclass);
if (oclass) {
sclass->ctor = nvkm_disp_chan_child_new;
sclass->priv = oclass;
return 0;
}
}
return -EINVAL;
}
static int
nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
chan->func->fini(chan);
chan->func->intr(chan, false);
return 0;
}
static int
nvkm_disp_chan_init(struct nvkm_object *object)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
chan->func->intr(chan, true);
return chan->func->init(chan);
}
static void *
nvkm_disp_chan_dtor(struct nvkm_object *object)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_disp *disp = chan->disp;
spin_lock(&disp->client.lock);
if (disp->chan[chan->chid.user] == chan)
disp->chan[chan->chid.user] = NULL;
spin_unlock(&disp->client.lock);
nvkm_memory_unref(&chan->memory);
return chan;
}
static const struct nvkm_object_func
nvkm_disp_chan = {
.dtor = nvkm_disp_chan_dtor,
.init = nvkm_disp_chan_init,
.fini = nvkm_disp_chan_fini,
.rd32 = nvkm_disp_chan_rd32,
.wr32 = nvkm_disp_chan_wr32,
.ntfy = nvkm_disp_chan_ntfy,
.map = nvkm_disp_chan_map,
.sclass = nvkm_disp_chan_child_get,
};
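/* Common channel constructor: match the requested class against the
 * disp's user list, validate the arguments (a pushbuffer must be supplied
 * exactly when the channel type has a ->push hook), derive the ctrl/user
 * channel indices from the class's base ids plus the requested id, and
 * claim the channel slot, returning -EBUSY if it is already open.
 */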
static int
nvkm_disp_chan_new_(struct nvkm_disp *disp, int nr, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
const struct nvkm_disp_chan_user *user = NULL;
struct nvkm_disp_chan *chan;
union nvif_disp_chan_args *args = argv;
int ret, i;
for (i = 0; disp->func->user[i].ctor; i++) {
if (disp->func->user[i].base.oclass == oclass->base.oclass) {
user = disp->func->user[i].chan;
break;
}
}
if (WARN_ON(!user))
return -EINVAL;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (args->v0.id >= nr || !args->v0.pushbuf != !user->func->push)
return -EINVAL;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
*pobject = &chan->object;
nvkm_object_ctor(&nvkm_disp_chan, oclass, &chan->object);
chan->func = user->func;
chan->mthd = user->mthd;
chan->disp = disp;
chan->chid.ctrl = user->ctrl + args->v0.id;
chan->chid.user = user->user + args->v0.id;
chan->head = args->v0.id;
if (chan->func->push) {
ret = chan->func->push(chan, args->v0.pushbuf);
if (ret)
return ret;
}
spin_lock(&disp->client.lock);
if (disp->chan[chan->chid.user]) {
spin_unlock(&disp->client.lock);
return -EBUSY;
}
disp->chan[chan->chid.user] = chan;
spin_unlock(&disp->client.lock);
return 0;
}
int
nvkm_disp_wndw_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, disp->wndw.nr, oclass, argv, argc, pobject);
}
int
nvkm_disp_chan_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, disp->head.nr, oclass, argv, argc, pobject);
}
int
nvkm_disp_core_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
return nvkm_disp_chan_new_(disp, 1, oclass, argv, argc, pobject);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "chan.h"
#include "priv.h"
#include "head.h"
#include "ior.h"
#include <core/gpuobj.h>
#include <subdev/timer.h>
#include <nvif/class.h>
void
tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
nvkm_mask(device, 0x616578 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
}
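/* Link configuration: the bandwidth code is programmed into the clock
 * register (bits 22:18), while the lane-enable mask, MST and
 * enhanced-framing flags land in the DP control register.
 */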
static int
tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 loff = nv50_sor_link(sor);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
clksor |= sor->dp.bw << 18;
dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
if (sor->dp.mst)
dpctrl |= 0x40000000;
if (sor->dp.ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
/*XXX*/
nvkm_msec(device, 40, NVKM_DELAY);
nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
static const struct nvkm_ior_func_dp
tu102_sor_dp = {
.lanes = { 0, 1, 2, 3 },
.links = tu102_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gm200_sor_dp_drive,
.vcpi = tu102_sor_dp_vcpi,
.audio = gv100_sor_dp_audio,
.audio_sym = gv100_sor_dp_audio_sym,
.watermark = gv100_sor_dp_watermark,
};
static const struct nvkm_ior_func
tu102_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
},
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gv100_sor_hdmi,
.dp = &tu102_sor_dp,
.hda = &gv100_sor_hda,
};
static int
tu102_sor_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 hda = nvkm_rd32(device, 0x08a15c);
return nvkm_ior_new_(&tu102_sor, disp, SOR, id, hda & BIT(id));
}
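/* TU102 display init: take ownership back from any external agent, copy
 * the hardware capability registers (SOR, head, window, IHUB) into the
 * 0x640000 capability region (exposed to clients through the DISP_CAPS
 * class), point the hardware at instance memory, then unmask the
 * interrupt trees.
 */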
int
tu102_disp_init(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_head *head;
int i, j;
u32 tmp;
/* Claim ownership of display. */
if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
break;
) < 0)
return -EBUSY;
}
/* Lock pin capabilities. */
tmp = 0x00000021; /*XXX*/
nvkm_wr32(device, 0x640008, tmp);
/* SOR capabilities. */
for (i = 0; i < disp->sor.nr; i++) {
tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
}
/* Head capabilities. */
list_for_each_entry(head, &disp->heads, head) {
const int id = head->id;
/* RG. */
tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
/* POSTCOMP. */
for (j = 0; j < 5 * 4; j += 4) {
tmp = nvkm_rd32(device, 0x616140 + (id * 0x800) + j);
nvkm_wr32(device, 0x640680 + (id * 0x20) + j, tmp);
}
}
/* Window capabilities. */
for (i = 0; i < disp->wndw.nr; i++) {
nvkm_mask(device, 0x640004, 1 << i, 1 << i);
for (j = 0; j < 6 * 4; j += 4) {
tmp = nvkm_rd32(device, 0x630100 + (i * 0x800) + j);
nvkm_mask(device, 0x640780 + (i * 0x20) + j, 0xffffffff, tmp);
}
nvkm_mask(device, 0x64000c, 0x00000100, 0x00000100);
}
/* IHUB capabilities. */
for (i = 0; i < 3; i++) {
tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
}
nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
/* Setup instance memory. */
switch (nvkm_memory_target(disp->inst->memory)) {
case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
default:
break;
}
nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
/* EXC_OTHER: CURSn, CORE. */
nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
0x00000001); /* MSK. */
nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
/* EXC_WINIM. */
nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
/* EXC_WIN. */
nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
/* HEAD_TIMING(n): VBLANK. */
list_for_each_entry(head, &disp->heads, head) {
const u32 hoff = head->id * 4;
nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
}
/* OR. */
nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
return 0;
}
static const struct nvkm_disp_func
tu102_disp = {
.oneinit = nv50_disp_oneinit,
.init = tu102_disp_init,
.fini = gv100_disp_fini,
.intr = gv100_disp_intr,
.super = gv100_disp_super,
.uevent = &gv100_disp_chan_uevent,
.wndw = { .cnt = gv100_disp_wndw_cnt },
.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
.sor = { .cnt = gv100_sor_cnt, .new = tu102_sor_new },
.ramht_size = 0x2000,
.root = { 0, 0,TU102_DISP },
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{ 0, 0,TU102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
{{ 0, 0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
{{ 0, 0,TU102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
{{ 0, 0,TU102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
{}
},
};
int
tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <core/client.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/i2c.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
#include <nvif/class.h>
#include <nvif/unpack.h>
static void
nv50_pior_clock(struct nvkm_ior *pior)
{
struct nvkm_device *device = pior->disp->engine.subdev.device;
const u32 poff = nv50_ior_base(pior);
nvkm_mask(device, 0x614380 + poff, 0x00000707, 0x00000001);
}
static int
nv50_pior_dp_links(struct nvkm_ior *pior, struct nvkm_i2c_aux *aux)
{
int ret = nvkm_i2c_aux_lnk_ctl(aux, pior->dp.nr, pior->dp.bw, pior->dp.ef);
if (ret)
return ret;
return 1;
}
static const struct nvkm_ior_func_dp
nv50_pior_dp = {
.links = nv50_pior_dp_links,
};
static void
nv50_pior_power_wait(struct nvkm_device *device, u32 poff)
{
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61e004 + poff) & 0x80000000))
break;
);
}
static void
nv50_pior_power(struct nvkm_ior *pior, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
struct nvkm_device *device = pior->disp->engine.subdev.device;
const u32 poff = nv50_ior_base(pior);
const u32 shift = normal ? 0 : 16;
const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
const u32 field = 0x80000000 | (0x00000101 << shift);
nv50_pior_power_wait(device, poff);
nvkm_mask(device, 0x61e004 + poff, field, state);
nv50_pior_power_wait(device, poff);
}
void
nv50_pior_depth(struct nvkm_ior *ior, struct nvkm_ior_state *state, u32 ctrl)
{
/* GF119 moves this information to per-head methods, which is
* a lot more convenient, and where our shared code expect it.
*/
if (state->head && state == &ior->asy) {
struct nvkm_head *head = nvkm_head_find(ior->disp, __ffs(state->head));
if (!WARN_ON(!head)) {
struct nvkm_head_state *state = &head->asy;
switch ((ctrl & 0x000f0000) >> 16) {
case 6: state->or.depth = 30; break;
case 5: state->or.depth = 24; break;
case 2: state->or.depth = 18; break;
case 0: state->or.depth = 18; break; /*XXX*/
default:
state->or.depth = 18;
WARN_ON(1);
break;
}
}
}
}
static void
nv50_pior_state(struct nvkm_ior *pior, struct nvkm_ior_state *state)
{
struct nvkm_device *device = pior->disp->engine.subdev.device;
const u32 coff = pior->id * 8 + (state == &pior->arm) * 4;
u32 ctrl = nvkm_rd32(device, 0x610b80 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
state->rgdiv = 1;
switch (state->proto_evo) {
case 0: state->proto = TMDS; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x00000003;
nv50_pior_depth(pior, state, ctrl);
}
static const struct nvkm_ior_func
nv50_pior = {
.state = nv50_pior_state,
.power = nv50_pior_power,
.clock = nv50_pior_clock,
.dp = &nv50_pior_dp,
};
int
nv50_pior_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&nv50_pior, disp, PIOR, id, false);
}
int
nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
return 3;
}
void
nv50_sor_clock(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const int div = sor->asy.link == 3;
const u32 soff = nv50_ior_base(sor);
nvkm_mask(device, 0x614300 + soff, 0x00000707, (div << 8) | div);
}
static void
nv50_sor_power_wait(struct nvkm_device *device, u32 soff)
{
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
break;
);
}
void
nv50_sor_power(struct nvkm_ior *sor, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 shift = normal ? 0 : 16;
const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
const u32 field = 0x80000000 | (0x00000001 << shift);
nv50_sor_power_wait(device, soff);
nvkm_mask(device, 0x61c004 + soff, field, state);
nv50_sor_power_wait(device, soff);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
break;
);
}
void
nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
u32 ctrl = nvkm_rd32(device, 0x610b70 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = LVDS; state->link = 1; break;
case 1: state->proto = TMDS; state->link = 1; break;
case 2: state->proto = TMDS; state->link = 2; break;
case 5: state->proto = TMDS; state->link = 3; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x00000003;
}
static const struct nvkm_ior_func
nv50_sor = {
.state = nv50_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
};
static int
nv50_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&nv50_sor, disp, SOR, id, false);
}
int
nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
return 2;
}
static void
nv50_dac_clock(struct nvkm_ior *dac)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 doff = nv50_ior_base(dac);
nvkm_mask(device, 0x614280 + doff, 0x07070707, 0x00000000);
}
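/* CRT load detection: power the DAC up without a mode, drive the test
 * level onto the outputs, wait for the result to latch, then read it
 * back.  Bit 31 signals completion, and bits 29:27 report the sensed
 * load (presumably one bit each for R, G and B).
 */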
int
nv50_dac_sense(struct nvkm_ior *dac, u32 loadval)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 doff = nv50_ior_base(dac);
dac->func->power(dac, false, true, false, false, false);
nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
mdelay(9);
udelay(500);
loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
dac->func->power(dac, false, false, false, false, false);
if (!(loadval & 0x80000000))
return -ETIMEDOUT;
return (loadval & 0x38000000) >> 27;
}
static void
nv50_dac_power_wait(struct nvkm_device *device, const u32 doff)
{
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
break;
);
}
void
nv50_dac_power(struct nvkm_ior *dac, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 doff = nv50_ior_base(dac);
const u32 shift = normal ? 0 : 16;
const u32 state = 0x80000000 | (0x00000040 * ! pu |
0x00000010 * ! data |
0x00000004 * ! vsync |
0x00000001 * ! hsync) << shift;
const u32 field = 0xc0000000 | (0x00000055 << shift);
nv50_dac_power_wait(device, doff);
nvkm_mask(device, 0x61a004 + doff, field, state);
nv50_dac_power_wait(device, doff);
}
static void
nv50_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 coff = dac->id * 8 + (state == &dac->arm) * 4;
u32 ctrl = nvkm_rd32(device, 0x610b58 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = CRT; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x00000003;
}
static const struct nvkm_ior_func
nv50_dac = {
.state = nv50_dac_state,
.power = nv50_dac_power,
.sense = nv50_dac_sense,
.clock = nv50_dac_clock,
};
int
nv50_dac_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&nv50_dac, disp, DAC, id, false);
}
int
nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
return 3;
}
static void
nv50_head_vblank_put(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x61002c, (4 << head->id), 0);
}
static void
nv50_head_vblank_get(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x61002c, (4 << head->id), (4 << head->id));
}
static void
nv50_head_rgclk(struct nvkm_head *head, int div)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x614200 + (head->id * 0x800), 0x0000000f, div);
}
void
nv50_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x800;
/* vline read locks hline. */
*vline = nvkm_rd32(device, 0x616340 + hoff) & 0x0000ffff;
*hline = nvkm_rd32(device, 0x616344 + hoff) & 0x0000ffff;
}
static void
nv50_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x540 + (state == &head->arm) * 4;
u32 data;
data = nvkm_rd32(device, 0x610ae8 + hoff);
state->vblanke = (data & 0xffff0000) >> 16;
state->hblanke = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x610af0 + hoff);
state->vblanks = (data & 0xffff0000) >> 16;
state->hblanks = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x610af8 + hoff);
state->vtotal = (data & 0xffff0000) >> 16;
state->htotal = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x610b00 + hoff);
state->vsynce = (data & 0xffff0000) >> 16;
state->hsynce = (data & 0x0000ffff);
state->hz = (nvkm_rd32(device, 0x610ad0 + hoff) & 0x003fffff) * 1000;
}
static const struct nvkm_head_func
nv50_head = {
.state = nv50_head_state,
.rgpos = nv50_head_rgpos,
.rgclk = nv50_head_rgclk,
.vblank_get = nv50_head_vblank_get,
.vblank_put = nv50_head_vblank_put,
};
int
nv50_head_new(struct nvkm_disp *disp, int id)
{
return nvkm_head_new_(&nv50_head, disp, id);
}
int
nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
*pmask = 3;
return 2;
}
static void
nv50_disp_mthd_list(struct nvkm_disp *disp, int debug, u32 base, int c,
const struct nvkm_disp_mthd_list *list, int inst)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int i;
for (i = 0; list->data[i].mthd; i++) {
if (list->data[i].addr) {
u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
u32 mthd = list->data[i].mthd + (list->mthd * inst);
const char *name = list->data[i].name;
char mods[16];
if (prev != next)
snprintf(mods, sizeof(mods), "-> %08x", next);
else
snprintf(mods, sizeof(mods), "%13c", ' ');
nvkm_printk_(subdev, debug, info,
"\t%04x: %08x %s%s%s\n",
mthd, prev, mods, name ? " // " : "",
name ? name : "");
}
}
}
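/* Dump a channel's method state at the given debug level, comparing the
 * two hardware copies of each method (current and pending) and marking
 * with "-> value" those that are about to change.
 */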
void
nv50_disp_chan_mthd(struct nvkm_disp_chan *chan, int debug)
{
struct nvkm_disp *disp = chan->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
const struct nvkm_disp_chan_mthd *mthd = chan->mthd;
const struct nvkm_disp_mthd_list *list;
int i, j;
if (debug > subdev->debug)
return;
if (!mthd)
return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;
for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
const char *cname = mthd->name;
const char *sname = "";
char cname_[16], sname_[16];
if (mthd->addr) {
snprintf(cname_, sizeof(cname_), "%s %d",
mthd->name, chan->chid.user);
cname = cname_;
}
if (mthd->data[i].nr > 1) {
snprintf(sname_, sizeof(sname_), " - %s %d",
mthd->data[i].name, j);
sname = sname_;
}
nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
nv50_disp_mthd_list(disp, debug, base, mthd->prev,
list, j);
}
}
}
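/* Channel "awaken" notifications: arming acknowledges any pending bit in
 * 0x610020 and then unmasks the channel's bit in 0x610028; disarming does
 * the reverse.  Events are delivered to listeners via nvkm_event_ntfy().
 */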
static void
nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
nvkm_wr32(device, 0x610020, 0x00000001 << index);
}
static void
nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_wr32(device, 0x610020, 0x00000001 << index);
nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
}
void
nv50_disp_chan_uevent_send(struct nvkm_disp *disp, int chid)
{
nvkm_event_ntfy(&disp->uevent, chid, NVKM_DISP_EVENT_CHAN_AWAKEN);
}
const struct nvkm_event_func
nv50_disp_chan_uevent = {
.init = nv50_disp_chan_uevent_init,
.fini = nv50_disp_chan_uevent_fini,
};
u64
nv50_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
{
*psize = 0x1000;
return 0x640000 + (chan->chid.user * 0x1000);
}
void
nv50_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00010001 << chan->chid.user;
const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
nvkm_mask(device, 0x610028, mask, data);
}
static void
nv50_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_disp *disp = chan->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
nvkm_error(subdev, "ch %d timeout: %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
}
static int
nv50_disp_pioc_init(struct nvkm_disp_chan *chan)
{
struct nvkm_disp *disp = chan->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
nv50_disp_pioc_func = {
.init = nv50_disp_pioc_init,
.fini = nv50_disp_pioc_fini,
.intr = nv50_disp_chan_intr,
.user = nv50_disp_chan_user,
};
int
nv50_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -10, handle,
chan->chid.user << 28 | chan->chid.user);
}
static void
nv50_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* deactivate channel */
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
}
chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
static int
nv50_disp_dmac_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
return -EBUSY;
}
return 0;
}
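/* Resolve the client-supplied pushbuffer handle and encode it for the
 * hardware: bits 1:0 select the memory target (VRAM, non-coherent or
 * coherent system memory), and the address, shifted right by 8 bits, is
 * OR'd into the rest of the word.
 */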
int
nv50_disp_dmac_push(struct nvkm_disp_chan *chan, u64 object)
{
chan->memory = nvkm_umem_search(chan->object.client, object);
if (IS_ERR(chan->memory))
return PTR_ERR(chan->memory);
if (nvkm_memory_size(chan->memory) < 0x1000)
return -EINVAL;
switch (nvkm_memory_target(chan->memory)) {
case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
default:
return -EINVAL;
}
chan->push |= nvkm_memory_addr(chan->memory) >> 8;
return 0;
}
const struct nvkm_disp_chan_func
nv50_disp_dmac_func = {
.push = nv50_disp_dmac_push,
.init = nv50_disp_dmac_init,
.fini = nv50_disp_dmac_fini,
.intr = nv50_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = nv50_disp_dmac_bind,
};
const struct nvkm_disp_chan_user
nv50_disp_curs = {
.func = &nv50_disp_pioc_func,
.ctrl = 7,
.user = 7,
};
const struct nvkm_disp_chan_user
nv50_disp_oimm = {
.func = &nv50_disp_pioc_func,
.ctrl = 5,
.user = 5,
};
static const struct nvkm_disp_mthd_list
nv50_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x0009a0 },
{ 0x0088, 0x0009c0 },
{ 0x008c, 0x0009c8 },
{ 0x0090, 0x6109b4 },
{ 0x0094, 0x610970 },
{ 0x00a0, 0x610998 },
{ 0x00a4, 0x610964 },
{ 0x00c0, 0x610958 },
{ 0x00e0, 0x6109a8 },
{ 0x00e4, 0x6109d0 },
{ 0x00e8, 0x6109d8 },
{ 0x0100, 0x61094c },
{ 0x0104, 0x610984 },
{ 0x0108, 0x61098c },
{ 0x0800, 0x6109f8 },
{ 0x0808, 0x610a08 },
{ 0x080c, 0x610a10 },
{ 0x0810, 0x610a00 },
{}
}
};
static const struct nvkm_disp_chan_mthd
nv50_disp_ovly_mthd = {
.name = "Overlay",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_ovly_mthd_base },
{}
}
};
static const struct nvkm_disp_chan_user
nv50_disp_ovly = {
.func = &nv50_disp_dmac_func,
.ctrl = 3,
.user = 3,
.mthd = &nv50_disp_ovly_mthd,
};
static const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x0008c4 },
{ 0x0088, 0x0008d0 },
{ 0x008c, 0x0008dc },
{ 0x0090, 0x0008e4 },
{ 0x0094, 0x610884 },
{ 0x00a0, 0x6108a0 },
{ 0x00a4, 0x610878 },
{ 0x00c0, 0x61086c },
{ 0x00e0, 0x610858 },
{ 0x00e4, 0x610860 },
{ 0x00e8, 0x6108ac },
{ 0x00ec, 0x6108b4 },
{ 0x0100, 0x610894 },
{ 0x0110, 0x6108bc },
{ 0x0114, 0x61088c },
{}
}
};
const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_image = {
.mthd = 0x0400,
.addr = 0x000000,
.data = {
{ 0x0800, 0x6108f0 },
{ 0x0804, 0x6108fc },
{ 0x0808, 0x61090c },
{ 0x080c, 0x610914 },
{ 0x0810, 0x610904 },
{}
}
};
static const struct nvkm_disp_chan_mthd
nv50_disp_base_mthd = {
.name = "Base",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_base_mthd_base },
{ "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
static const struct nvkm_disp_chan_user
nv50_disp_base = {
.func = &nv50_disp_dmac_func,
.ctrl = 1,
.user = 1,
.mthd = &nv50_disp_base_mthd,
};
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x610bb8 },
{ 0x0088, 0x610b9c },
{ 0x008c, 0x000000 },
{}
}
};
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
{ 0x0400, 0x610b58 },
{ 0x0404, 0x610bdc },
{ 0x0420, 0x610828 },
{}
}
};
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
{ 0x0600, 0x610b70 },
{}
}
};
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_pior = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
{ 0x0700, 0x610b80 },
{}
}
};
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
{ 0x0800, 0x610ad8 },
{ 0x0804, 0x610ad0 },
{ 0x0808, 0x610a48 },
{ 0x080c, 0x610a78 },
{ 0x0810, 0x610ac0 },
{ 0x0814, 0x610af8 },
{ 0x0818, 0x610b00 },
{ 0x081c, 0x610ae8 },
{ 0x0820, 0x610af0 },
{ 0x0824, 0x610b08 },
{ 0x0828, 0x610b10 },
{ 0x082c, 0x610a68 },
{ 0x0830, 0x610a60 },
{ 0x0834, 0x000000 },
{ 0x0838, 0x610a40 },
{ 0x0840, 0x610a24 },
{ 0x0844, 0x610a2c },
{ 0x0848, 0x610aa8 },
{ 0x084c, 0x610ab0 },
{ 0x0860, 0x610a84 },
{ 0x0864, 0x610a90 },
{ 0x0868, 0x610b18 },
{ 0x086c, 0x610b20 },
{ 0x0870, 0x610ac8 },
{ 0x0874, 0x610a38 },
{ 0x0880, 0x610a58 },
{ 0x0884, 0x610a9c },
{ 0x08a0, 0x610a70 },
{ 0x08a4, 0x610a50 },
{ 0x08a8, 0x610ae0 },
{ 0x08c0, 0x610b28 },
{ 0x08c4, 0x610b30 },
{ 0x08c8, 0x610b40 },
{ 0x08d4, 0x610b38 },
{ 0x08d8, 0x610b48 },
{ 0x08dc, 0x610b50 },
{ 0x0900, 0x610a18 },
{ 0x0904, 0x610ab8 },
{}
}
};
static const struct nvkm_disp_chan_mthd
nv50_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
{ "DAC", 3, &nv50_disp_core_mthd_dac },
{ "SOR", 2, &nv50_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &nv50_disp_core_mthd_head },
{}
}
};
static void
nv50_disp_core_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
/* deactivate channel */
nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
break;
) < 0) {
nvkm_error(subdev, "core fini: %08x\n",
nvkm_rd32(device, 0x610200));
}
chan->suspend_put = nvkm_rd32(device, 0x640000);
}
static int
nv50_disp_core_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
/* attempt to unstick channel from some unknown state */
if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x610204, chan->push);
nvkm_wr32(device, 0x610208, 0x00010000);
nvkm_wr32(device, 0x61020c, 0x00000000);
nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610200, 0x01000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "core init: %08x\n",
nvkm_rd32(device, 0x610200));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
nv50_disp_core_func = {
.push = nv50_disp_dmac_push,
.init = nv50_disp_core_init,
.fini = nv50_disp_core_fini,
.intr = nv50_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = nv50_disp_dmac_bind,
};
static const struct nvkm_disp_chan_user
nv50_disp_core = {
.func = &nv50_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &nv50_disp_core_mthd,
};
static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_outp *iedt)
{
struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
const u8 l = ffs(outp->info.link);
const u16 t = outp->info.hasht;
const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
if (!data)
OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
return data;
}
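/* The match value built above selects a single IEDT entry: bit
 * (8 + head->id) flags the head, bits 7:6 carry ffs() of the DCB link
 * mask, and the low bits carry the OR mask.  For example, head 1 on
 * link 2 of an output with info.or = 0x4:
 *
 *	m = (0x0100 << 1) | (2 << 6) | 0x4 = 0x0284
 */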
static void
nv50_disp_super_ied_on(struct nvkm_head *head,
struct nvkm_ior *ior, int id, u32 khz)
{
struct nvkm_subdev *subdev = &head->disp->engine.subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_outp *outp = ior->asy.outp;
struct nvbios_ocfg iedtrs;
struct nvbios_outp iedt;
u8 ver, hdr, cnt, len, flags = 0x00;
u32 data;
if (!outp) {
IOR_DBG(ior, "nothing to attach");
return;
}
/* Lookup IED table for the device. */
data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
if (!data)
return;
/* Lookup IEDT runtime settings for the current configuration. */
if (ior->type == SOR) {
if (ior->asy.proto == LVDS) {
if (head->asy.or.depth == 24)
flags |= 0x02;
}
if (ior->asy.link == 3)
flags |= 0x01;
}
data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
&ver, &hdr, &cnt, &len, &iedtrs);
if (!data) {
OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
ior->asy.proto_evo, flags);
return;
}
/* Execute the OnInt[23] script for the current frequency. */
data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
if (!data) {
OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
id, ior->asy.proto_evo, flags, khz);
return;
}
nvbios_init(subdev, data,
init.outp = &outp->info;
init.or = ior->id;
init.link = ior->asy.link;
init.head = head->id;
);
}
static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
struct nvkm_outp *outp = ior->arm.outp;
struct nvbios_outp iedt;
u8 ver, hdr, cnt, len;
u32 data;
if (!outp) {
IOR_DBG(ior, "nothing attached");
return;
}
data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
if (!data)
return;
nvbios_init(&head->disp->engine.subdev, iedt.script[id],
init.outp = &outp->info;
init.or = ior->id;
init.link = ior->arm.link;
init.head = head->id;
);
}
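/* Aside: nvbios_init() is a macro rather than a function - the
 * trailing "init.xxx = ...;" statements in the two callers above
 * execute against a local "struct nvbios_init init" that the macro
 * declares before invoking the script interpreter, which is why plain
 * assignments appear in what looks like an argument list.
 */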
static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head *head)
{
struct nvkm_ior *ior;
list_for_each_entry(ior, &head->disp->iors, head) {
if (ior->asy.head & (1 << head->id)) {
HEAD_DBG(head, "to %s", ior->name);
return ior;
}
}
HEAD_DBG(head, "nothing to attach");
return NULL;
}
static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head *head)
{
struct nvkm_ior *ior;
list_for_each_entry(ior, &head->disp->iors, head) {
if (ior->arm.head & (1 << head->id)) {
HEAD_DBG(head, "on %s", ior->name);
return ior;
}
}
HEAD_DBG(head, "nothing attached");
return NULL;
}
void
nv50_disp_super_3_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_ior *ior;
/* Determine which OR, if any, we're attaching to the head. */
HEAD_DBG(head, "supervisor 3.0");
ior = nv50_disp_super_ior_asy(head);
if (!ior)
return;
/* Execute OnInt3 IED script. */
nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);
/* OR-specific handling. */
if (ior->func->war_3)
ior->func->war_3(ior);
}
static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
struct nvkm_subdev *subdev = &head->disp->engine.subdev;
const u32 khz = head->asy.hz / 1000;
const u32 linkKBps = ior->dp.bw * 27000;
const u32 symbol = 100000;
int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
int TU, VTUi, VTUf, VTUa;
u64 link_data_rate, link_ratio, unk;
u32 best_diff = 64 * symbol;
u64 h, v;
/* symbols/hblank - algorithm taken from comments in tegra driver */
h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
h = h * linkKBps;
do_div(h, khz);
h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);
/* symbols/vblank - algorithm taken from comments in tegra driver */
v = head->asy.vblanks - head->asy.vblanke - 25;
v = v * linkKBps;
do_div(v, khz);
v = v - ((36 / ior->dp.nr) + 3) - 1;
ior->func->dp->audio_sym(ior, head->id, h, v);
/* watermark / activesym */
link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;
/* calculate ratio of packed data rate to link symbol rate */
link_ratio = link_data_rate * symbol;
do_div(link_ratio, linkKBps);
for (TU = 64; ior->func->dp->activesym && TU >= 32; TU--) {
/* calculate average number of valid symbols in each TU */
u32 tu_valid = link_ratio * TU;
u32 calc, diff;
/* find a hw representation for the fraction.. */
VTUi = tu_valid / symbol;
calc = VTUi * symbol;
diff = tu_valid - calc;
if (diff) {
if (diff >= (symbol / 2)) {
VTUf = symbol / (symbol - diff);
if (symbol - (VTUf * diff))
VTUf++;
if (VTUf <= 15) {
VTUa = 1;
calc += symbol - (symbol / VTUf);
} else {
VTUa = 0;
VTUf = 1;
calc += symbol;
}
} else {
VTUa = 0;
VTUf = min((int)(symbol / diff), 15);
calc += symbol / VTUf;
}
diff = calc - tu_valid;
} else {
/* no remainder, but the hw doesn't like the fractional
* part to be zero. decrement the integer part and
* have the fraction add a whole symbol back
*/
VTUa = 0;
VTUf = 1;
VTUi--;
}
if (diff < best_diff) {
best_diff = diff;
bestTU = TU;
bestVTUa = VTUa;
bestVTUf = VTUf;
bestVTUi = VTUi;
if (diff == 0)
break;
}
}
if (ior->func->dp->activesym) {
if (!bestTU) {
nvkm_error(subdev, "unable to determine dp config\n");
return;
}
ior->func->dp->activesym(ior, head->id, bestTU, bestVTUa, bestVTUf, bestVTUi);
} else {
bestTU = 64;
}
/* XXX close to vbios numbers, but not right */
unk = (symbol - link_ratio) * bestTU;
unk *= link_ratio;
do_div(unk, symbol);
do_div(unk, symbol);
unk += 6;
ior->func->dp->watermark(ior, head->id, unk);
}
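/* Worked example of the search above, with assumed numbers: a
 * 148500khz mode at 24bpp over 4 lanes of 2.7Gb/s (ior->dp.bw = 0xa):
 *
 *	linkKBps       = 0x0a * 27000             = 270000
 *	link_data_rate = (148500 * 24 / 8) / 4    = 111375
 *	link_ratio     = 111375 * 100000 / 270000 = 41250
 *
 * For TU = 64: tu_valid = 41250 * 64 = 2640000, so VTUi = 26 and
 * diff = 40000.  That is below symbol/2, so VTUf = min(100000/40000,
 * 15) = 2, VTUa = 0, calc = 2600000 + 100000/2 = 2650000, leaving a
 * residual error of 10000 for smaller TU values to compete against.
 */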
void
nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
{
const u32 khz = head->asy.hz / 1000;
struct nvkm_outp *outp;
struct nvkm_ior *ior;
/* Determine which OR, if any, we're attaching to the head. */
HEAD_DBG(head, "supervisor 2.2");
ior = nv50_disp_super_ior_asy(head);
if (!ior)
return;
outp = ior->asy.outp;
/* For some reason, NVIDIA decided not to:
*
* A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
* and
* B) Use SetControlOutputResource.PixelDepth on LVDS.
*
* Override the values we usually read from HW with the same
* data we pass through an ioctl instead.
*/
if (outp && ior->type == SOR && ior->asy.proto == LVDS) {
head->asy.or.depth = outp->lvds.bpc8 ? 24 : 18;
ior->asy.link = outp->lvds.dual ? 3 : 1;
}
/* Handle any link training, etc. */
if (outp && outp->func->acquire)
outp->func->acquire(outp);
/* Execute OnInt2 IED script. */
nv50_disp_super_ied_on(head, ior, 0, khz);
/* Program RG clock divider. */
head->func->rgclk(head, ior->asy.rgdiv);
/* Mode-specific internal DP configuration. */
if (ior->type == SOR && ior->asy.proto == DP)
nv50_disp_super_2_2_dp(head, ior);
/* OR-specific handling. */
ior->func->clock(ior);
if (ior->func->war_2)
ior->func->war_2(ior);
}
void
nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_devinit *devinit = disp->engine.subdev.device->devinit;
const u32 khz = head->asy.hz / 1000;
HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
if (khz)
nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
}
void
nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_outp *outp;
struct nvkm_ior *ior;
/* Determine which OR, if any, we're detaching from the head. */
HEAD_DBG(head, "supervisor 2.0");
ior = nv50_disp_super_ior_arm(head);
if (!ior)
return;
/* Execute OffInt2 IED script. */
nv50_disp_super_ied_off(head, ior, 2);
/* If we're shutting down the OR's only active head, execute
* the output path's disable function.
*/
if (ior->arm.head == (1 << head->id)) {
if ((outp = ior->arm.outp) && outp->func->disable)
outp->func->disable(outp, ior);
}
}
void
nv50_disp_super_1_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
struct nvkm_ior *ior;
/* Determine which OR, if any, we're detaching from the head. */
HEAD_DBG(head, "supervisor 1.0");
ior = nv50_disp_super_ior_arm(head);
if (!ior)
return;
/* Execute OffInt1 IED script. */
nv50_disp_super_ied_off(head, ior, 1);
}
void
nv50_disp_super_1(struct nvkm_disp *disp)
{
struct nvkm_head *head;
struct nvkm_ior *ior;
list_for_each_entry(head, &disp->heads, head) {
head->func->state(head, &head->arm);
head->func->state(head, &head->asy);
}
list_for_each_entry(ior, &disp->iors, head) {
ior->func->state(ior, &ior->arm);
ior->func->state(ior, &ior->asy);
}
}
void
nv50_disp_super(struct work_struct *work)
{
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
u32 super;
mutex_lock(&disp->super.mutex);
super = nvkm_rd32(device, 0x610030);
nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);
if (disp->super.pending & 0x00000010) {
nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
nv50_disp_super_1(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000020 << head->id)))
continue;
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_1_0(disp, head);
}
} else
if (disp->super.pending & 0x00000020) {
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_2_0(disp, head);
}
nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000200 << head->id)))
continue;
nv50_disp_super_2_1(disp, head);
}
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_2_2(disp, head);
}
} else
if (disp->super.pending & 0x00000040) {
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000080 << head->id)))
continue;
nv50_disp_super_3_0(disp, head);
}
}
nvkm_wr32(device, 0x610030, 0x80000000);
mutex_unlock(&disp->super.mutex);
}
const struct nvkm_enum
nv50_disp_intr_error_type[] = {
{ 0, "NONE" },
{ 1, "PUSHBUFFER_ERR" },
{ 2, "TRAP" },
{ 3, "RESERVED_METHOD" },
{ 4, "INVALID_ARG" },
{ 5, "INVALID_STATE" },
{ 7, "UNRESOLVABLE_HANDLE" },
{}
};
static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
{ 0x00, "" },
{}
};
static void
nv50_disp_intr_error(struct nvkm_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
u32 code = (addr & 0x00ff0000) >> 16;
u32 type = (addr & 0x00007000) >> 12;
u32 mthd = (addr & 0x00000ffc);
const struct nvkm_enum *ec, *et;
et = nvkm_enum_find(nv50_disp_intr_error_type, type);
ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
nvkm_error(subdev,
"ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
type, et ? et->name : "", code, ec ? ec->name : "",
chid, mthd, data);
if (chid < ARRAY_SIZE(disp->chan)) {
switch (mthd) {
case 0x0080:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}
nvkm_wr32(device, 0x610020, 0x00010000 << chid);
nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
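/* Field layout used by the decode above: the 0x610080 status word
 * holds the error code in bits 23:16, the error type in bits 14:12
 * (indexing nv50_disp_intr_error_type), and the failing method offset
 * in bits 11:2.  E.g. addr = 0x00201080 decodes as code 0x20, type 1
 * (PUSHBUFFER_ERR), mthd 0x0080.
 */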
void
nv50_disp_intr(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 intr0 = nvkm_rd32(device, 0x610020);
u32 intr1 = nvkm_rd32(device, 0x610024);
while (intr0 & 0x001f0000) {
u32 chid = __ffs(intr0 & 0x001f0000) - 16;
nv50_disp_intr_error(disp, chid);
intr0 &= ~(0x00010000 << chid);
}
while (intr0 & 0x0000001f) {
u32 chid = __ffs(intr0 & 0x0000001f);
nv50_disp_chan_uevent_send(disp, chid);
intr0 &= ~(0x00000001 << chid);
}
if (intr1 & 0x00000004) {
nvkm_disp_vblank(disp, 0);
nvkm_wr32(device, 0x610024, 0x00000004);
}
if (intr1 & 0x00000008) {
nvkm_disp_vblank(disp, 1);
nvkm_wr32(device, 0x610024, 0x00000008);
}
if (intr1 & 0x00000070) {
disp->super.pending = (intr1 & 0x00000070);
queue_work(disp->super.wq, &disp->super.work);
nvkm_wr32(device, 0x610024, disp->super.pending);
}
}
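/* Summary of the demux above: intr0 carries per-channel error bits at
 * 20:16 and user-event bits at 4:0, drained lowest-first with __ffs();
 * in intr1, bits 2 and 3 are per-head vblank, and bits 6:4 are the
 * supervisor stages, which are only acked after being latched into
 * disp->super.pending and handed to the workqueue, since the handshake
 * completes later in nv50_disp_super().
 */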
void
nv50_disp_fini(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
/* disable all interrupts */
nvkm_wr32(device, 0x610024, 0x00000000);
nvkm_wr32(device, 0x610020, 0x00000000);
}
int
nv50_disp_init(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_head *head;
u32 tmp;
int i;
/* The below segments of code copying values from one register to
* another appear to inform EVO of the display capabilities or
* something similar. It's not known what the 0x614004 caps are for..
*/
tmp = nvkm_rd32(device, 0x614004);
nvkm_wr32(device, 0x610184, tmp);
/* ... CRTC caps */
list_for_each_entry(head, &disp->heads, head) {
tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
}
/* ... DAC caps */
for (i = 0; i < disp->dac.nr; i++) {
tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
}
/* ... SOR caps */
for (i = 0; i < disp->sor.nr; i++) {
tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
}
/* ... PIOR caps */
for (i = 0; i < disp->pior.nr; i++) {
tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
}
/* steal display away from vbios, or something like that */
if (nvkm_rd32(device, 0x610024) & 0x00000100) {
nvkm_wr32(device, 0x610024, 0x00000100);
nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
break;
) < 0)
return -EBUSY;
}
/* point at display engine memory area (hash table, objects) */
nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
/* enable supervisor interrupts, disable everything else */
nvkm_wr32(device, 0x61002c, 0x00000370);
nvkm_wr32(device, 0x610028, 0x00000000);
return 0;
}
int
nv50_disp_oneinit(struct nvkm_disp *disp)
{
const struct nvkm_disp_func *func = disp->func;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ret, i;
if (func->wndw.cnt) {
disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
nvkm_debug(subdev, "Window(s): %d (%08lx)\n", disp->wndw.nr, disp->wndw.mask);
}
disp->head.nr = func->head.cnt(disp, &disp->head.mask);
nvkm_debug(subdev, " Head(s): %d (%02lx)\n", disp->head.nr, disp->head.mask);
for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
ret = func->head.new(disp, i);
if (ret)
return ret;
}
if (func->dac.cnt) {
disp->dac.nr = func->dac.cnt(disp, &disp->dac.mask);
nvkm_debug(subdev, " DAC(s): %d (%02lx)\n", disp->dac.nr, disp->dac.mask);
for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
ret = func->dac.new(disp, i);
if (ret)
return ret;
}
}
if (func->pior.cnt) {
disp->pior.nr = func->pior.cnt(disp, &disp->pior.mask);
nvkm_debug(subdev, " PIOR(s): %d (%02lx)\n", disp->pior.nr, disp->pior.mask);
for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
ret = func->pior.new(disp, i);
if (ret)
return ret;
}
}
disp->sor.nr = func->sor.cnt(disp, &disp->sor.mask);
nvkm_debug(subdev, " SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
ret = func->sor.new(disp, i);
if (ret)
return ret;
}
ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
if (ret)
return ret;
return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
0x1000, 0, disp->inst, &disp->ramht);
}
static const struct nvkm_disp_func
nv50_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0, 0, NV50_DISP },
.user = {
{{0,0,NV50_DISP_CURSOR }, nvkm_disp_chan_new, &nv50_disp_curs },
{{0,0,NV50_DISP_OVERLAY }, nvkm_disp_chan_new, &nv50_disp_oimm },
{{0,0,NV50_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &nv50_disp_base },
{{0,0,NV50_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &nv50_disp_core },
{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &nv50_disp_ovly },
{}
}
};
int
nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static const struct nvkm_disp_func
gk110_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
.sor = { .cnt = gf119_sor_cnt, .new = gk104_sor_new },
.root = { 0,0,GK110_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GK110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GK110_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gk104_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gk104_disp_ovly },
{}
},
};
int
gk110_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gk110_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "ior.h"
static const char *
nvkm_ior_name[] = {
[DAC] = "DAC",
[SOR] = "SOR",
[PIOR] = "PIOR",
};
struct nvkm_ior *
nvkm_ior_find(struct nvkm_disp *disp, enum nvkm_ior_type type, int id)
{
struct nvkm_ior *ior;
list_for_each_entry(ior, &disp->iors, head) {
if (ior->type == type && (id < 0 || ior->id == id))
return ior;
}
return NULL;
}
void
nvkm_ior_del(struct nvkm_ior **pior)
{
struct nvkm_ior *ior = *pior;
if (ior) {
IOR_DBG(ior, "dtor");
list_del(&ior->head);
kfree(*pior);
*pior = NULL;
}
}
int
nvkm_ior_new_(const struct nvkm_ior_func *func, struct nvkm_disp *disp,
enum nvkm_ior_type type, int id, bool hda)
{
struct nvkm_ior *ior;
if (!(ior = kzalloc(sizeof(*ior), GFP_KERNEL)))
return -ENOMEM;
ior->func = func;
ior->disp = disp;
ior->type = type;
ior->id = id;
ior->hda = hda;
snprintf(ior->name, sizeof(ior->name), "%s-%d", nvkm_ior_name[ior->type], ior->id);
list_add_tail(&ior->head, &disp->iors);
IOR_DBG(ior, "ctor");
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <nvif/class.h>
void
gm200_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
const u32 shift = sor->func->dp->lanes[ln] * 8;
u32 data[4];
pu &= 0x0f;
data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
data[2] = nvkm_rd32(device, 0x61c130 + loff);
if ((data[2] & 0x00000f00) < (pu << 8) || ln == 0)
data[2] = (data[2] & ~0x00000f00) | (pu << 8);
nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
nvkm_wr32(device, 0x61c130 + loff, data[2]);
data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
}
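/* Register layout implied above: 0x61c118, 0x61c120 and 0x61c13c hold
 * one byte per lane of drive current, pre-emphasis and post-cursor
 * respectively, with the byte position taken from the per-chipset
 * lanes[] map, while the power level (pu) sits in bits 11:8 of
 * 0x61c130 and is only ever raised, except when writing lane 0.
 */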
const struct nvkm_ior_func_dp
gm200_sor_dp = {
.lanes = { 0, 1, 2, 3 },
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gm200_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
.watermark = gf119_sor_dp_watermark,
};
void
gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(ior);
const u32 ctrl = scdc & 0x3;
nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
ior->tmds.high_speed = !!(scdc & 0x2);
}
const struct nvkm_ior_func_hdmi
gm200_sor_hdmi = {
.ctrl = gk104_sor_hdmi_ctrl,
.scdc = gm200_sor_hdmi_scdc,
.infoframe_avi = gk104_sor_hdmi_infoframe_avi,
.infoframe_vsi = gk104_sor_hdmi_infoframe_vsi,
};
void
gm200_sor_route_set(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_device *device = outp->disp->engine.subdev.device;
const u32 moff = __ffs(outp->info.or) * 0x100;
const u32 sor = ior ? ior->id + 1 : 0;
u32 link = ior ? (ior->asy.link == 2) : 0;
if (outp->info.sorconf.link & 1) {
nvkm_mask(device, 0x612308 + moff, 0x0000001f, link << 4 | sor);
link++;
}
if (outp->info.sorconf.link & 2)
nvkm_mask(device, 0x612388 + moff, 0x0000001f, link << 4 | sor);
}
int
gm200_sor_route_get(struct nvkm_outp *outp, int *link)
{
struct nvkm_device *device = outp->disp->engine.subdev.device;
const int sublinks = outp->info.sorconf.link;
int lnk[2], sor[2], m, s;
for (*link = 0, m = __ffs(outp->info.or) * 2, s = 0; s < 2; m++, s++) {
if (sublinks & BIT(s)) {
u32 data = nvkm_rd32(device, 0x612308 + (m * 0x80));
lnk[s] = (data & 0x00000010) >> 4;
sor[s] = (data & 0x0000000f);
if (!sor[s])
return -1;
*link |= lnk[s];
}
}
if (sublinks == 3) {
if (sor[0] != sor[1] || WARN_ON(lnk[0] || !lnk[1]))
return -1;
}
return ((sublinks & 1) ? sor[0] : sor[1]) - 1;
}
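/* Route register layout used by the get/set pair above: one word per
 * sublink at 0x612308 + n * 0x80, with the target SOR in bits 3:0
 * (0 means unrouted, so the id is stored +1) and the SOR sublink in
 * bit 4.  E.g. routing both sublinks of an output to SOR1 writes 0x02
 * to the first word and 0x12 to the second.
 */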
static const struct nvkm_ior_func
gm200_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
},
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gm200_sor_hdmi,
.dp = &gm200_sor_dp,
.hda = &gf119_sor_hda,
};
static int
gm200_sor_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 hda;
if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
hda = nvkm_rd32(device, 0x101034);
return nvkm_ior_new_(&gm200_sor, disp, SOR, id, hda & BIT(id));
}
static const struct nvkm_disp_func
gm200_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
.sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
.root = { 0,0,GM200_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GK110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GM200_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gk104_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gk104_disp_ovly },
{}
},
};
int
gm200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gm200_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c |
// SPDX-License-Identifier: MIT
#include "hdmi.h"
void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
u8 *raw_frame, ssize_t len)
{
u32 header = 0;
u32 subpack0_low = 0;
u32 subpack0_high = 0;
u32 subpack1_low = 0;
u32 subpack1_high = 0;
switch (len) {
/*
* "When in doubt, use brute force."
* -- Ken Thompson.
*/
default:
/*
* We presume that no valid frame is longer than 17
* octets, including header... And truncate to that
* if it's longer.
*/
case 17:
subpack1_high = (raw_frame[16] << 16);
fallthrough;
case 16:
subpack1_high |= (raw_frame[15] << 8);
fallthrough;
case 15:
subpack1_high |= raw_frame[14];
fallthrough;
case 14:
subpack1_low = (raw_frame[13] << 24);
fallthrough;
case 13:
subpack1_low |= (raw_frame[12] << 16);
fallthrough;
case 12:
subpack1_low |= (raw_frame[11] << 8);
fallthrough;
case 11:
subpack1_low |= raw_frame[10];
fallthrough;
case 10:
subpack0_high = (raw_frame[9] << 16);
fallthrough;
case 9:
subpack0_high |= (raw_frame[8] << 8);
fallthrough;
case 8:
subpack0_high |= raw_frame[7];
fallthrough;
case 7:
subpack0_low = (raw_frame[6] << 24);
fallthrough;
case 6:
subpack0_low |= (raw_frame[5] << 16);
fallthrough;
case 5:
subpack0_low |= (raw_frame[4] << 8);
fallthrough;
case 4:
subpack0_low |= raw_frame[3];
fallthrough;
case 3:
header = (raw_frame[2] << 16);
fallthrough;
case 2:
header |= (raw_frame[1] << 8);
fallthrough;
case 1:
header |= raw_frame[0];
fallthrough;
case 0:
break;
}
packed_frame->header = header;
packed_frame->subpack0_low = subpack0_low;
packed_frame->subpack0_high = subpack0_high;
packed_frame->subpack1_low = subpack1_low;
packed_frame->subpack1_high = subpack1_high;
}
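/* For reference, the byte-to-word mapping produced above for a full
 * 17-octet frame (3-byte header plus two 7-byte subpacks), each word
 * packed little-endian:
 *
 *	header        =          raw[2]:raw[1]:raw[0]
 *	subpack0_low  =   raw[6]:raw[5]:raw[4]:raw[3]
 *	subpack0_high =          raw[9]:raw[8]:raw[7]
 *	subpack1_low  = raw[13]:raw[12]:raw[11]:raw[10]
 *	subpack1_high =        raw[16]:raw[15]:raw[14]
 */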
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static const struct nvkm_disp_mthd_list
gt200_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x6109a0 },
{ 0x0088, 0x6109c0 },
{ 0x008c, 0x6109c8 },
{ 0x0090, 0x6109b4 },
{ 0x0094, 0x610970 },
{ 0x00a0, 0x610998 },
{ 0x00a4, 0x610964 },
{ 0x00b0, 0x610c98 },
{ 0x00b4, 0x610ca4 },
{ 0x00b8, 0x610cac },
{ 0x00c0, 0x610958 },
{ 0x00e0, 0x6109a8 },
{ 0x00e4, 0x6109d0 },
{ 0x00e8, 0x6109d8 },
{ 0x0100, 0x61094c },
{ 0x0104, 0x610984 },
{ 0x0108, 0x61098c },
{ 0x0800, 0x6109f8 },
{ 0x0808, 0x610a08 },
{ 0x080c, 0x610a10 },
{ 0x0810, 0x610a00 },
{}
}
};
static const struct nvkm_disp_chan_mthd
gt200_disp_ovly_mthd = {
.name = "Overlay",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, >200_disp_ovly_mthd_base },
{}
}
};
const struct nvkm_disp_chan_user
gt200_disp_ovly = {
.func = &nv50_disp_dmac_func,
.ctrl = 3,
.user = 3,
.mthd = >200_disp_ovly_mthd,
};
static const struct nvkm_disp_func
gt200_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,GT200_DISP },
.user = {
{{0,0, G82_DISP_CURSOR }, nvkm_disp_chan_new, & nv50_disp_curs },
{{0,0, G82_DISP_OVERLAY }, nvkm_disp_chan_new, & nv50_disp_oimm },
{{0,0,GT200_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,GT200_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g84_disp_core },
{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, >200_disp_ovly },
{}
},
};
int
gt200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(>200_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <subdev/timer.h>
#include <nvif/class.h>
static int
gp102_disp_dmac_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "ch %d init: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
gp102_disp_dmac_func = {
.push = nv50_disp_dmac_push,
.init = gp102_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.intr = gf119_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = gf119_disp_dmac_bind,
};
static const struct nvkm_disp_chan_user
gp102_disp_curs = {
.func = &gf119_disp_pioc_func,
.ctrl = 13,
.user = 17,
};
static const struct nvkm_disp_chan_user
gp102_disp_oimm = {
.func = &gf119_disp_pioc_func,
.ctrl = 9,
.user = 13,
};
static const struct nvkm_disp_chan_user
gp102_disp_ovly = {
.func = &gp102_disp_dmac_func,
.ctrl = 5,
.user = 5,
.mthd = &gk104_disp_ovly_mthd,
};
static const struct nvkm_disp_chan_user
gp102_disp_base = {
.func = &gp102_disp_dmac_func,
.ctrl = 1,
.user = 1,
.mthd = &gf119_disp_base_mthd,
};
static int
gp102_disp_core_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x611494, chan->push);
nvkm_wr32(device, 0x611498, 0x00010000);
nvkm_wr32(device, 0x61149c, 0x00000001);
nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610490, 0x01000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "core init: %08x\n",
nvkm_rd32(device, 0x610490));
return -EBUSY;
}
return 0;
}
static const struct nvkm_disp_chan_func
gp102_disp_core_func = {
.push = nv50_disp_dmac_push,
.init = gp102_disp_core_init,
.fini = gf119_disp_core_fini,
.intr = gf119_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = gf119_disp_dmac_bind,
};
static const struct nvkm_disp_chan_user
gp102_disp_core = {
.func = &gp102_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &gk104_disp_core_mthd,
};
static void
gp102_disp_intr_error(struct nvkm_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
chid, (mthd & 0x0000ffc), data, mthd, unkn);
if (chid < ARRAY_SIZE(disp->chan)) {
switch (mthd & 0xffc) {
case 0x0080:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}
nvkm_wr32(device, 0x61009c, (1 << chid));
nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
}
static const struct nvkm_disp_func
gp102_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gp102_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
.root = { 0,0,GP102_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gp102_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gp102_disp_oimm },
{{0,0,GK110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gp102_disp_base },
{{0,0,GP102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gp102_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gp102_disp_ovly },
{}
},
};
int
gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gp102_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uconn(p) container_of((p), struct nvkm_conn, object)
#include "conn.h"
#include "outp.h"
#include <core/client.h>
#include <core/event.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <nvif/if0011.h>
static int
nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits)
{
union nvif_conn_event_args args;
args.v0.version = 0;
args.v0.types = 0;
if (bits & NVKM_I2C_PLUG)
args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
if (bits & NVKM_I2C_UNPLUG)
args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
if (bits & NVKM_I2C_IRQ)
args.v0.types |= NVIF_CONN_EVENT_V0_IRQ;
return object->client->event(token, &args, sizeof(args.v0));
}
static int
nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
{
union nvif_conn_event_args args;
args.v0.version = 0;
args.v0.types = 0;
if (bits & NVKM_GPIO_HI)
args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
if (bits & NVKM_GPIO_LO)
args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
return object->client->event(token, &args, sizeof(args.v0));
}
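/* The two translators above map hardware event bits onto the common
 * NVIF connector events: DP connectors report through the i2c/aux
 * event source (plug, unplug and IRQ), while everything else uses a
 * GPIO line whose level going high means plug and low means unplug -
 * which is why the GPIO path carries no IRQ mapping.
 */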
static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
struct nvkm_conn *conn = nvkm_uconn(object);
struct nvkm_device *device = conn->disp->engine.subdev.device;
struct nvkm_outp *outp;
union nvif_conn_event_args *args = argv;
u64 bits = 0;
if (!uevent) {
if (conn->info.hpd == DCB_GPIO_UNUSED)
return -ENOSYS;
return 0;
}
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
list_for_each_entry(outp, &conn->disp->outps, head) {
if (outp->info.connector == conn->index)
break;
}
if (&outp->head == &conn->disp->outps)
return -EINVAL;
if (outp->dp.aux && !outp->info.location) {
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_I2C_IRQ;
return nvkm_uevent_add(uevent, &device->i2c->event, outp->dp.aux->id, bits,
nvkm_uconn_uevent_aux);
}
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_GPIO_HI;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
/* TODO: support DP IRQ on ANX9805 and remove this hack. */
if (!outp->info.location)
return -EINVAL;
}
return nvkm_uevent_add(uevent, &device->gpio->event, conn->info.hpd, bits,
nvkm_uconn_uevent_gpio);
}
static int
nvkm_uconn_mthd_hpd_status(struct nvkm_conn *conn, void *argv, u32 argc)
{
struct nvkm_gpio *gpio = conn->disp->engine.subdev.device->gpio;
union nvif_conn_hpd_status_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
args->v0.support = gpio && conn->info.hpd != DCB_GPIO_UNUSED;
args->v0.present = 0;
if (args->v0.support) {
int ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->info.hpd);
if (WARN_ON(ret < 0)) {
args->v0.support = false;
return 0;
}
args->v0.present = ret;
}
return 0;
}
static int
nvkm_uconn_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_conn *conn = nvkm_uconn(object);
switch (mthd) {
case NVIF_CONN_V0_HPD_STATUS: return nvkm_uconn_mthd_hpd_status(conn, argv, argc);
default:
break;
}
return -EINVAL;
}
static void *
nvkm_uconn_dtor(struct nvkm_object *object)
{
struct nvkm_conn *conn = nvkm_uconn(object);
struct nvkm_disp *disp = conn->disp;
spin_lock(&disp->client.lock);
conn->object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_uconn = {
.dtor = nvkm_uconn_dtor,
.mthd = nvkm_uconn_mthd,
.uevent = nvkm_uconn_uevent,
};
int
nvkm_uconn_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct nvkm_conn *cont, *conn = NULL;
union nvif_conn_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
list_for_each_entry(cont, &disp->conns, head) {
if (cont->index == args->v0.id) {
conn = cont;
break;
}
}
if (!conn)
return -EINVAL;
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!conn->object.func) {
nvkm_object_ctor(&nvkm_uconn, oclass, &conn->object);
*pobject = &conn->object;
ret = 0;
}
spin_unlock(&disp->client.lock);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "conn.h"
#include "outp.h"
#include "priv.h"
#include <subdev/gpio.h>
#include <nvif/event.h>
void
nvkm_conn_fini(struct nvkm_conn *conn)
{
}
void
nvkm_conn_init(struct nvkm_conn *conn)
{
}
void
nvkm_conn_del(struct nvkm_conn **pconn)
{
struct nvkm_conn *conn = *pconn;
if (conn) {
kfree(*pconn);
*pconn = NULL;
}
}
static void
nvkm_conn_ctor(struct nvkm_disp *disp, int index, struct nvbios_connE *info,
struct nvkm_conn *conn)
{
static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
struct dcb_gpio_func func;
int ret;
conn->disp = disp;
conn->index = index;
conn->info = *info;
conn->info.hpd = DCB_GPIO_UNUSED;
CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
info->type, info->location, info->hpd, info->dp,
info->di, info->sr, info->lcdid);
if ((info->hpd = ffs(info->hpd))) {
if (--info->hpd >= ARRAY_SIZE(hpd)) {
CONN_ERR(conn, "hpd %02x unknown", info->hpd);
return;
}
info->hpd = hpd[info->hpd];
ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
if (ret) {
CONN_ERR(conn, "func %02x lookup failed, %d", info->hpd, ret);
return;
}
conn->info.hpd = func.line;
}
}
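/* Worked example of the hpd remap above: the DCB connector table
 * stores hotplug capability as a bitmask, so e.g. info->hpd = 0x04
 * gives ffs() = 3, index 2 into hpd[], i.e. GPIO function 0x51, which
 * nvkm_gpio_find() then resolves to a physical GPIO line.
 */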
int
nvkm_conn_new(struct nvkm_disp *disp, int index, struct nvbios_connE *info,
struct nvkm_conn **pconn)
{
if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
return -ENOMEM;
nvkm_conn_ctor(disp, index, info, *pconn);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <core/ramht.h>
#include <subdev/timer.h>
#include <nvif/class.h>
static void
gf119_sor_hda_device_entry(struct nvkm_ior *ior, int head)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 hoff = 0x800 * head;
nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
}
void
gf119_sor_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = 0x030 * ior->id + (head * 0x04);
int i;
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
nvkm_mask(device, 0x10ec10 + soff, 0x80000002, 0x80000002);
}
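/* ELD write format used above: each write to 0x10ec00 carries the
 * byte offset in bits 15:8 and the data byte in bits 7:0 - e.g.
 * offset 4 holding 0x10 is written as 0x00000410 - and the remainder
 * of the 0x60-byte buffer is explicitly zeroed before the valid bits
 * are set in 0x10ec10.
 */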
void
gf119_sor_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = 0x030 * ior->id + (head * 0x04);
u32 data = 0x80000000;
u32 mask = 0x80000001;
if (present) {
ior->func->hda->device_entry(ior, head);
data |= 0x00000001;
} else {
mask |= 0x00000002;
}
nvkm_mask(device, 0x10ec10 + soff, mask, data);
}
const struct nvkm_ior_func_hda
gf119_sor_hda = {
.hpd = gf119_sor_hda_hpd,
.eld = gf119_sor_hda_eld,
.device_entry = gf119_sor_hda_device_entry,
};
void
gf119_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x616610 + hoff, 0x0800003f, 0x08000000 | watermark);
}
void
gf119_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, h);
nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, v);
}
void
gf119_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = 0x800 * head;
const u32 data = 0x80000000 | (0x00000001 * enable);
const u32 mask = 0x8000000d;
nvkm_mask(device, 0x616618 + hoff, mask, data);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x616618 + hoff) & 0x80000000))
break;
);
}
void
gf119_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
}
void
gf119_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
const u32 shift = sor->func->dp->lanes[ln] * 8;
u32 data[4];
data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
data[2] = nvkm_rd32(device, 0x61c130 + loff);
if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
data[2] = (data[2] & ~0x0000ff00) | (pu << 8);
nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
nvkm_wr32(device, 0x61c130 + loff, data[2]);
data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
}
static void
gf119_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
u32 data;
switch (pattern) {
case 0: data = 0x10101010; break;
case 1: data = 0x01010101; break;
case 2: data = 0x02020202; break;
case 3: data = 0x03030303; break;
default:
WARN_ON(1);
return;
}
nvkm_mask(device, 0x61c110 + soff, 0x1f1f1f1f, data);
}
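/* The selector above programs one field per lane (mask 0x1f1f1f1f):
 * training patterns 1-3 replicate their own number into every lane's
 * field (e.g. requesting TPS2 masks in 0x02020202), while pattern 0 -
 * training disabled - writes 0x10 per lane instead.
 */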
int
gf119_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 loff = nv50_sor_link(sor);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
clksor |= sor->dp.bw << 18;
dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
if (sor->dp.mst)
dpctrl |= 0x40000000;
if (sor->dp.ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
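/* Link configuration encoding used above: 0x61c10c takes one enable
 * bit per lane at bits 20:16 (((1 << nr) - 1) << 16), enhanced framing
 * at bit 14 and MST at bit 30, while 0x612300 takes the link bandwidth
 * code at bits 22:18.  E.g. 4 lanes with enhanced framing gives dpctrl
 * = 0x000f4000, and 2.7Gb/s (bw = 0x0a) gives clksor = 0x00280000.
 */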
const struct nvkm_ior_func_dp
gf119_sor_dp = {
.lanes = { 2, 1, 0, 3 },
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gf119_sor_dp_pattern,
.drive = gf119_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
.watermark = gf119_sor_dp_watermark,
};
static void
gf119_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe vsi;
const u32 hoff = head * 0x800;
pack_hdmi_infoframe(&vsi, data, size);
nvkm_mask(device, 0x616730 + hoff, 0x00010001, 0x00010000);
if (!size)
return;
/*
* These appear to be the audio infoframe registers,
* but no other set of infoframe registers has yet
* been found.
*/
nvkm_wr32(device, 0x616738 + hoff, vsi.header);
nvkm_wr32(device, 0x61673c + hoff, vsi.subpack0_low);
nvkm_wr32(device, 0x616740 + hoff, vsi.subpack0_high);
/* Is there a second (or further?) set of subpack registers here? */
nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000001);
}
static void
gf119_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe avi;
const u32 hoff = head * 0x800;
pack_hdmi_infoframe(&avi, data, size);
nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x61671c + hoff, avi.header);
nvkm_wr32(device, 0x616720 + hoff, avi.subpack0_low);
nvkm_wr32(device, 0x616724 + hoff, avi.subpack0_high);
nvkm_wr32(device, 0x616728 + hoff, avi.subpack1_low);
nvkm_wr32(device, 0x61672c + hoff, avi.subpack1_high);
nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
}
static void
gf119_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 ctrl = 0x40000000 * enable |
max_ac_packet << 16 |
rekey;
const u32 hoff = head * 0x800;
if (!(ctrl & 0x40000000)) {
nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000000);
nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
return;
}
/* ??? InfoFrame? */
nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
/* HDMI_CTRL */
nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
}
static const struct nvkm_ior_func_hdmi
gf119_sor_hdmi = {
.ctrl = gf119_sor_hdmi_ctrl,
.infoframe_avi = gf119_sor_hdmi_infoframe_avi,
.infoframe_vsi = gf119_sor_hdmi_infoframe_vsi,
};
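/* SOR clock setup.  link == 3 (both sublinks) selects the divided
 * modes; the 0x14/0x0a values land in the same field used for the DP
 * link bandwidth, presumably picking the TMDS bit clock for high-speed
 * versus normal operation.
 */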
void
gf119_sor_clock(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
u32 div1 = sor->asy.link == 3;
u32 div2 = sor->asy.link == 3;
if (sor->asy.proto == TMDS) {
const u32 speed = sor->tmds.high_speed ? 0x14 : 0x0a;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, speed << 18);
if (sor->tmds.high_speed)
div2 = 1;
}
nvkm_mask(device, 0x612300 + soff, 0x00000707, (div2 << 8) | div1);
}
void
gf119_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 coff = (state == &sor->asy) * 0x20000 + sor->id * 0x20;
u32 ctrl = nvkm_rd32(device, 0x640200 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = LVDS; state->link = 1; break;
case 1: state->proto = TMDS; state->link = 1; break;
case 2: state->proto = TMDS; state->link = 2; break;
case 5: state->proto = TMDS; state->link = 3; break;
case 8: state->proto = DP; state->link = 1; break;
case 9: state->proto = DP; state->link = 2; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x0000000f;
}
static const struct nvkm_ior_func
gf119_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gf119_sor_hdmi,
.dp = &gf119_sor_dp,
.hda = &gf119_sor_hda,
};
static int
gf119_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&gf119_sor, disp, SOR, id, true);
}
int
gf119_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x612004) & 0x0000ff00) >> 8;
return 8;
}
static void
gf119_dac_clock(struct nvkm_ior *dac)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 doff = nv50_ior_base(dac);
nvkm_mask(device, 0x612280 + doff, 0x07070707, 0x00000000);
}
static void
gf119_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
{
struct nvkm_device *device = dac->disp->engine.subdev.device;
const u32 coff = (state == &dac->asy) * 0x20000 + dac->id * 0x20;
u32 ctrl = nvkm_rd32(device, 0x640180 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = CRT; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x0000000f;
}
static const struct nvkm_ior_func
gf119_dac = {
.state = gf119_dac_state,
.power = nv50_dac_power,
.sense = nv50_dac_sense,
.clock = gf119_dac_clock,
};
int
gf119_dac_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&gf119_dac, disp, DAC, id, false);
}
int
gf119_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x612004) & 0x000000f0) >> 4;
return 4;
}
static void
gf119_head_vblank_put(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x800;
nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000000);
}
static void
gf119_head_vblank_get(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x800;
nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001);
}
void
gf119_head_rgclk(struct nvkm_head *head, int div)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x612200 + (head->id * 0x800), 0x0000000f, div);
}
static void
gf119_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = (state == &head->asy) * 0x20000 + head->id * 0x300;
u32 data;
data = nvkm_rd32(device, 0x640414 + hoff);
state->vtotal = (data & 0xffff0000) >> 16;
state->htotal = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x640418 + hoff);
state->vsynce = (data & 0xffff0000) >> 16;
state->hsynce = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x64041c + hoff);
state->vblanke = (data & 0xffff0000) >> 16;
state->hblanke = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x640420 + hoff);
state->vblanks = (data & 0xffff0000) >> 16;
state->hblanks = (data & 0x0000ffff);
state->hz = nvkm_rd32(device, 0x640450 + hoff);
data = nvkm_rd32(device, 0x640404 + hoff);
switch ((data & 0x000003c0) >> 6) {
case 6: state->or.depth = 30; break;
case 5: state->or.depth = 24; break;
case 2: state->or.depth = 18; break;
case 0: state->or.depth = 18; break; /*XXX: "default" */
default:
state->or.depth = 18;
WARN_ON(1);
break;
}
}
static const struct nvkm_head_func
gf119_head = {
.state = gf119_head_state,
.rgpos = nv50_head_rgpos,
.rgclk = gf119_head_rgclk,
.vblank_get = gf119_head_vblank_get,
.vblank_put = gf119_head_vblank_put,
};
int
gf119_head_new(struct nvkm_disp *disp, int id)
{
return nvkm_head_new_(&gf119_head, disp, id);
}
int
gf119_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = nvkm_rd32(device, 0x612004) & 0x0000000f;
return nvkm_rd32(device, 0x022448);
}
static void
gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
nvkm_wr32(device, 0x61008c, 0x00000001 << index);
}
static void
gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_wr32(device, 0x61008c, 0x00000001 << index);
nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
}
const struct nvkm_event_func
gf119_disp_chan_uevent = {
.init = gf119_disp_chan_uevent_init,
.fini = gf119_disp_chan_uevent_fini,
};
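/* Channel interrupt enables: 0x610090 is the completion-notify (uevent)
 * enable managed by the functions above, while 0x6100a0 appears to gate
 * per-channel exceptions; enabling here deliberately leaves 0x610090 to
 * the uevent code.
 */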
void
gf119_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00000001 << chan->chid.user;
if (!en) {
nvkm_mask(device, 0x610090, mask, 0x00000000);
nvkm_mask(device, 0x6100a0, mask, 0x00000000);
} else {
nvkm_mask(device, 0x6100a0, mask, mask);
}
}
static void
gf119_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_disp *disp = chan->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
break;
) < 0) {
nvkm_error(subdev, "ch %d fini: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
}
static int
gf119_disp_pioc_init(struct nvkm_disp_chan *chan)
{
struct nvkm_disp *disp = chan->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* activate channel */
nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
if (nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
if ((tmp & 0x00030000) == 0x00010000)
break;
) < 0) {
nvkm_error(subdev, "ch %d init: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
gf119_disp_pioc_func = {
.init = gf119_disp_pioc_init,
.fini = gf119_disp_pioc_fini,
.intr = gf119_disp_chan_intr,
.user = nv50_disp_chan_user,
};
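/* DMA channels resolve object handles through the display RAMHT; the
 * context value encodes the owning channel id alongside the object.
 */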
int
gf119_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
chan->chid.user << 27 | 0x00000001);
}
void
gf119_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* deactivate channel */
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
break;
) < 0) {
nvkm_error(subdev, "ch %d fini: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
}
chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
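/* Channel init restores the pushbuffer PUT pointer saved by fini above
 * (chan->suspend_put), so command submission resumes where it left off
 * across suspend.
 */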
static int
gf119_disp_dmac_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
int ctrl = chan->chid.ctrl;
int user = chan->chid.user;
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "ch %d init: %08x\n", user,
nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
gf119_disp_dmac_func = {
.push = nv50_disp_dmac_push,
.init = gf119_disp_dmac_init,
.fini = gf119_disp_dmac_fini,
.intr = gf119_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = gf119_disp_dmac_bind,
};
const struct nvkm_disp_chan_user
gf119_disp_curs = {
.func = &gf119_disp_pioc_func,
.ctrl = 13,
.user = 13,
};
const struct nvkm_disp_chan_user
gf119_disp_oimm = {
.func = &gf119_disp_pioc_func,
.ctrl = 9,
.user = 9,
};
static const struct nvkm_disp_mthd_list
gf119_disp_ovly_mthd_base = {
.mthd = 0x0000,
.data = {
{ 0x0080, 0x665080 },
{ 0x0084, 0x665084 },
{ 0x0088, 0x665088 },
{ 0x008c, 0x66508c },
{ 0x0090, 0x665090 },
{ 0x0094, 0x665094 },
{ 0x00a0, 0x6650a0 },
{ 0x00a4, 0x6650a4 },
{ 0x00b0, 0x6650b0 },
{ 0x00b4, 0x6650b4 },
{ 0x00b8, 0x6650b8 },
{ 0x00c0, 0x6650c0 },
{ 0x00e0, 0x6650e0 },
{ 0x00e4, 0x6650e4 },
{ 0x00e8, 0x6650e8 },
{ 0x0100, 0x665100 },
{ 0x0104, 0x665104 },
{ 0x0108, 0x665108 },
{ 0x010c, 0x66510c },
{ 0x0110, 0x665110 },
{ 0x0118, 0x665118 },
{ 0x011c, 0x66511c },
{ 0x0120, 0x665120 },
{ 0x0124, 0x665124 },
{ 0x0130, 0x665130 },
{ 0x0134, 0x665134 },
{ 0x0138, 0x665138 },
{ 0x013c, 0x66513c },
{ 0x0140, 0x665140 },
{ 0x0144, 0x665144 },
{ 0x0148, 0x665148 },
{ 0x014c, 0x66514c },
{ 0x0150, 0x665150 },
{ 0x0154, 0x665154 },
{ 0x0158, 0x665158 },
{ 0x015c, 0x66515c },
{ 0x0160, 0x665160 },
{ 0x0164, 0x665164 },
{ 0x0168, 0x665168 },
{ 0x016c, 0x66516c },
{ 0x0400, 0x665400 },
{ 0x0408, 0x665408 },
{ 0x040c, 0x66540c },
{ 0x0410, 0x665410 },
{}
}
};
static const struct nvkm_disp_chan_mthd
gf119_disp_ovly_mthd = {
.name = "Overlay",
.addr = 0x001000,
.prev = -0x020000,
.data = {
{ "Global", 1, &gf119_disp_ovly_mthd_base },
{}
}
};
static const struct nvkm_disp_chan_user
gf119_disp_ovly = {
.func = &gf119_disp_dmac_func,
.ctrl = 5,
.user = 5,
.mthd = &gf119_disp_ovly_mthd,
};
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x661080 },
{ 0x0084, 0x661084 },
{ 0x0088, 0x661088 },
{ 0x008c, 0x66108c },
{ 0x0090, 0x661090 },
{ 0x0094, 0x661094 },
{ 0x00a0, 0x6610a0 },
{ 0x00a4, 0x6610a4 },
{ 0x00c0, 0x6610c0 },
{ 0x00c4, 0x6610c4 },
{ 0x00c8, 0x6610c8 },
{ 0x00cc, 0x6610cc },
{ 0x00e0, 0x6610e0 },
{ 0x00e4, 0x6610e4 },
{ 0x00e8, 0x6610e8 },
{ 0x00ec, 0x6610ec },
{ 0x00fc, 0x6610fc },
{ 0x0100, 0x661100 },
{ 0x0104, 0x661104 },
{ 0x0108, 0x661108 },
{ 0x010c, 0x66110c },
{ 0x0110, 0x661110 },
{ 0x0114, 0x661114 },
{ 0x0118, 0x661118 },
{ 0x011c, 0x66111c },
{ 0x0130, 0x661130 },
{ 0x0134, 0x661134 },
{ 0x0138, 0x661138 },
{ 0x013c, 0x66113c },
{ 0x0140, 0x661140 },
{ 0x0144, 0x661144 },
{ 0x0148, 0x661148 },
{ 0x014c, 0x66114c },
{ 0x0150, 0x661150 },
{ 0x0154, 0x661154 },
{ 0x0158, 0x661158 },
{ 0x015c, 0x66115c },
{ 0x0160, 0x661160 },
{ 0x0164, 0x661164 },
{ 0x0168, 0x661168 },
{ 0x016c, 0x66116c },
{}
}
};
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_image = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
{ 0x0400, 0x661400 },
{ 0x0404, 0x661404 },
{ 0x0408, 0x661408 },
{ 0x040c, 0x66140c },
{ 0x0410, 0x661410 },
{}
}
};
const struct nvkm_disp_chan_mthd
gf119_disp_base_mthd = {
.name = "Base",
.addr = 0x001000,
.prev = -0x020000,
.data = {
{ "Global", 1, &gf119_disp_base_mthd_base },
{ "Image", 2, &gf119_disp_base_mthd_image },
{}
}
};
const struct nvkm_disp_chan_user
gf119_disp_base = {
.func = &gf119_disp_dmac_func,
.ctrl = 1,
.user = 1,
.mthd = &gf119_disp_base_mthd,
};
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x660080 },
{ 0x0084, 0x660084 },
{ 0x0088, 0x660088 },
{ 0x008c, 0x000000 },
{}
}
};
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_dac = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
{ 0x0180, 0x660180 },
{ 0x0184, 0x660184 },
{ 0x0188, 0x660188 },
{ 0x0190, 0x660190 },
{}
}
};
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_sor = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
{ 0x0200, 0x660200 },
{ 0x0204, 0x660204 },
{ 0x0208, 0x660208 },
{ 0x0210, 0x660210 },
{}
}
};
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_pior = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
{ 0x0300, 0x660300 },
{ 0x0304, 0x660304 },
{ 0x0308, 0x660308 },
{ 0x0310, 0x660310 },
{}
}
};
static const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_head = {
.mthd = 0x0300,
.addr = 0x000300,
.data = {
{ 0x0400, 0x660400 },
{ 0x0404, 0x660404 },
{ 0x0408, 0x660408 },
{ 0x040c, 0x66040c },
{ 0x0410, 0x660410 },
{ 0x0414, 0x660414 },
{ 0x0418, 0x660418 },
{ 0x041c, 0x66041c },
{ 0x0420, 0x660420 },
{ 0x0424, 0x660424 },
{ 0x0428, 0x660428 },
{ 0x042c, 0x66042c },
{ 0x0430, 0x660430 },
{ 0x0434, 0x660434 },
{ 0x0438, 0x660438 },
{ 0x0440, 0x660440 },
{ 0x0444, 0x660444 },
{ 0x0448, 0x660448 },
{ 0x044c, 0x66044c },
{ 0x0450, 0x660450 },
{ 0x0454, 0x660454 },
{ 0x0458, 0x660458 },
{ 0x045c, 0x66045c },
{ 0x0460, 0x660460 },
{ 0x0468, 0x660468 },
{ 0x046c, 0x66046c },
{ 0x0470, 0x660470 },
{ 0x0474, 0x660474 },
{ 0x0480, 0x660480 },
{ 0x0484, 0x660484 },
{ 0x048c, 0x66048c },
{ 0x0490, 0x660490 },
{ 0x0494, 0x660494 },
{ 0x0498, 0x660498 },
{ 0x04b0, 0x6604b0 },
{ 0x04b8, 0x6604b8 },
{ 0x04bc, 0x6604bc },
{ 0x04c0, 0x6604c0 },
{ 0x04c4, 0x6604c4 },
{ 0x04c8, 0x6604c8 },
{ 0x04d0, 0x6604d0 },
{ 0x04d4, 0x6604d4 },
{ 0x04e0, 0x6604e0 },
{ 0x04e4, 0x6604e4 },
{ 0x04e8, 0x6604e8 },
{ 0x04ec, 0x6604ec },
{ 0x04f0, 0x6604f0 },
{ 0x04f4, 0x6604f4 },
{ 0x04f8, 0x6604f8 },
{ 0x04fc, 0x6604fc },
{ 0x0500, 0x660500 },
{ 0x0504, 0x660504 },
{ 0x0508, 0x660508 },
{ 0x050c, 0x66050c },
{ 0x0510, 0x660510 },
{ 0x0514, 0x660514 },
{ 0x0518, 0x660518 },
{ 0x051c, 0x66051c },
{ 0x052c, 0x66052c },
{ 0x0530, 0x660530 },
{ 0x054c, 0x66054c },
{ 0x0550, 0x660550 },
{ 0x0554, 0x660554 },
{ 0x0558, 0x660558 },
{ 0x055c, 0x66055c },
{}
}
};
static const struct nvkm_disp_chan_mthd
gf119_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = -0x020000,
.data = {
{ "Global", 1, &gf119_disp_core_mthd_base },
{ "DAC", 3, &gf119_disp_core_mthd_dac },
{ "SOR", 8, &gf119_disp_core_mthd_sor },
{ "PIOR", 4, &gf119_disp_core_mthd_pior },
{ "HEAD", 4, &gf119_disp_core_mthd_head },
{}
}
};
void
gf119_disp_core_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
/* deactivate channel */
nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
break;
) < 0) {
nvkm_error(subdev, "core fini: %08x\n",
nvkm_rd32(device, 0x610490));
}
chan->suspend_put = nvkm_rd32(device, 0x640000);
}
static int
gf119_disp_core_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
/* initialise channel for dma command submission */
nvkm_wr32(device, 0x610494, chan->push);
nvkm_wr32(device, 0x610498, 0x00010000);
nvkm_wr32(device, 0x61049c, 0x00000001);
nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x640000, chan->suspend_put);
nvkm_wr32(device, 0x610490, 0x01000013);
/* wait for it to go inactive */
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
break;
) < 0) {
nvkm_error(subdev, "core init: %08x\n",
nvkm_rd32(device, 0x610490));
return -EBUSY;
}
return 0;
}
const struct nvkm_disp_chan_func
gf119_disp_core_func = {
.push = nv50_disp_dmac_push,
.init = gf119_disp_core_init,
.fini = gf119_disp_core_fini,
.intr = gf119_disp_chan_intr,
.user = nv50_disp_chan_user,
.bind = gf119_disp_dmac_bind,
};
static const struct nvkm_disp_chan_user
gf119_disp_core = {
.func = &gf119_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &gf119_disp_core_mthd,
};
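/* Supervisor handler.  The three pending bits appear to correspond to
 * the three stages of a modeset sequence; per-head reasons are latched
 * in 0x6101d4, and the final write to 0x6101d0 releases the hardware
 * to continue.
 */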
void
gf119_disp_super(struct work_struct *work)
{
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
u32 mask[4];
nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super.pending));
mutex_lock(&disp->super.mutex);
list_for_each_entry(head, &disp->heads, head) {
mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
HEAD_DBG(head, "%08x", mask[head->id]);
}
if (disp->super.pending & 0x00000001) {
nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
nv50_disp_super_1(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_1_0(disp, head);
}
} else
if (disp->super.pending & 0x00000002) {
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_2_0(disp, head);
}
nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00010000))
continue;
nv50_disp_super_2_1(disp, head);
}
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_2_2(disp, head);
}
} else
if (disp->super.pending & 0x00000004) {
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_3_0(disp, head);
}
}
list_for_each_entry(head, &disp->heads, head)
nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
nvkm_wr32(device, 0x6101d0, 0x80000000);
mutex_unlock(&disp->super.mutex);
}
void
gf119_disp_intr_error(struct nvkm_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
u32 type = (stat & 0x00007000) >> 12;
u32 mthd = (stat & 0x00000ffc);
u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
const struct nvkm_enum *reason =
nvkm_enum_find(nv50_disp_intr_error_type, type);
nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
"data %08x code %08x\n",
chid, stat, type, reason ? reason->name : "",
mthd, data, code);
if (chid < ARRAY_SIZE(disp->chan)) {
switch (mthd) {
case 0x0080:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}
nvkm_wr32(device, 0x61009c, (1 << chid));
nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}
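/* Top-level interrupt dispatch: bit 0 covers channel completion
 * notifications, bit 1 channel exceptions, bit 20 the supervisor
 * request, and bits 24+ per-head vblank.
 */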
void
gf119_disp_intr(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
u32 intr = nvkm_rd32(device, 0x610088);
if (intr & 0x00000001) {
u32 stat = nvkm_rd32(device, 0x61008c);
while (stat) {
int chid = __ffs(stat); stat &= ~(1 << chid);
nv50_disp_chan_uevent_send(disp, chid);
nvkm_wr32(device, 0x61008c, 1 << chid);
}
intr &= ~0x00000001;
}
if (intr & 0x00000002) {
u32 stat = nvkm_rd32(device, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
disp->func->intr_error(disp, chid);
intr &= ~0x00000002;
}
if (intr & 0x00100000) {
u32 stat = nvkm_rd32(device, 0x6100ac);
if (stat & 0x00000007) {
disp->super.pending = (stat & 0x00000007);
queue_work(disp->super.wq, &disp->super.work);
nvkm_wr32(device, 0x6100ac, disp->super.pending);
stat &= ~0x00000007;
}
if (stat) {
nvkm_warn(subdev, "intr24 %08x\n", stat);
nvkm_wr32(device, 0x6100ac, stat);
}
intr &= ~0x00100000;
}
list_for_each_entry(head, &disp->heads, head) {
const u32 hoff = head->id * 0x800;
u32 mask = 0x01000000 << head->id;
if (mask & intr) {
u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
if (stat & 0x00000001)
nvkm_disp_vblank(disp, head->id);
nvkm_mask(device, 0x6100bc + hoff, 0, 0);
nvkm_rd32(device, 0x6100c0 + hoff);
}
}
}
void
gf119_disp_fini(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
/* disable all interrupts */
nvkm_wr32(device, 0x6100b0, 0x00000000);
}
int
gf119_disp_init(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_head *head;
u32 tmp;
int i;
	/* The register-copy sequences below appear to inform EVO of the
	 * display capabilities, or something similar.
	 */
/* ... CRTC caps */
list_for_each_entry(head, &disp->heads, head) {
const u32 hoff = head->id * 0x800;
tmp = nvkm_rd32(device, 0x616104 + hoff);
nvkm_wr32(device, 0x6101b4 + hoff, tmp);
tmp = nvkm_rd32(device, 0x616108 + hoff);
nvkm_wr32(device, 0x6101b8 + hoff, tmp);
tmp = nvkm_rd32(device, 0x61610c + hoff);
nvkm_wr32(device, 0x6101bc + hoff, tmp);
}
/* ... DAC caps */
for (i = 0; i < disp->dac.nr; i++) {
tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
}
/* ... SOR caps */
for (i = 0; i < disp->sor.nr; i++) {
tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
}
/* steal display away from vbios, or something like that */
if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
nvkm_wr32(device, 0x6100ac, 0x00000100);
nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
break;
) < 0)
return -EBUSY;
}
/* point at display engine memory area (hash table, objects) */
nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);
/* enable supervisor interrupts, disable everything else */
nvkm_wr32(device, 0x610090, 0x00000000);
nvkm_wr32(device, 0x6100a0, 0x00000000);
nvkm_wr32(device, 0x6100b0, 0x00000307);
/* disable underflow reporting, preventing an intermittent issue
* on some gk104 boards where the production vbios left this
* setting enabled by default.
*
* ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
*/
list_for_each_entry(head, &disp->heads, head) {
const u32 hoff = head->id * 0x800;
nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
}
return 0;
}
static const struct nvkm_disp_func
gf119_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
.root = { 0,0,GF110_DISP },
.user = {
{{0,0,GF110_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GF110_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GF110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GF110_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gf119_disp_core },
{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gf119_disp_ovly },
{}
},
};
int
gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gf119_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "conn.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <core/client.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
struct nvkm_head *head = nvkm_head_find(disp, id);
if (head)
head->func->vblank_put(head);
}
static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
{
struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
struct nvkm_head *head = nvkm_head_find(disp, id);
if (head)
head->func->vblank_get(head);
}
static const struct nvkm_event_func
nvkm_disp_vblank_func = {
.init = nvkm_disp_vblank_init,
.fini = nvkm_disp_vblank_fini,
};
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
nvkm_event_ntfy(&disp->vblank, head, NVKM_DISP_HEAD_EVENT_VBLANK);
}
static int
nvkm_disp_class_new(struct nvkm_device *device,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
return nvkm_udisp_new(oclass, data, size, pobject);
}
static const struct nvkm_device_oclass
nvkm_disp_sclass = {
.ctor = nvkm_disp_class_new,
};
static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
const struct nvkm_device_oclass **class)
{
struct nvkm_disp *disp = nvkm_disp(oclass->engine);
if (index == 0) {
oclass->base = disp->func->root;
*class = &nvkm_disp_sclass;
return 0;
}
return 1;
}
static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
disp->func->intr(disp);
}
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_disp *disp = nvkm_disp(engine);
struct nvkm_conn *conn;
struct nvkm_outp *outp;
if (disp->func->fini)
disp->func->fini(disp);
list_for_each_entry(outp, &disp->outps, head) {
nvkm_outp_fini(outp);
}
list_for_each_entry(conn, &disp->conns, head) {
nvkm_conn_fini(conn);
}
return 0;
}
static int
nvkm_disp_init(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
struct nvkm_conn *conn;
struct nvkm_outp *outp;
struct nvkm_ior *ior;
list_for_each_entry(conn, &disp->conns, head) {
nvkm_conn_init(conn);
}
list_for_each_entry(outp, &disp->outps, head) {
nvkm_outp_init(outp);
}
if (disp->func->init) {
int ret = disp->func->init(disp);
if (ret)
return ret;
}
	/* Set 'normal' (i.e. when it's attached to a head) state for
	 * each output resource to 'fully enabled'.
	 */
list_for_each_entry(ior, &disp->iors, head) {
ior->func->power(ior, true, true, true, true, true);
}
return 0;
}
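/* One-time setup: build output path objects from the VBIOS DCB table,
 * group them into connectors (falling back to a shared-CCB-index
 * heuristic where connector data is missing), then hand off to the
 * chipset-specific oneinit.
 */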
static int
nvkm_disp_oneinit(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_outp *outp, *outt, *pair;
struct nvkm_conn *conn;
struct nvkm_head *head;
struct nvkm_ior *ior;
struct nvbios_connE connE;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
u32 data;
int ret, i;
/* Create output path objects for each VBIOS display path. */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
if (ver < 0x40) /* No support for chipsets prior to NV50. */
break;
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
break;
outp = NULL;
switch (dcbE.type) {
case DCB_OUTPUT_ANALOG:
case DCB_OUTPUT_TV:
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
ret = nvkm_outp_new(disp, i, &dcbE, &outp);
break;
case DCB_OUTPUT_DP:
ret = nvkm_dp_new(disp, i, &dcbE, &outp);
break;
case DCB_OUTPUT_WFD:
/* No support for WFD yet. */
ret = -ENODEV;
continue;
default:
nvkm_warn(subdev, "dcb %d type %d unknown\n",
i, dcbE.type);
continue;
}
if (ret) {
if (outp) {
if (ret != -ENODEV)
OUTP_ERR(outp, "ctor failed: %d", ret);
else
OUTP_DBG(outp, "not supported");
nvkm_outp_del(&outp);
continue;
}
nvkm_error(subdev, "failed to create outp %d\n", i);
continue;
}
list_add_tail(&outp->head, &disp->outps);
hpd = max(hpd, (u8)(dcbE.connector + 1));
}
/* Create connector objects based on available output paths. */
list_for_each_entry_safe(outp, outt, &disp->outps, head) {
/* VBIOS data *should* give us the most useful information. */
data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
&connE);
/* No bios connector data... */
if (!data) {
/* Heuristic: anything with the same ccb index is
* considered to be on the same connector, any
* output path without an associated ccb entry will
* be put on its own connector.
*/
int ccb_index = outp->info.i2c_index;
if (ccb_index != 0xf) {
list_for_each_entry(pair, &disp->outps, head) {
if (pair->info.i2c_index == ccb_index) {
outp->conn = pair->conn;
break;
}
}
}
/* Connector shared with another output path. */
if (outp->conn)
continue;
memset(&connE, 0x00, sizeof(connE));
connE.type = DCB_CONNECTOR_NONE;
i = -1;
} else {
i = outp->info.connector;
}
/* Check that we haven't already created this connector. */
list_for_each_entry(conn, &disp->conns, head) {
if (conn->index == outp->info.connector) {
outp->conn = conn;
break;
}
}
if (outp->conn)
continue;
/* Apparently we need to create a new one! */
ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
if (ret) {
nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
nvkm_conn_del(&outp->conn);
list_del(&outp->head);
nvkm_outp_del(&outp);
continue;
}
list_add_tail(&outp->conn->head, &disp->conns);
}
if (disp->func->oneinit) {
ret = disp->func->oneinit(disp);
if (ret)
return ret;
}
/* Enforce identity-mapped SOR assignment for panels, which have
* certain bits (ie. backlight controls) wired to a specific SOR.
*/
list_for_each_entry(outp, &disp->outps, head) {
if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
outp->conn->info.type == DCB_CONNECTOR_eDP) {
ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
if (!WARN_ON(!ior))
ior->identity = true;
outp->identity = true;
}
}
i = 0;
list_for_each_entry(head, &disp->heads, head)
i = max(i, head->id + 1);
return nvkm_event_init(&nvkm_disp_vblank_func, subdev, 1, i, &disp->vblank);
}
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
struct nvkm_conn *conn;
struct nvkm_outp *outp;
struct nvkm_ior *ior;
struct nvkm_head *head;
void *data = disp;
nvkm_ramht_del(&disp->ramht);
nvkm_gpuobj_del(&disp->inst);
nvkm_event_fini(&disp->uevent);
if (disp->super.wq) {
destroy_workqueue(disp->super.wq);
mutex_destroy(&disp->super.mutex);
}
nvkm_event_fini(&disp->vblank);
while (!list_empty(&disp->conns)) {
conn = list_first_entry(&disp->conns, typeof(*conn), head);
list_del(&conn->head);
nvkm_conn_del(&conn);
}
while (!list_empty(&disp->outps)) {
outp = list_first_entry(&disp->outps, typeof(*outp), head);
list_del(&outp->head);
nvkm_outp_del(&outp);
}
while (!list_empty(&disp->iors)) {
ior = list_first_entry(&disp->iors, typeof(*ior), head);
nvkm_ior_del(&ior);
}
while (!list_empty(&disp->heads)) {
head = list_first_entry(&disp->heads, typeof(*head), head);
nvkm_head_del(&head);
}
return data;
}
static const struct nvkm_engine_func
nvkm_disp = {
.dtor = nvkm_disp_dtor,
.oneinit = nvkm_disp_oneinit,
.init = nvkm_disp_init,
.fini = nvkm_disp_fini,
.intr = nvkm_disp_intr,
.base.sclass = nvkm_disp_class_get,
};
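/* Common constructor.  The supervisor, where implemented, gets its own
 * single-threaded workqueue, and the channel-completion event is sized
 * to the maximum number of user channels.
 */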
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
struct nvkm_disp *disp;
int ret;
if (!(disp = *pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
return -ENOMEM;
disp->func = func;
INIT_LIST_HEAD(&disp->heads);
INIT_LIST_HEAD(&disp->iors);
INIT_LIST_HEAD(&disp->outps);
INIT_LIST_HEAD(&disp->conns);
spin_lock_init(&disp->client.lock);
ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
if (ret)
return ret;
if (func->super) {
disp->super.wq = create_singlethread_workqueue("nvkm-disp");
if (!disp->super.wq)
return -ENOMEM;
INIT_WORK(&disp->super.work, func->super);
mutex_init(&disp->super.mutex);
}
return nvkm_event_init(func->uevent, &disp->engine.subdev, 1, ARRAY_SIZE(disp->chan),
&disp->uevent);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static const struct nvkm_ior_func_dp
mcp89_sor_dp = {
.lanes = { 3, 2, 1, 0 },
.links = g94_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = g94_sor_dp_pattern,
.drive = g94_sor_dp_drive,
.audio = gt215_sor_dp_audio,
.audio_sym = g94_sor_dp_audio_sym,
.activesym = g94_sor_dp_activesym,
.watermark = g94_sor_dp_watermark,
};
static const struct nvkm_ior_func
mcp89_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
	.hdmi = &gt215_sor_hdmi,
.dp = &mcp89_sor_dp,
	.hda = &gt215_sor_hda,
};
static int
mcp89_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&mcp89_sor, disp, SOR, id, true);
}
static const struct nvkm_disp_func
mcp89_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = g94_sor_cnt, .new = mcp89_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,GT214_DISP },
.user = {
{{0,0,GT214_DISP_CURSOR }, nvkm_disp_chan_new, &nv50_disp_curs },
{{0,0,GT214_DISP_OVERLAY }, nvkm_disp_chan_new, &nv50_disp_oimm },
{{0,0,GT214_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,GT214_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g94_disp_core },
{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, & g84_disp_ovly },
{}
},
};
int
mcp89_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&mcp89_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "conn.h"
#include "head.h"
#include "outp.h"
#include <nvif/class.h>
#include <nvif/if0010.h>
static int
nvkm_udisp_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *sclass)
{
struct nvkm_disp *disp = nvkm_udisp(object);
if (index-- == 0) {
sclass->base = (struct nvkm_sclass) { 0, 0, NVIF_CLASS_CONN };
sclass->ctor = nvkm_uconn_new;
return 0;
}
if (index-- == 0) {
sclass->base = (struct nvkm_sclass) { 0, 0, NVIF_CLASS_OUTP };
sclass->ctor = nvkm_uoutp_new;
return 0;
}
if (index-- == 0) {
sclass->base = (struct nvkm_sclass) { 0, 0, NVIF_CLASS_HEAD };
sclass->ctor = nvkm_uhead_new;
return 0;
}
if (disp->func->user[index].ctor) {
sclass->base = disp->func->user[index].base;
sclass->ctor = disp->func->user[index].ctor;
return 0;
}
return -EINVAL;
}
static void *
nvkm_udisp_dtor(struct nvkm_object *object)
{
struct nvkm_disp *disp = nvkm_udisp(object);
spin_lock(&disp->client.lock);
if (object == &disp->client.object)
disp->client.object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_udisp = {
.dtor = nvkm_udisp_dtor,
.sclass = nvkm_udisp_sclass,
};
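/* Userspace display object: only one client may own it at a time; a
 * second constructor call fails with -EBUSY until the first object is
 * destroyed.  The reply carries bitmasks of the connectors, output
 * paths and heads the client may create objects against.
 */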
int
nvkm_udisp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_disp(oclass->engine);
struct nvkm_conn *conn;
struct nvkm_outp *outp;
struct nvkm_head *head;
union nvif_disp_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
spin_lock(&disp->client.lock);
if (disp->client.object.func) {
spin_unlock(&disp->client.lock);
return -EBUSY;
}
nvkm_object_ctor(&nvkm_udisp, oclass, &disp->client.object);
*pobject = &disp->client.object;
spin_unlock(&disp->client.lock);
args->v0.conn_mask = 0;
list_for_each_entry(conn, &disp->conns, head)
args->v0.conn_mask |= BIT(conn->index);
args->v0.outp_mask = 0;
list_for_each_entry(outp, &disp->outps, head)
args->v0.outp_mask |= BIT(outp->index);
args->v0.head_mask = 0;
list_for_each_entry(head, &disp->heads, head)
args->v0.head_mask |= BIT(head->id);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/udisp.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/ramht.h>
#include <subdev/timer.h>
#include <nvif/class.h>
#include <nvif/unpack.h>
static void
gv100_sor_hda_device_entry(struct nvkm_ior *ior, int head)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 hoff = 0x800 * head;
nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
}
const struct nvkm_ior_func_hda
gv100_sor_hda = {
.hpd = gf119_sor_hda_hpd,
.eld = gf119_sor_hda_eld,
.device_entry = gv100_sor_hda_device_entry,
};
void
gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
}
void
gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x616568 + hoff, 0x0000ffff, h);
nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
}
void
gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 hoff = 0x800 * head;
const u32 data = 0x80000000 | (0x00000001 * enable);
const u32 mask = 0x8000000d;
nvkm_mask(device, 0x616560 + hoff, mask, data);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x616560 + hoff) & 0x80000000))
break;
);
}
static const struct nvkm_ior_func_dp
gv100_sor_dp = {
.lanes = { 0, 1, 2, 3 },
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gm200_sor_dp_drive,
.audio = gv100_sor_dp_audio,
.audio_sym = gv100_sor_dp_audio_sym,
.watermark = gv100_sor_dp_watermark,
};
static void
gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe vsi;
const u32 hoff = head * 0x400;
pack_hdmi_infoframe(&vsi, data, size);
nvkm_mask(device, 0x6f0100 + hoff, 0x00010001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x6f0108 + hoff, vsi.header);
nvkm_wr32(device, 0x6f010c + hoff, vsi.subpack0_low);
nvkm_wr32(device, 0x6f0110 + hoff, vsi.subpack0_high);
nvkm_wr32(device, 0x6f0114 + hoff, 0x00000000);
nvkm_wr32(device, 0x6f0118 + hoff, 0x00000000);
nvkm_wr32(device, 0x6f011c + hoff, 0x00000000);
nvkm_wr32(device, 0x6f0120 + hoff, 0x00000000);
nvkm_wr32(device, 0x6f0124 + hoff, 0x00000000);
nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
}
static void
gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe avi;
const u32 hoff = head * 0x400;
pack_hdmi_infoframe(&avi, data, size);
nvkm_mask(device, 0x6f0000 + hoff, 0x00000001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x6f0008 + hoff, avi.header);
nvkm_wr32(device, 0x6f000c + hoff, avi.subpack0_low);
nvkm_wr32(device, 0x6f0010 + hoff, avi.subpack0_high);
nvkm_wr32(device, 0x6f0014 + hoff, avi.subpack1_low);
nvkm_wr32(device, 0x6f0018 + hoff, avi.subpack1_high);
nvkm_mask(device, 0x6f0000 + hoff, 0x00000001, 0x00000001);
}
static void
gv100_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 ctrl = 0x40000000 * enable |
max_ac_packet << 16 |
rekey;
const u32 hoff = head * 0x800;
const u32 hdmi = head * 0x400;
if (!(ctrl & 0x40000000)) {
nvkm_mask(device, 0x6165c0 + hoff, 0x40000000, 0x00000000);
nvkm_mask(device, 0x6f0100 + hdmi, 0x00000001, 0x00000000);
nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
nvkm_mask(device, 0x6f0000 + hdmi, 0x00000001, 0x00000000);
return;
}
/* General Control (GCP). */
nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
nvkm_wr32(device, 0x6f00cc + hdmi, 0x00000010);
nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
/* Audio Clock Regeneration (ACR). */
nvkm_wr32(device, 0x6f0080 + hdmi, 0x82000000);
/* NV_PDISP_SF_HDMI_CTRL. */
nvkm_mask(device, 0x6165c0 + hoff, 0x401f007f, ctrl);
}
const struct nvkm_ior_func_hdmi
gv100_sor_hdmi = {
.ctrl = gv100_sor_hdmi_ctrl,
.scdc = gm200_sor_hdmi_scdc,
.infoframe_avi = gv100_sor_hdmi_infoframe_avi,
.infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
};
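/* OR state is read back through the core channel's method state: the
 * assembly copy at 0x680300, with the armed copy a further 0x8000 up.
 */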
void
gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 coff = (state == &sor->arm) * 0x8000 + sor->id * 0x20;
u32 ctrl = nvkm_rd32(device, 0x680300 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = LVDS; state->link = 1; break;
case 1: state->proto = TMDS; state->link = 1; break;
case 2: state->proto = TMDS; state->link = 2; break;
case 5: state->proto = TMDS; state->link = 3; break;
case 8: state->proto = DP; state->link = 1; break;
case 9: state->proto = DP; state->link = 2; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x000000ff;
}
static const struct nvkm_ior_func
gv100_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
},
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gv100_sor_hdmi,
.dp = &gv100_sor_dp,
.hda = &gv100_sor_hda,
};
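/* Per-SOR HDA capability: taken from 0x08a15c when its valid bit is
 * set, otherwise from what appears to be a fuse/strap register at
 * 0x118fb0.
 */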
static int
gv100_sor_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 hda;
if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
hda = nvkm_rd32(device, 0x118fb0) >> 8;
return nvkm_ior_new_(&gv100_sor, disp, SOR, id, hda & BIT(id));
}
int
gv100_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x610060) & 0x0000ff00) >> 8;
return (nvkm_rd32(device, 0x610074) & 0x00000f00) >> 8;
}
static void
gv100_head_vblank_put(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000000);
}
static void
gv100_head_vblank_get(struct nvkm_head *head)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000004);
}
static void
gv100_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = head->id * 0x800;
/* vline read locks hline. */
*vline = nvkm_rd32(device, 0x616330 + hoff) & 0x0000ffff;
*hline = nvkm_rd32(device, 0x616334 + hoff) & 0x0000ffff;
}
static void
gv100_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
struct nvkm_device *device = head->disp->engine.subdev.device;
const u32 hoff = (state == &head->arm) * 0x8000 + head->id * 0x400;
u32 data;
data = nvkm_rd32(device, 0x682064 + hoff);
state->vtotal = (data & 0xffff0000) >> 16;
state->htotal = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x682068 + hoff);
state->vsynce = (data & 0xffff0000) >> 16;
state->hsynce = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x68206c + hoff);
state->vblanke = (data & 0xffff0000) >> 16;
state->hblanke = (data & 0x0000ffff);
data = nvkm_rd32(device, 0x682070 + hoff);
state->vblanks = (data & 0xffff0000) >> 16;
state->hblanks = (data & 0x0000ffff);
state->hz = nvkm_rd32(device, 0x68200c + hoff);
data = nvkm_rd32(device, 0x682004 + hoff);
switch ((data & 0x000000f0) >> 4) {
case 5: state->or.depth = 30; break;
case 4: state->or.depth = 24; break;
case 1: state->or.depth = 18; break;
default:
state->or.depth = 18;
WARN_ON(1);
break;
}
}
static const struct nvkm_head_func
gv100_head = {
.state = gv100_head_state,
.rgpos = gv100_head_rgpos,
.rgclk = gf119_head_rgclk,
.vblank_get = gv100_head_vblank_get,
.vblank_put = gv100_head_vblank_put,
};
int
gv100_head_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
if (!(nvkm_rd32(device, 0x610060) & (0x00000001 << id)))
return 0;
return nvkm_head_new_(&gv100_head, disp, id);
}
int
gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = nvkm_rd32(device, 0x610060) & 0x000000ff;
return nvkm_rd32(device, 0x610074) & 0x0000000f;
}
const struct nvkm_event_func
gv100_disp_chan_uevent = {
};
u64
gv100_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
{
*psize = 0x1000;
return 0x690000 + ((chan->chid.user - 1) * 0x1000);
}
static int
gv100_disp_dmac_idle(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 soff = (chan->chid.ctrl - 1) * 0x04;
nvkm_msec(device, 2000,
u32 stat = nvkm_rd32(device, 0x610664 + soff);
if ((stat & 0x000f0000) == 0x00040000)
return 0;
);
return -EBUSY;
}
int
gv100_disp_dmac_bind(struct nvkm_disp_chan *chan,
struct nvkm_object *object, u32 handle)
{
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
chan->chid.user << 25 | 0x00000040);
}
void
gv100_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
const u32 coff = chan->chid.ctrl * 0x04;
nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000000);
gv100_disp_dmac_idle(chan);
nvkm_mask(device, 0x6104e0 + coff, 0x00000002, 0x00000000);
chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
}
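/* gv100 channel init: the pushbuffer base is programmed 64-bit via
 * 0x610b20/0x610b24, and the PUT pointer lives in the per-channel user
 * page at 0x690000, restored here from the value fini saved above.
 */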
int
gv100_disp_dmac_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
const u32 uoff = (chan->chid.ctrl - 1) * 0x1000;
const u32 poff = chan->chid.ctrl * 0x10;
const u32 coff = chan->chid.ctrl * 0x04;
nvkm_wr32(device, 0x610b24 + poff, lower_32_bits(chan->push));
nvkm_wr32(device, 0x610b20 + poff, upper_32_bits(chan->push));
nvkm_wr32(device, 0x610b28 + poff, 0x00000001);
nvkm_wr32(device, 0x610b2c + poff, 0x00000040);
nvkm_mask(device, 0x6104e0 + coff, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x690000 + uoff, chan->suspend_put);
nvkm_wr32(device, 0x6104e0 + coff, 0x00000013);
return gv100_disp_dmac_idle(chan);
}
static void
gv100_disp_wimm_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00000001 << chan->head;
const u32 data = en ? mask : 0;
nvkm_mask(device, 0x611da8, mask, data);
}
static const struct nvkm_disp_chan_func
gv100_disp_wimm_func = {
.push = nv50_disp_dmac_push,
.init = gv100_disp_dmac_init,
.fini = gv100_disp_dmac_fini,
.intr = gv100_disp_wimm_intr,
.user = gv100_disp_chan_user,
};
const struct nvkm_disp_chan_user
gv100_disp_wimm = {
.func = &gv100_disp_wimm_func,
.ctrl = 33,
.user = 33,
};
static const struct nvkm_disp_mthd_list
gv100_disp_wndw_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0200, 0x690200 },
{ 0x020c, 0x69020c },
{ 0x0210, 0x690210 },
{ 0x0214, 0x690214 },
{ 0x0218, 0x690218 },
{ 0x021c, 0x69021c },
{ 0x0220, 0x690220 },
{ 0x0224, 0x690224 },
{ 0x0228, 0x690228 },
{ 0x022c, 0x69022c },
{ 0x0230, 0x690230 },
{ 0x0234, 0x690234 },
{ 0x0238, 0x690238 },
{ 0x0240, 0x690240 },
{ 0x0244, 0x690244 },
{ 0x0248, 0x690248 },
{ 0x024c, 0x69024c },
{ 0x0250, 0x690250 },
{ 0x0254, 0x690254 },
{ 0x0260, 0x690260 },
{ 0x0264, 0x690264 },
{ 0x0268, 0x690268 },
{ 0x026c, 0x69026c },
{ 0x0270, 0x690270 },
{ 0x0274, 0x690274 },
{ 0x0280, 0x690280 },
{ 0x0284, 0x690284 },
{ 0x0288, 0x690288 },
{ 0x028c, 0x69028c },
{ 0x0290, 0x690290 },
{ 0x0298, 0x690298 },
{ 0x029c, 0x69029c },
{ 0x02a0, 0x6902a0 },
{ 0x02a4, 0x6902a4 },
{ 0x02a8, 0x6902a8 },
{ 0x02ac, 0x6902ac },
{ 0x02b0, 0x6902b0 },
{ 0x02b4, 0x6902b4 },
{ 0x02b8, 0x6902b8 },
{ 0x02bc, 0x6902bc },
{ 0x02c0, 0x6902c0 },
{ 0x02c4, 0x6902c4 },
{ 0x02c8, 0x6902c8 },
{ 0x02cc, 0x6902cc },
{ 0x02d0, 0x6902d0 },
{ 0x02d4, 0x6902d4 },
{ 0x02d8, 0x6902d8 },
{ 0x02dc, 0x6902dc },
{ 0x02e0, 0x6902e0 },
{ 0x02e4, 0x6902e4 },
{ 0x02e8, 0x6902e8 },
{ 0x02ec, 0x6902ec },
{ 0x02f0, 0x6902f0 },
{ 0x02f4, 0x6902f4 },
{ 0x02f8, 0x6902f8 },
{ 0x02fc, 0x6902fc },
{ 0x0300, 0x690300 },
{ 0x0304, 0x690304 },
{ 0x0308, 0x690308 },
{ 0x0310, 0x690310 },
{ 0x0314, 0x690314 },
{ 0x0318, 0x690318 },
{ 0x031c, 0x69031c },
{ 0x0320, 0x690320 },
{ 0x0324, 0x690324 },
{ 0x0328, 0x690328 },
{ 0x032c, 0x69032c },
{ 0x033c, 0x69033c },
{ 0x0340, 0x690340 },
{ 0x0344, 0x690344 },
{ 0x0348, 0x690348 },
{ 0x034c, 0x69034c },
{ 0x0350, 0x690350 },
{ 0x0354, 0x690354 },
{ 0x0358, 0x690358 },
{ 0x0364, 0x690364 },
{ 0x0368, 0x690368 },
{ 0x036c, 0x69036c },
{ 0x0370, 0x690370 },
{ 0x0374, 0x690374 },
{ 0x0380, 0x690380 },
{}
}
};
static const struct nvkm_disp_chan_mthd
gv100_disp_wndw_mthd = {
.name = "Window",
.addr = 0x001000,
.prev = 0x000800,
.data = {
{ "Global", 1, &gv100_disp_wndw_mthd_base },
{}
}
};
static void
gv100_disp_wndw_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00000001 << chan->head;
const u32 data = en ? mask : 0;
nvkm_mask(device, 0x611da4, mask, data);
}
static const struct nvkm_disp_chan_func
gv100_disp_wndw_func = {
.push = nv50_disp_dmac_push,
.init = gv100_disp_dmac_init,
.fini = gv100_disp_dmac_fini,
.intr = gv100_disp_wndw_intr,
.user = gv100_disp_chan_user,
.bind = gv100_disp_dmac_bind,
};
const struct nvkm_disp_chan_user
gv100_disp_wndw = {
.func = &gv100_disp_wndw_func,
.ctrl = 1,
.user = 1,
.mthd = &gv100_disp_wndw_mthd,
};
int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = nvkm_rd32(device, 0x610064);
return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}
static int
gv100_disp_curs_idle(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 soff = (chan->chid.ctrl - 1) * 0x04;
nvkm_msec(device, 2000,
u32 stat = nvkm_rd32(device, 0x610664 + soff);
if ((stat & 0x00070000) == 0x00040000)
return 0;
);
return -EBUSY;
}
static void
gv100_disp_curs_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00010000 << chan->head;
const u32 data = en ? mask : 0;
nvkm_mask(device, 0x611dac, mask, data);
}
static void
gv100_disp_curs_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 hoff = chan->chid.ctrl * 4;
nvkm_mask(device, 0x6104e0 + hoff, 0x00000010, 0x00000010);
gv100_disp_curs_idle(chan);
nvkm_mask(device, 0x6104e0 + hoff, 0x00000001, 0x00000000);
}
static int
gv100_disp_curs_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
nvkm_wr32(device, 0x6104e0 + chan->chid.ctrl * 4, 0x00000001);
return gv100_disp_curs_idle(chan);
}
static const struct nvkm_disp_chan_func
gv100_disp_curs_func = {
.init = gv100_disp_curs_init,
.fini = gv100_disp_curs_fini,
.intr = gv100_disp_curs_intr,
.user = gv100_disp_chan_user,
};
const struct nvkm_disp_chan_user
gv100_disp_curs = {
.func = &gv100_disp_curs_func,
.ctrl = 73,
.user = 73,
};
static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0200, 0x680200 },
{ 0x0208, 0x680208 },
{ 0x020c, 0x68020c },
{ 0x0210, 0x680210 },
{ 0x0214, 0x680214 },
{ 0x0218, 0x680218 },
{ 0x021c, 0x68021c },
{}
}
};
static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_sor = {
.mthd = 0x0020,
.addr = 0x000020,
.data = {
{ 0x0300, 0x680300 },
{ 0x0304, 0x680304 },
{ 0x0308, 0x680308 },
{ 0x030c, 0x68030c },
{}
}
};
static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_wndw = {
.mthd = 0x0080,
.addr = 0x000080,
.data = {
{ 0x1000, 0x681000 },
{ 0x1004, 0x681004 },
{ 0x1008, 0x681008 },
{ 0x100c, 0x68100c },
{ 0x1010, 0x681010 },
{}
}
};
static const struct nvkm_disp_mthd_list
gv100_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000400,
.data = {
{ 0x2000, 0x682000 },
{ 0x2004, 0x682004 },
{ 0x2008, 0x682008 },
{ 0x200c, 0x68200c },
{ 0x2014, 0x682014 },
{ 0x2018, 0x682018 },
{ 0x201c, 0x68201c },
{ 0x2020, 0x682020 },
{ 0x2028, 0x682028 },
{ 0x202c, 0x68202c },
{ 0x2030, 0x682030 },
{ 0x2038, 0x682038 },
{ 0x203c, 0x68203c },
{ 0x2048, 0x682048 },
{ 0x204c, 0x68204c },
{ 0x2050, 0x682050 },
{ 0x2054, 0x682054 },
{ 0x2058, 0x682058 },
{ 0x205c, 0x68205c },
{ 0x2060, 0x682060 },
{ 0x2064, 0x682064 },
{ 0x2068, 0x682068 },
{ 0x206c, 0x68206c },
{ 0x2070, 0x682070 },
{ 0x2074, 0x682074 },
{ 0x2078, 0x682078 },
{ 0x207c, 0x68207c },
{ 0x2080, 0x682080 },
{ 0x2088, 0x682088 },
{ 0x2090, 0x682090 },
{ 0x209c, 0x68209c },
{ 0x20a0, 0x6820a0 },
{ 0x20a4, 0x6820a4 },
{ 0x20a8, 0x6820a8 },
{ 0x20ac, 0x6820ac },
{ 0x2180, 0x682180 },
{ 0x2184, 0x682184 },
{ 0x218c, 0x68218c },
{ 0x2194, 0x682194 },
{ 0x2198, 0x682198 },
{ 0x219c, 0x68219c },
{ 0x21a0, 0x6821a0 },
{ 0x21a4, 0x6821a4 },
{ 0x2214, 0x682214 },
{ 0x2218, 0x682218 },
{}
}
};
static const struct nvkm_disp_chan_mthd
gv100_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = 0x008000,
.data = {
{ "Global", 1, &gv100_disp_core_mthd_base },
{ "SOR", 4, &gv100_disp_core_mthd_sor },
{ "WINDOW", 8, &gv100_disp_core_mthd_wndw },
{ "HEAD", 4, &gv100_disp_core_mthd_head },
{}
}
};
static int
gv100_disp_core_idle(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
nvkm_msec(device, 2000,
u32 stat = nvkm_rd32(device, 0x610630);
if ((stat & 0x001f0000) == 0x000b0000)
return 0;
);
return -EBUSY;
}
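/* The core channel's USERD is a fixed 64KiB window at 0x680000; its
* first dword doubles as the PUT pointer that fini()/init() below save
* and restore across suspend.
*/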
static u64
gv100_disp_core_user(struct nvkm_disp_chan *chan, u64 *psize)
{
*psize = 0x10000;
return 0x680000;
}
static void
gv100_disp_core_intr(struct nvkm_disp_chan *chan, bool en)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
const u32 mask = 0x00000001;
const u32 data = en ? mask : 0;
nvkm_mask(device, 0x611dac, mask, data);
}
static void
gv100_disp_core_fini(struct nvkm_disp_chan *chan)
{
struct nvkm_device *device = chan->disp->engine.subdev.device;
nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000000);
gv100_disp_core_idle(chan);
nvkm_mask(device, 0x6104e0, 0x00000002, 0x00000000);
chan->suspend_put = nvkm_rd32(device, 0x680000);
}
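/* Core channel bring-up: program the push buffer address (0x610b20/24)
* and what looks like its enable/configuration (0x610b28/2c), restore
* the saved PUT pointer, then kick the channel via 0x6104e0 and wait
* for it to go idle.
*/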
static int
gv100_disp_core_init(struct nvkm_disp_chan *chan)
{
struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
struct nvkm_device *device = subdev->device;
nvkm_wr32(device, 0x610b24, lower_32_bits(chan->push));
nvkm_wr32(device, 0x610b20, upper_32_bits(chan->push));
nvkm_wr32(device, 0x610b28, 0x00000001);
nvkm_wr32(device, 0x610b2c, 0x00000040);
nvkm_mask(device, 0x6104e0, 0x00000010, 0x00000010);
nvkm_wr32(device, 0x680000, chan->suspend_put);
nvkm_wr32(device, 0x6104e0, 0x00000013);
return gv100_disp_core_idle(chan);
}
static const struct nvkm_disp_chan_func
gv100_disp_core_func = {
.push = nv50_disp_dmac_push,
.init = gv100_disp_core_init,
.fini = gv100_disp_core_fini,
.intr = gv100_disp_core_intr,
.user = gv100_disp_core_user,
.bind = gv100_disp_dmac_bind,
};
const struct nvkm_disp_chan_user
gv100_disp_core = {
.func = &gv100_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &gv100_disp_core_mthd,
};
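/* The CAPS object exposes a 4KiB window over the display capability
* registers at BAR0 + 0x640000 (the same range gv100_disp_init()
* populates), letting clients read them directly.
*/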
#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
struct gv100_disp_caps {
struct nvkm_object object;
struct nvkm_disp *disp;
};
static int
gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct gv100_disp_caps *caps = gv100_disp_caps(object);
struct nvkm_device *device = caps->disp->engine.subdev.device;
*type = NVKM_OBJECT_MAP_IO;
*addr = 0x640000 + device->func->resource_addr(device, 0);
*size = 0x1000;
return 0;
}
static const struct nvkm_object_func
gv100_disp_caps = {
.map = gv100_disp_caps_map,
};
int
gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct gv100_disp_caps *caps;
if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
return -ENOMEM;
*pobject = &caps->object;
nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
caps->disp = disp;
return 0;
}
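/* Supervisor handler, apparently following the nv50-era three-phase
* model: phase 1 shuts down heads that need reprogramming, phase 2
* updates OR routing and programs the new mode, and phase 3 finishes
* up.  The per-head registers at 0x6107ac select which heads each
* phase touches, and writing 0x80000000 to 0x6107a8 ACKs the
* interrupt.
*/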
void
gv100_disp_super(struct work_struct *work)
{
struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_head *head;
u32 stat, mask[4];
mutex_lock(&disp->super.mutex);
stat = nvkm_rd32(device, 0x6107a8);
nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);
list_for_each_entry(head, &disp->heads, head) {
mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
HEAD_DBG(head, "%08x", mask[head->id]);
}
if (disp->super.pending & 0x00000001) {
nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
nv50_disp_super_1(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_1_0(disp, head);
}
} else
if (disp->super.pending & 0x00000002) {
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_2_0(disp, head);
}
nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00010000))
continue;
nv50_disp_super_2_1(disp, head);
}
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_2_2(disp, head);
}
} else
if (disp->super.pending & 0x00000004) {
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00001000))
continue;
nv50_disp_super_3_0(disp, head);
}
}
list_for_each_entry(head, &disp->heads, head)
nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
nvkm_wr32(device, 0x6107a8, 0x80000000);
mutex_unlock(&disp->super.mutex);
}
static void
gv100_disp_exception(struct nvkm_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
u32 type = (stat & 0x00007000) >> 12;
u32 mthd = (stat & 0x00000fff) << 2;
const struct nvkm_enum *reason =
nvkm_enum_find(nv50_disp_intr_error_type, type);
/*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
* don't support those currently.
*
* CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
*/
if (chid <= 32) {
u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
"mthd %04x data %08x code %08x\n",
chid, stat, type, reason ? reason->name : "",
mthd, data, code);
} else {
nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
"mthd %04x\n",
chid, stat, type, reason ? reason->name : "", mthd);
}
if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
switch (mthd) {
case 0x0200:
nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
break;
default:
break;
}
}
nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
}
static void
gv100_disp_intr_ctrl_disp(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x611c30);
if (stat & 0x00000007) {
disp->super.pending = (stat & 0x00000007);
queue_work(disp->super.wq, &disp->super.work);
nvkm_wr32(device, 0x611860, disp->super.pending);
stat &= ~0x00000007;
}
/*TODO: I would guess this is VBIOS_RELEASE; however, it's not clear
* how to ACK it, nor does RM appear to bother.
*/
if (stat & 0x00000008)
stat &= ~0x00000008;
if (stat & 0x00000080) {
u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
nvkm_warn(subdev, "error %08x\n", error);
stat &= ~0x00000080;
}
if (stat & 0x00000100) {
unsigned long wndws = nvkm_rd32(device, 0x611858);
unsigned long other = nvkm_rd32(device, 0x61185c);
int wndw;
nvkm_wr32(device, 0x611858, wndws);
nvkm_wr32(device, 0x61185c, other);
/* AWAKEN_OTHER_CORE. */
if (other & 0x00000001)
nv50_disp_chan_uevent_send(disp, 0);
/* AWAKEN_WIN_CH(n). */
for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
nv50_disp_chan_uevent_send(disp, 1 + wndw);
}
}
if (stat)
nvkm_warn(subdev, "ctrl %08x\n", stat);
}
static void
gv100_disp_intr_exc_other(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x611854);
unsigned long mask;
int head;
if (stat & 0x00000001) {
nvkm_wr32(device, 0x611854, 0x00000001);
gv100_disp_exception(disp, 0);
stat &= ~0x00000001;
}
if ((mask = (stat & 0x00ff0000) >> 16)) {
for_each_set_bit(head, &mask, disp->wndw.nr) {
nvkm_wr32(device, 0x611854, 0x00010000 << head);
gv100_disp_exception(disp, 73 + head);
stat &= ~(0x00010000 << head);
}
}
if (stat) {
nvkm_warn(subdev, "exception %08x\n", stat);
nvkm_wr32(device, 0x611854, stat);
}
}
static void
gv100_disp_intr_exc_winim(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
unsigned long stat = nvkm_rd32(device, 0x611850);
int wndw;
for_each_set_bit(wndw, &stat, disp->wndw.nr) {
nvkm_wr32(device, 0x611850, BIT(wndw));
gv100_disp_exception(disp, 33 + wndw);
stat &= ~BIT(wndw);
}
if (stat) {
nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
nvkm_wr32(device, 0x611850, stat);
}
}
static void
gv100_disp_intr_exc_win(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
unsigned long stat = nvkm_rd32(device, 0x61184c);
int wndw;
for_each_set_bit(wndw, &stat, disp->wndw.nr) {
nvkm_wr32(device, 0x61184c, BIT(wndw));
gv100_disp_exception(disp, 1 + wndw);
stat &= ~BIT(wndw);
}
if (stat) {
nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
nvkm_wr32(device, 0x61184c, stat);
}
}
static void
gv100_disp_intr_head_timing(struct nvkm_disp *disp, int head)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));
/* LAST_DATA, LOADV. */
if (stat & 0x00000003) {
nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
stat &= ~0x00000003;
}
if (stat & 0x00000004) {
nvkm_disp_vblank(disp, head);
nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
stat &= ~0x00000004;
}
if (stat) {
nvkm_warn(subdev, "head %08x\n", stat);
nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
}
}
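/* Top-level interrupt dispatch: 0x611ec0 is the root of the interrupt
* tree, with bits 7:0 being per-head timing interrupts, bit 9 window
* channel exceptions, bit 10 window immediate exceptions, bit 11
* other (core/cursor) exceptions, and bit 12 the control/supervisor
* leaf handled above.
*/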
void
gv100_disp_intr(struct nvkm_disp *disp)
{
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x611ec0);
unsigned long mask;
int head;
if ((mask = (stat & 0x000000ff))) {
for_each_set_bit(head, &mask, 8) {
gv100_disp_intr_head_timing(disp, head);
stat &= ~BIT(head);
}
}
if (stat & 0x00000200) {
gv100_disp_intr_exc_win(disp);
stat &= ~0x00000200;
}
if (stat & 0x00000400) {
gv100_disp_intr_exc_winim(disp);
stat &= ~0x00000400;
}
if (stat & 0x00000800) {
gv100_disp_intr_exc_other(disp);
stat &= ~0x00000800;
}
if (stat & 0x00001000) {
gv100_disp_intr_ctrl_disp(disp);
stat &= ~0x00001000;
}
if (stat)
nvkm_warn(subdev, "intr %08x\n", stat);
}
void
gv100_disp_fini(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
nvkm_wr32(device, 0x611db0, 0x00000000);
}
static int
gv100_disp_init(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_head *head;
int i, j;
u32 tmp;
/* Claim ownership of display. */
if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
if (nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
break;
) < 0)
return -EBUSY;
}
/* Lock pin capabilities. */
tmp = nvkm_rd32(device, 0x610068);
nvkm_wr32(device, 0x640008, tmp);
/* SOR capabilities. */
for (i = 0; i < disp->sor.nr; i++) {
tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
}
/* Head capabilities. */
list_for_each_entry(head, &disp->heads, head) {
const int id = head->id;
/* RG. */
tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
/* POSTCOMP. */
for (j = 0; j < 6 * 4; j += 4) {
tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
}
}
/* Window capabilities. */
for (i = 0; i < disp->wndw.nr; i++) {
nvkm_mask(device, 0x640004, 1 << i, 1 << i);
for (j = 0; j < 6 * 4; j += 4) {
tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
}
}
/* IHUB capabilities. */
for (i = 0; i < 4; i++) {
tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
}
nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
/* Setup instance memory. */
switch (nvkm_memory_target(disp->inst->memory)) {
case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
default:
break;
}
nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
/* EXC_OTHER: CURSn, CORE. */
nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
0x00000001); /* MSK. */
nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
/* EXC_WINIM. */
nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
/* EXC_WIN. */
nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
/* HEAD_TIMING(n): VBLANK. */
list_for_each_entry(head, &disp->heads, head) {
const u32 hoff = head->id * 4;
nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
}
/* OR. */
nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
return 0;
}
static const struct nvkm_disp_func
gv100_disp = {
.oneinit = nv50_disp_oneinit,
.init = gv100_disp_init,
.fini = gv100_disp_fini,
.intr = gv100_disp_intr,
.super = gv100_disp_super,
.uevent = &gv100_disp_chan_uevent,
.wndw = { .cnt = gv100_disp_wndw_cnt },
.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
.sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
.ramht_size = 0x2000,
.root = { 0, 0,GV100_DISP },
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{ 0, 0,GV100_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
{{ 0, 0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
{{ 0, 0,GV100_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
{{ 0, 0,GV100_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
{}
},
};
int
gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gv100_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <subdev/timer.h>
#include <nvif/class.h>
void
g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
}
void
g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 | VTUf << 16 | VTUi << 8);
}
void
g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
}
void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
const u32 shift = sor->func->dp->lanes[ln] * 8;
u32 data[3];
data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
data[2] = nvkm_rd32(device, 0x61c130 + loff);
if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
data[2] = (data[2] & ~0x0000ff00) | (pu << 8);
nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
nvkm_wr32(device, 0x61c130 + loff, data[2]);
}
void
g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 loff = nv50_sor_link(sor);
u32 data;
switch (pattern) {
case 0: data = 0x00001000; break;
case 1: data = 0x01000000; break;
case 2: data = 0x02000000; break;
default:
WARN_ON(1);
return;
}
nvkm_mask(device, 0x61c10c + loff, 0x0f001000, data);
}
void
g94_sor_dp_power(struct nvkm_ior *sor, int nr)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 loff = nv50_sor_link(sor);
u32 mask = 0, i;
for (i = 0; i < nr; i++)
mask |= 1 << sor->func->dp->lanes[i];
nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
break;
);
}
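/* Link configuration: bits 20:16 of the DP control word appear to be a
* mask of enabled lanes ((1 << nr) - 1), bit 14 enhanced framing, and
* bit 18 of the clock control selects the 2.7GHz link rate for DPCD
* bandwidth codes above 0x06 (1.62GHz).
*/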
int
g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 loff = nv50_sor_link(sor);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
if (sor->dp.ef)
dpctrl |= 0x00004000;
if (sor->dp.bw > 0x06)
clksor |= 0x00040000;
nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
return 0;
}
const struct nvkm_ior_func_dp
g94_sor_dp = {
.lanes = { 2, 1, 0, 3},
.links = g94_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = g94_sor_dp_pattern,
.drive = g94_sor_dp_drive,
.audio_sym = g94_sor_dp_audio_sym,
.activesym = g94_sor_dp_activesym,
.watermark = g94_sor_dp_watermark,
};
static bool
g94_sor_war_needed(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
if (sor->asy.proto == TMDS) {
switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
case 0x00000000:
case 0x00030000:
return true;
default:
break;
}
}
return false;
}
static void
g94_sor_war_update_sppll1(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_ior *ior;
bool used = false;
u32 clksor;
list_for_each_entry(ior, &disp->iors, head) {
if (ior->type != SOR)
continue;
clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
switch (clksor & 0x03000000) {
case 0x02000000:
case 0x03000000:
used = true;
break;
default:
break;
}
}
if (used)
return;
nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
}
static void
g94_sor_war_3(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
u32 sorpwr;
if (!g94_sor_war_needed(sor))
return;
sorpwr = nvkm_rd32(device, 0x61c004 + soff);
if (sorpwr & 0x00000001) {
u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
u32 pd_pc = (seqctl & 0x00000f00) >> 8;
u32 pu_pc = seqctl & 0x0000000f;
nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
break;
);
nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
break;
);
nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
}
nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
if (sorpwr & 0x00000001)
nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
g94_sor_war_update_sppll1(sor->disp);
}
static void
g94_sor_war_2(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
if (!g94_sor_war_needed(sor))
return;
nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
nvkm_usec(device, 400, NVKM_DELAY);
nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
u32 pu_pc = seqctl & 0x0000000f;
nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
}
}
void
g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
u32 ctrl = nvkm_rd32(device, 0x610794 + coff);
state->proto_evo = (ctrl & 0x00000f00) >> 8;
switch (state->proto_evo) {
case 0: state->proto = LVDS; state->link = 1; break;
case 1: state->proto = TMDS; state->link = 1; break;
case 2: state->proto = TMDS; state->link = 2; break;
case 5: state->proto = TMDS; state->link = 3; break;
case 8: state->proto = DP; state->link = 1; break;
case 9: state->proto = DP; state->link = 2; break;
default:
state->proto = UNKNOWN;
break;
}
state->head = ctrl & 0x00000003;
nv50_pior_depth(sor, state, ctrl);
}
static const struct nvkm_ior_func
g94_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
.war_2 = g94_sor_war_2,
.war_3 = g94_sor_war_3,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
static int
g94_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&g94_sor, disp, SOR, id, false);
}
int
g94_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
*pmask = (nvkm_rd32(device, 0x610184) & 0x0f000000) >> 24;
return 4;
}
static const struct nvkm_disp_mthd_list
g94_disp_core_mthd_sor = {
.mthd = 0x0040,
.addr = 0x000008,
.data = {
{ 0x0600, 0x610794 },
{}
}
};
const struct nvkm_disp_chan_mthd
g94_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
{ "DAC", 3, &g84_disp_core_mthd_dac },
{ "SOR", 4, &g94_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
}
};
const struct nvkm_disp_chan_user
g94_disp_core = {
.func = &nv50_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &g94_disp_core_mthd,
};
static const struct nvkm_disp_func
g94_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = g94_sor_cnt, .new = g94_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,GT206_DISP },
.user = {
{{0,0, G82_DISP_CURSOR }, nvkm_disp_chan_new, & nv50_disp_curs },
{{0,0, G82_DISP_OVERLAY }, nvkm_disp_chan_new, & nv50_disp_oimm },
{{0,0,GT200_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,GT206_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g94_disp_core },
{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, >200_disp_ovly },
{}
},
};
int
g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&g94_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "head.h"
#include <core/client.h>
#include <nvif/cl0046.h>
#include <nvif/unpack.h>
struct nvkm_head *
nvkm_head_find(struct nvkm_disp *disp, int id)
{
struct nvkm_head *head;
list_for_each_entry(head, &disp->heads, head) {
if (head->id == id)
return head;
}
return NULL;
}
void
nvkm_head_del(struct nvkm_head **phead)
{
struct nvkm_head *head = *phead;
if (head) {
HEAD_DBG(head, "dtor");
list_del(&head->head);
kfree(*phead);
*phead = NULL;
}
}
int
nvkm_head_new_(const struct nvkm_head_func *func,
struct nvkm_disp *disp, int id)
{
struct nvkm_head *head;
if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
return -ENOMEM;
head->func = func;
head->disp = disp;
head->id = id;
list_add_tail(&head->head, &disp->heads);
HEAD_DBG(head, "ctor");
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uhead(p) container_of((p), struct nvkm_head, object)
#include "head.h"
#include <core/event.h>
#include <nvif/if0013.h>
#include <nvif/event.h>
static int
nvkm_uhead_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
struct nvkm_head *head = nvkm_uhead(object);
union nvif_head_event_args *args = argv;
if (!uevent)
return 0;
if (argc != sizeof(args->vn))
return -ENOSYS;
return nvkm_uevent_add(uevent, &head->disp->vblank, head->id,
NVKM_DISP_HEAD_EVENT_VBLANK, NULL);
}
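/* Scanout position query: ktime is sampled both before and after
* reading the RG raster position so the caller can bound when the
* hline/vline sample was taken, which the drm core uses to refine
* vblank timestamps.
*/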
static int
nvkm_uhead_mthd_scanoutpos(struct nvkm_head *head, void *argv, u32 argc)
{
union nvif_head_scanoutpos_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
head->func->state(head, &head->arm);
args->v0.vtotal = head->arm.vtotal;
args->v0.vblanks = head->arm.vblanks;
args->v0.vblanke = head->arm.vblanke;
args->v0.htotal = head->arm.htotal;
args->v0.hblanks = head->arm.hblanks;
args->v0.hblanke = head->arm.hblanke;
/* We don't support reading htotal/vtotal on pre-NV50 VGA,
* so we have to give up and trigger the timestamping
* fallback in the drm core.
*/
if (!args->v0.vtotal || !args->v0.htotal)
return -ENOTSUPP;
args->v0.time[0] = ktime_to_ns(ktime_get());
head->func->rgpos(head, &args->v0.hline, &args->v0.vline);
args->v0.time[1] = ktime_to_ns(ktime_get());
return 0;
}
static int
nvkm_uhead_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_head *head = nvkm_uhead(object);
switch (mthd) {
case NVIF_HEAD_V0_SCANOUTPOS: return nvkm_uhead_mthd_scanoutpos(head, argv, argc);
default:
return -EINVAL;
}
}
static void *
nvkm_uhead_dtor(struct nvkm_object *object)
{
struct nvkm_head *head = nvkm_uhead(object);
struct nvkm_disp *disp = head->disp;
spin_lock(&disp->client.lock);
head->object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_uhead = {
.dtor = nvkm_uhead_dtor,
.mthd = nvkm_uhead_mthd,
.uevent = nvkm_uhead_uevent,
};
int
nvkm_uhead_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct nvkm_head *head;
union nvif_head_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (!(head = nvkm_head_find(disp, args->v0.id)))
return -EINVAL;
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!head->object.func) {
nvkm_object_ctor(&nvkm_uhead, oclass, &head->object);
*pobject = &head->object;
ret = 0;
}
spin_unlock(&disp->client.lock);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/uhead.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static void
g84_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe vsi;
const u32 hoff = head * 0x800;
nvkm_mask(device, 0x61653c + hoff, 0x00010001, 0x00010000);
if (!size)
return;
pack_hdmi_infoframe(&vsi, data, size);
nvkm_wr32(device, 0x616544 + hoff, vsi.header);
nvkm_wr32(device, 0x616548 + hoff, vsi.subpack0_low);
nvkm_wr32(device, 0x61654c + hoff, vsi.subpack0_high);
/* Is there a second (or up to fourth?) set of subpack registers here? */
/* nvkm_wr32(device, 0x616550 + hoff, vsi.subpack1_low); */
/* nvkm_wr32(device, 0x616554 + hoff, vsi.subpack1_high); */
nvkm_mask(device, 0x61653c + hoff, 0x00010001, 0x00010001);
}
static void
g84_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe avi;
const u32 hoff = head * 0x800;
pack_hdmi_infoframe(&avi, data, size);
nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x616528 + hoff, avi.header);
nvkm_wr32(device, 0x61652c + hoff, avi.subpack0_low);
nvkm_wr32(device, 0x616530 + hoff, avi.subpack0_high);
nvkm_wr32(device, 0x616534 + hoff, avi.subpack1_low);
nvkm_wr32(device, 0x616538 + hoff, avi.subpack1_high);
nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
}
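/* HDMI control word: bit 30 enables HDMI, bits 20:16 carry
* max_ac_packet and bits 6:0 the rekey value, per the 0x5f1f007f mask
* written below; the 0x1f000000 field remains unidentified.
*/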
static void
g84_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 ctrl = 0x40000000 * enable |
0x1f000000 /* ??? */ |
max_ac_packet << 16 |
rekey;
const u32 hoff = head * 0x800;
if (!(ctrl & 0x40000000)) {
nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
return;
}
/* Audio InfoFrame */
nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
/* ??? */
nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
}
const struct nvkm_ior_func_hdmi
g84_sor_hdmi = {
.ctrl = g84_sor_hdmi_ctrl,
.infoframe_avi = g84_sor_hdmi_infoframe_avi,
.infoframe_vsi = g84_sor_hdmi_infoframe_vsi,
};
static const struct nvkm_ior_func
g84_sor = {
.state = nv50_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
.hdmi = &g84_sor_hdmi,
};
int
g84_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&g84_sor, disp, SOR, id, false);
}
static const struct nvkm_disp_mthd_list
g84_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x6109a0 },
{ 0x0088, 0x6109c0 },
{ 0x008c, 0x6109c8 },
{ 0x0090, 0x6109b4 },
{ 0x0094, 0x610970 },
{ 0x00a0, 0x610998 },
{ 0x00a4, 0x610964 },
{ 0x00c0, 0x610958 },
{ 0x00e0, 0x6109a8 },
{ 0x00e4, 0x6109d0 },
{ 0x00e8, 0x6109d8 },
{ 0x0100, 0x61094c },
{ 0x0104, 0x610984 },
{ 0x0108, 0x61098c },
{ 0x0800, 0x6109f8 },
{ 0x0808, 0x610a08 },
{ 0x080c, 0x610a10 },
{ 0x0810, 0x610a00 },
{}
}
};
static const struct nvkm_disp_chan_mthd
g84_disp_ovly_mthd = {
.name = "Overlay",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, &g84_disp_ovly_mthd_base },
{}
}
};
const struct nvkm_disp_chan_user
g84_disp_ovly = {
.func = &nv50_disp_dmac_func,
.ctrl = 3,
.user = 3,
.mthd = &g84_disp_ovly_mthd,
};
static const struct nvkm_disp_mthd_list
g84_disp_base_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x0008c4 },
{ 0x0088, 0x0008d0 },
{ 0x008c, 0x0008dc },
{ 0x0090, 0x0008e4 },
{ 0x0094, 0x610884 },
{ 0x00a0, 0x6108a0 },
{ 0x00a4, 0x610878 },
{ 0x00c0, 0x61086c },
{ 0x00c4, 0x610800 },
{ 0x00c8, 0x61080c },
{ 0x00cc, 0x610818 },
{ 0x00e0, 0x610858 },
{ 0x00e4, 0x610860 },
{ 0x00e8, 0x6108ac },
{ 0x00ec, 0x6108b4 },
{ 0x00fc, 0x610824 },
{ 0x0100, 0x610894 },
{ 0x0104, 0x61082c },
{ 0x0110, 0x6108bc },
{ 0x0114, 0x61088c },
{}
}
};
static const struct nvkm_disp_chan_mthd
g84_disp_base_mthd = {
.name = "Base",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, &g84_disp_base_mthd_base },
{ "Image", 2, &nv50_disp_base_mthd_image },
{}
}
};
const struct nvkm_disp_chan_user
g84_disp_base = {
.func = &nv50_disp_dmac_func,
.ctrl = 1,
.user = 1,
.mthd = &g84_disp_base_mthd,
};
const struct nvkm_disp_mthd_list
g84_disp_core_mthd_dac = {
.mthd = 0x0080,
.addr = 0x000008,
.data = {
{ 0x0400, 0x610b58 },
{ 0x0404, 0x610bdc },
{ 0x0420, 0x610bc4 },
{}
}
};
const struct nvkm_disp_mthd_list
g84_disp_core_mthd_head = {
.mthd = 0x0400,
.addr = 0x000540,
.data = {
{ 0x0800, 0x610ad8 },
{ 0x0804, 0x610ad0 },
{ 0x0808, 0x610a48 },
{ 0x080c, 0x610a78 },
{ 0x0810, 0x610ac0 },
{ 0x0814, 0x610af8 },
{ 0x0818, 0x610b00 },
{ 0x081c, 0x610ae8 },
{ 0x0820, 0x610af0 },
{ 0x0824, 0x610b08 },
{ 0x0828, 0x610b10 },
{ 0x082c, 0x610a68 },
{ 0x0830, 0x610a60 },
{ 0x0834, 0x000000 },
{ 0x0838, 0x610a40 },
{ 0x0840, 0x610a24 },
{ 0x0844, 0x610a2c },
{ 0x0848, 0x610aa8 },
{ 0x084c, 0x610ab0 },
{ 0x085c, 0x610c5c },
{ 0x0860, 0x610a84 },
{ 0x0864, 0x610a90 },
{ 0x0868, 0x610b18 },
{ 0x086c, 0x610b20 },
{ 0x0870, 0x610ac8 },
{ 0x0874, 0x610a38 },
{ 0x0878, 0x610c50 },
{ 0x0880, 0x610a58 },
{ 0x0884, 0x610a9c },
{ 0x089c, 0x610c68 },
{ 0x08a0, 0x610a70 },
{ 0x08a4, 0x610a50 },
{ 0x08a8, 0x610ae0 },
{ 0x08c0, 0x610b28 },
{ 0x08c4, 0x610b30 },
{ 0x08c8, 0x610b40 },
{ 0x08d4, 0x610b38 },
{ 0x08d8, 0x610b48 },
{ 0x08dc, 0x610b50 },
{ 0x0900, 0x610a18 },
{ 0x0904, 0x610ab8 },
{ 0x0910, 0x610c70 },
{ 0x0914, 0x610c78 },
{}
}
};
const struct nvkm_disp_chan_mthd
g84_disp_core_mthd = {
.name = "Core",
.addr = 0x000000,
.prev = 0x000004,
.data = {
{ "Global", 1, &nv50_disp_core_mthd_base },
{ "DAC", 3, &g84_disp_core_mthd_dac },
{ "SOR", 2, &nv50_disp_core_mthd_sor },
{ "PIOR", 3, &nv50_disp_core_mthd_pior },
{ "HEAD", 2, &g84_disp_core_mthd_head },
{}
}
};
const struct nvkm_disp_chan_user
g84_disp_core = {
.func = &nv50_disp_core_func,
.ctrl = 0,
.user = 0,
.mthd = &g84_disp_core_mthd,
};
static const struct nvkm_disp_func
g84_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = nv50_sor_cnt, .new = g84_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,G82_DISP },
.user = {
{{0,0,G82_DISP_CURSOR }, nvkm_disp_chan_new, &nv50_disp_curs },
{{0,0,G82_DISP_OVERLAY }, nvkm_disp_chan_new, &nv50_disp_oimm },
{{0,0,G82_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,G82_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g84_disp_core },
{{0,0,G82_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, & g84_disp_ovly },
{}
},
};
int
g84_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&g84_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "hdmi.h"
#include "head.h"
#include "ior.h"
#include <subdev/timer.h>
#include <nvif/class.h>
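/* HDA ELD upload: the buffer is written a byte at a time through a
* single data port (index in bits 15:8, data in bits 7:0), zero-padded
* out to 0x60 bytes, before the valid/enable bits are set in 0x61c448.
*/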
static void
gt215_sor_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = ior->id * 0x800;
int i;
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8) | data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8));
nvkm_mask(device, 0x61c448 + soff, 0x80000002, 0x80000002);
}
static void
gt215_sor_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
u32 data = 0x80000000;
u32 mask = 0x80000001;
if (present)
data |= 0x00000001;
else
mask |= 0x00000002;
nvkm_mask(device, 0x61c448 + ior->id * 0x800, mask, data);
}
const struct nvkm_ior_func_hda
gt215_sor_hda = {
.hpd = gt215_sor_hda_hpd,
.eld = gt215_sor_hda_eld,
};
void
gt215_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 data = 0x80000000 | (0x00000001 * enable);
const u32 mask = 0x8000000d;
nvkm_mask(device, 0x61c1e0 + soff, mask, data);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x61c1e0 + soff) & 0x80000000))
break;
);
}
static const struct nvkm_ior_func_dp
gt215_sor_dp = {
.lanes = { 2, 1, 0, 3 },
.links = g94_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = g94_sor_dp_pattern,
.drive = g94_sor_dp_drive,
.audio = gt215_sor_dp_audio,
.audio_sym = g94_sor_dp_audio_sym,
.activesym = g94_sor_dp_activesym,
.watermark = g94_sor_dp_watermark,
};
static void
gt215_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe vsi;
const u32 soff = nv50_ior_base(ior);
pack_hdmi_infoframe(&vsi, data, size);
nvkm_mask(device, 0x61c53c + soff, 0x00010001, 0x00010000);
if (!size)
return;
nvkm_wr32(device, 0x61c544 + soff, vsi.header);
nvkm_wr32(device, 0x61c548 + soff, vsi.subpack0_low);
nvkm_wr32(device, 0x61c54c + soff, vsi.subpack0_high);
/* Is there a second (or up to fourth?) set of subpack registers here? */
/* nvkm_wr32(device, 0x61c550 + soff, vsi.subpack1_low); */
/* nvkm_wr32(device, 0x61c554 + soff, vsi.subpack1_high); */
nvkm_mask(device, 0x61c53c + soff, 0x00010001, 0x00010001);
}
static void
gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
struct packed_hdmi_infoframe avi;
const u32 soff = nv50_ior_base(ior);
pack_hdmi_infoframe(&avi, data, size);
nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
if (!size)
return;
nvkm_wr32(device, 0x61c528 + soff, avi.header);
nvkm_wr32(device, 0x61c52c + soff, avi.subpack0_low);
nvkm_wr32(device, 0x61c530 + soff, avi.subpack0_high);
nvkm_wr32(device, 0x61c534 + soff, avi.subpack1_low);
nvkm_wr32(device, 0x61c538 + soff, avi.subpack1_high);
nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
}
static void
gt215_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 ctrl = 0x40000000 * enable |
0x1f000000 /* ??? */ |
max_ac_packet << 16 |
rekey;
const u32 soff = nv50_ior_base(ior);
if (!(ctrl & 0x40000000)) {
nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
nvkm_mask(device, 0x61c53c + soff, 0x00000001, 0x00000000);
nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
return;
}
/* Audio InfoFrame */
nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
/* ??? */
nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
}
const struct nvkm_ior_func_hdmi
gt215_sor_hdmi = {
.ctrl = gt215_sor_hdmi_ctrl,
.infoframe_avi = gt215_sor_hdmi_infoframe_avi,
.infoframe_vsi = gt215_sor_hdmi_infoframe_vsi,
};
static const struct nvkm_ior_func
gt215_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
.hdmi = >215_sor_hdmi,
.dp = >215_sor_dp,
.hda = >215_sor_hda,
};
static int
gt215_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(>215_sor, disp, SOR, id, true);
}
static const struct nvkm_disp_func
gt215_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = g94_sor_cnt, .new = gt215_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,GT214_DISP },
.user = {
{{0,0,GT214_DISP_CURSOR }, nvkm_disp_chan_new, & nv50_disp_curs },
{{0,0,GT214_DISP_OVERLAY }, nvkm_disp_chan_new, & nv50_disp_oimm },
{{0,0,GT214_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,GT214_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g94_disp_core },
{{0,0,GT214_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, & g84_disp_ovly },
{}
},
};
int
gt215_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(>215_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <subdev/timer.h>
#include <nvif/class.h>
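/* The clock control encodings here cover more link rates than earlier
* chips: DPCD bandwidth codes 0x06/0x0a/0x14/0x1e are the standard
* RBR/HBR/HBR2/HBR3 rates (1.62/2.7/5.4/8.1Gbps), while 0x08/0x09/
* 0x0c/0x10 look like the eDP intermediate rates (2.16/2.43/3.24/
* 4.32Gbps), each code being a multiple of 0.27Gbps.
*/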
static int
ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(sor);
const u32 loff = nv50_sor_link(sor);
u32 dpctrl = 0x00000000;
u32 clksor = 0x00000000;
switch (sor->dp.bw) {
case 0x06: clksor |= 0x00000000; break;
case 0x0a: clksor |= 0x00040000; break;
case 0x14: clksor |= 0x00080000; break;
case 0x1e: clksor |= 0x000c0000; break;
case 0x08: clksor |= 0x00100000; break;
case 0x09: clksor |= 0x00140000; break;
case 0x0c: clksor |= 0x00180000; break;
case 0x10: clksor |= 0x001c0000; break;
default:
WARN_ON(1);
return -EINVAL;
}
dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
if (sor->dp.mst)
dpctrl |= 0x40000000;
if (sor->dp.ef)
dpctrl |= 0x00004000;
nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
/*XXX*/
nvkm_msec(device, 40, NVKM_DELAY);
nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
return 0;
}
static const struct nvkm_ior_func_dp
ga102_sor_dp = {
.lanes = { 0, 1, 2, 3 },
.links = ga102_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gm107_sor_dp_pattern,
.drive = gm200_sor_dp_drive,
.vcpi = tu102_sor_dp_vcpi,
.audio = gv100_sor_dp_audio,
.audio_sym = gv100_sor_dp_audio_sym,
.watermark = gv100_sor_dp_watermark,
};
static void
ga102_sor_clock(struct nvkm_ior *sor)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
u32 div2 = 0;
if (sor->asy.proto == TMDS) {
if (sor->tmds.high_speed)
div2 = 1;
}
nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
}
static const struct nvkm_ior_func
ga102_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
},
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = ga102_sor_clock,
.hdmi = &gv100_sor_hdmi,
.dp = &ga102_sor_dp,
.hda = &gv100_sor_hda,
};
static int
ga102_sor_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 hda = nvkm_rd32(device, 0x08a15c);
return nvkm_ior_new_(&ga102_sor, disp, SOR, id, hda & BIT(id));
}
static const struct nvkm_disp_func
ga102_disp = {
.oneinit = nv50_disp_oneinit,
.init = tu102_disp_init,
.fini = gv100_disp_fini,
.intr = gv100_disp_intr,
.super = gv100_disp_super,
.uevent = &gv100_disp_chan_uevent,
.wndw = { .cnt = gv100_disp_wndw_cnt },
.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
.sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
.ramht_size = 0x2000,
.root = { 0, 0,GA102_DISP },
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
{{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
{{ 0, 0,GA102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
{{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
{}
},
};
int
ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "outp.h"
#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/i2c.h>
void
nvkm_outp_route(struct nvkm_disp *disp)
{
struct nvkm_outp *outp;
struct nvkm_ior *ior;
list_for_each_entry(ior, &disp->iors, head) {
if ((outp = ior->arm.outp) && ior->arm.outp != ior->asy.outp) {
OUTP_DBG(outp, "release %s", ior->name);
if (ior->func->route.set)
ior->func->route.set(outp, NULL);
ior->arm.outp = NULL;
}
}
list_for_each_entry(ior, &disp->iors, head) {
if ((outp = ior->asy.outp)) {
OUTP_DBG(outp, "acquire %s", ior->name);
if (ior->asy.outp != ior->arm.outp) {
if (ior->func->route.set)
ior->func->route.set(outp, ior);
ior->arm.outp = ior->asy.outp;
}
}
}
}
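/* Translate a DCB output type into the IOR type/protocol that drives
* it.  Internal (location 0) outputs map onto DACs and SORs directly;
* external (location 1) encoders hang off a PIOR, which always speaks
* TMDS on the wire, even for DP, presumably because the external
* encoder does the protocol conversion (hence "not a bug" below).
*/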
static enum nvkm_ior_proto
nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
{
switch (outp->info.location) {
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
default:
break;
}
break;
case 1:
switch (outp->info.type) {
case DCB_OUTPUT_TMDS: *type = PIOR; return TMDS;
case DCB_OUTPUT_DP : *type = PIOR; return TMDS; /* not a bug */
default:
break;
}
break;
default:
break;
}
WARN_ON(1);
return UNKNOWN;
}
void
nvkm_outp_release(struct nvkm_outp *outp, u8 user)
{
struct nvkm_ior *ior = outp->ior;
OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior);
if (ior) {
outp->acquired &= ~user;
if (!outp->acquired) {
if (outp->func->release && outp->ior)
outp->func->release(outp);
outp->ior->asy.outp = NULL;
outp->ior = NULL;
}
}
}
static inline int
nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
{
outp->ior = ior;
outp->ior->asy.outp = outp;
outp->ior->asy.link = outp->info.sorconf.link;
outp->acquired |= user;
return 0;
}
static inline int
nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
u8 user, bool hda)
{
struct nvkm_ior *ior;
/* Failing the reuse pass in nvkm_outp_acquire(), a completely unused
* OR is the next best thing.
*/
list_for_each_entry(ior, &outp->disp->iors, head) {
if (!ior->identity && ior->hda == hda &&
!ior->asy.outp && ior->type == type && !ior->arm.outp &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Last resort is to assign an OR that's already active on HW,
* but will be released during the next modeset.
*/
list_for_each_entry(ior, &outp->disp->iors, head) {
if (!ior->identity && ior->hda == hda &&
!ior->asy.outp && ior->type == type &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
return -ENOSPC;
}
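/* OR assignment preference, in order: the OR already driving this
* output on HW (to avoid needless switching), then, via
* nvkm_outp_acquire_hda(), a completely unused OR with the desired HDA
* capability, and finally one that's active on HW but due to be
* released on the next modeset.
*/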
int
nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
{
struct nvkm_ior *ior = outp->ior;
enum nvkm_ior_proto proto;
enum nvkm_ior_type type;
OUTP_TRACE(outp, "acquire %02x |= %02x %p", outp->acquired, user, ior);
if (ior) {
outp->acquired |= user;
return 0;
}
/* Lookup a compatible, and unused, OR to assign to the device. */
proto = nvkm_outp_xlat(outp, &type);
if (proto == UNKNOWN)
return -ENOSYS;
/* Deal with panels requiring identity-mapped SOR assignment. */
if (outp->identity) {
ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
if (WARN_ON(!ior))
return -ENOSPC;
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->iors, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
/*XXX: For various complicated reasons, we can't outright switch
* the boot-time OR on the first modeset without some fairly
* invasive changes.
*
* The systems that were fixed by modifying the OR selection
* code to account for HDA support shouldn't regress here as
* the HDA-enabled ORs match the relevant output's pad macro
* index, and the firmware seems to select an OR this way.
*
* This warning is to make it obvious if that proves wrong.
*/
WARN_ON(hda && !ior->hda);
return nvkm_outp_acquire_ior(outp, user, ior);
}
}
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/
if (!hda) {
if (!nvkm_outp_acquire_hda(outp, type, user, false))
return 0;
/* Use a HDA-supporting SOR anyway. */
return nvkm_outp_acquire_hda(outp, type, user, true);
}
/* We want HDA, try to acquire an OR that supports it. */
if (!nvkm_outp_acquire_hda(outp, type, user, true))
return 0;
/* There weren't any free ORs that support HDA, grab one that
* doesn't and at least allow display to work still.
*/
return nvkm_outp_acquire_hda(outp, type, user, false);
}
void
nvkm_outp_fini(struct nvkm_outp *outp)
{
if (outp->func->fini)
outp->func->fini(outp);
}
static void
nvkm_outp_init_route(struct nvkm_outp *outp)
{
struct nvkm_disp *disp = outp->disp;
enum nvkm_ior_proto proto;
enum nvkm_ior_type type;
struct nvkm_ior *ior;
int id, link;
/* Find any OR from the class that is able to support this device. */
proto = nvkm_outp_xlat(outp, &type);
if (proto == UNKNOWN)
return;
ior = nvkm_ior_find(disp, type, -1);
if (!ior) {
WARN_ON(1);
return;
}
/* Determine the specific OR, if any, this device is attached to. */
if (ior->func->route.get) {
id = ior->func->route.get(outp, &link);
if (id < 0) {
OUTP_DBG(outp, "no route");
return;
}
} else {
/* Prior to DCB 4.1, this is hardwired like so. */
id = ffs(outp->info.or) - 1;
link = (ior->type == SOR) ? outp->info.sorconf.link : 0;
}
ior = nvkm_ior_find(disp, type, id);
if (!ior) {
WARN_ON(1);
return;
}
/* Determine if the OR is already configured for this device. */
ior->func->state(ior, &ior->arm);
if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto);
/* The EFI GOP driver on Ampere can leave unused DP links routed,
* which we don't expect. The DisableLT IED script *should* get
* us back to where we need to be.
*/
if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
nvkm_dp_disable(outp, ior);
return;
}
OUTP_DBG(outp, "on %s link %x", ior->name, ior->arm.link);
ior->arm.outp = outp;
}
void
nvkm_outp_init(struct nvkm_outp *outp)
{
nvkm_outp_init_route(outp);
if (outp->func->init)
outp->func->init(outp);
}
void
nvkm_outp_del(struct nvkm_outp **poutp)
{
struct nvkm_outp *outp = *poutp;
if (outp && !WARN_ON(!outp->func)) {
if (outp->func->dtor)
*poutp = outp->func->dtor(outp);
kfree(*poutp);
*poutp = NULL;
}
}
int
nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
struct nvkm_outp *outp;
enum nvkm_ior_proto proto;
enum nvkm_ior_type type;
if (!(outp = *poutp = kzalloc(sizeof(*outp), GFP_KERNEL)))
return -ENOMEM;
outp->func = func;
outp->disp = disp;
outp->index = index;
outp->info = *dcbE;
outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",
outp->info.type, outp->info.location, outp->info.or,
outp->info.type >= 2 ? outp->info.sorconf.link : 0,
outp->info.connector, outp->info.i2c_index,
outp->info.bus, outp->info.heads);
/* Cull output paths we can't map to an output resource. */
proto = nvkm_outp_xlat(outp, &type);
if (proto == UNKNOWN)
return -ENODEV;
return 0;
}
static const struct nvkm_outp_func
nvkm_outp = {
};
int
nvkm_outp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
struct nvkm_outp **poutp)
{
return nvkm_outp_new_(&nvkm_outp, disp, index, dcbE, poutp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uoutp(p) container_of((p), struct nvkm_outp, object)
#include "outp.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include <nvif/if0012.h>
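/* Userspace-visible object wrapping an output path.  Methods are split
 * into those that require an acquired OR and those that do not;
 * nvkm_uoutp_mthd() below tries the no-acquire table first, and a
 * positive return there means "retry against the acquired table".
 */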
static int
nvkm_uoutp_mthd_dp_mst_vcpi(struct nvkm_outp *outp, void *argv, u32 argc)
{
struct nvkm_ior *ior = outp->ior;
union nvif_outp_dp_mst_vcpi_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (!ior->func->dp || !ior->func->dp->vcpi || !nvkm_head_find(outp->disp, args->v0.head))
return -EINVAL;
ior->func->dp->vcpi(ior, args->v0.head, args->v0.start_slot, args->v0.num_slots,
args->v0.pbn, args->v0.aligned_pbn);
return 0;
}
static int
nvkm_uoutp_mthd_dp_retrain(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_dp_retrain_args *args = argv;
if (argc != sizeof(args->vn))
return -ENOSYS;
if (!atomic_read(&outp->dp.lt.done))
return 0;
return outp->func->acquire(outp);
}
static int
nvkm_uoutp_mthd_dp_aux_pwr(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_dp_aux_pwr_args *args = argv;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
outp->dp.enabled = !!args->v0.state;
nvkm_dp_enable(outp, outp->dp.enabled);
return 0;
}
static int
nvkm_uoutp_mthd_hda_eld(struct nvkm_outp *outp, void *argv, u32 argc)
{
struct nvkm_ior *ior = outp->ior;
union nvif_outp_hda_eld_args *args = argv;
if (argc < sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
argc -= sizeof(args->v0);
if (!ior->hda || !nvkm_head_find(outp->disp, args->v0.head))
return -EINVAL;
if (argc > 0x60)
return -E2BIG;
if (argc && args->v0.data[0]) {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp->audio(ior, args->v0.head, true);
ior->func->hda->hpd(ior, args->v0.head, true);
ior->func->hda->eld(ior, args->v0.head, args->v0.data, argc);
} else {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp->audio(ior, args->v0.head, false);
ior->func->hda->hpd(ior, args->v0.head, false);
}
return 0;
}
static int
nvkm_uoutp_mthd_infoframe(struct nvkm_outp *outp, void *argv, u32 argc)
{
struct nvkm_ior *ior = outp->ior;
union nvif_outp_infoframe_args *args = argv;
ssize_t size = argc - sizeof(*args);
if (argc < sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (!nvkm_head_find(outp->disp, args->v0.head))
return -EINVAL;
switch (ior->func->hdmi ? args->v0.type : 0xff) {
case NVIF_OUTP_INFOFRAME_V0_AVI:
ior->func->hdmi->infoframe_avi(ior, args->v0.head, &args->v0.data, size);
return 0;
case NVIF_OUTP_INFOFRAME_V0_VSI:
ior->func->hdmi->infoframe_vsi(ior, args->v0.head, &args->v0.data, size);
return 0;
default:
break;
}
return -EINVAL;
}
static int
nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
{
struct nvkm_head *head = outp->asy.head;
struct nvkm_ior *ior = outp->ior;
union nvif_outp_release_args *args = argv;
if (argc != sizeof(args->vn))
return -ENOSYS;
if (ior->func->hdmi && head) {
ior->func->hdmi->infoframe_avi(ior, head->id, NULL, 0);
ior->func->hdmi->infoframe_vsi(ior, head->id, NULL, 0);
ior->func->hdmi->ctrl(ior, head->id, false, 0, 0);
}
nvkm_outp_release(outp, NVKM_OUTP_USER);
return 0;
}
static int
nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 link_nr, u8 link_bw, bool hda, bool mst)
{
int ret;
ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hda);
if (ret)
return ret;
memcpy(outp->dp.dpcd, dpcd, sizeof(outp->dp.dpcd));
outp->dp.lt.nr = link_nr;
outp->dp.lt.bw = link_bw;
outp->dp.lt.mst = mst;
return 0;
}
static int
nvkm_uoutp_mthd_acquire_tmds(struct nvkm_outp *outp, u8 head, u8 hdmi, u8 hdmi_max_ac_packet,
u8 hdmi_rekey, u8 hdmi_scdc, u8 hdmi_hda)
{
struct nvkm_ior *ior;
int ret;
if (!(outp->asy.head = nvkm_head_find(outp->disp, head)))
return -EINVAL;
ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hdmi && hdmi_hda);
if (ret)
return ret;
ior = outp->ior;
if (hdmi) {
if (!ior->func->hdmi ||
hdmi_max_ac_packet > 0x1f || hdmi_rekey > 0x7f ||
(hdmi_scdc && !ior->func->hdmi->scdc)) {
nvkm_outp_release(outp, NVKM_OUTP_USER);
return -EINVAL;
}
ior->func->hdmi->ctrl(ior, head, hdmi, hdmi_max_ac_packet, hdmi_rekey);
if (ior->func->hdmi->scdc)
ior->func->hdmi->scdc(ior, hdmi_scdc);
}
return 0;
}
static int
nvkm_uoutp_mthd_acquire_lvds(struct nvkm_outp *outp, bool dual, bool bpc8)
{
if (outp->info.type != DCB_OUTPUT_LVDS)
return -EINVAL;
outp->lvds.dual = dual;
outp->lvds.bpc8 = bpc8;
return nvkm_outp_acquire(outp, NVKM_OUTP_USER, false);
}
static int
nvkm_uoutp_mthd_acquire(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_acquire_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (outp->ior)
return -EBUSY;
switch (args->v0.proto) {
case NVIF_OUTP_ACQUIRE_V0_RGB_CRT:
ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, false);
break;
case NVIF_OUTP_ACQUIRE_V0_TMDS:
ret = nvkm_uoutp_mthd_acquire_tmds(outp, args->v0.tmds.head,
args->v0.tmds.hdmi,
args->v0.tmds.hdmi_max_ac_packet,
args->v0.tmds.hdmi_rekey,
args->v0.tmds.hdmi_scdc,
args->v0.tmds.hdmi_hda);
break;
case NVIF_OUTP_ACQUIRE_V0_LVDS:
ret = nvkm_uoutp_mthd_acquire_lvds(outp, args->v0.lvds.dual, args->v0.lvds.bpc8);
break;
case NVIF_OUTP_ACQUIRE_V0_DP:
ret = nvkm_uoutp_mthd_acquire_dp(outp, args->v0.dp.dpcd,
args->v0.dp.link_nr,
args->v0.dp.link_bw,
args->v0.dp.hda != 0,
args->v0.dp.mst != 0);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
return ret;
args->v0.or = outp->ior->id;
args->v0.link = outp->ior->asy.link;
return 0;
}
static int
nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_load_detect_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
if (ret == 0) {
if (outp->ior->func->sense) {
ret = outp->ior->func->sense(outp->ior, args->v0.data);
args->v0.load = ret < 0 ? 0 : ret;
} else {
ret = -EINVAL;
}
nvkm_outp_release(outp, NVKM_OUTP_PRIV);
}
return ret;
}
static int
nvkm_uoutp_mthd_acquired(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
{
switch (mthd) {
case NVIF_OUTP_V0_RELEASE : return nvkm_uoutp_mthd_release (outp, argv, argc);
case NVIF_OUTP_V0_INFOFRAME : return nvkm_uoutp_mthd_infoframe (outp, argv, argc);
case NVIF_OUTP_V0_HDA_ELD : return nvkm_uoutp_mthd_hda_eld (outp, argv, argc);
case NVIF_OUTP_V0_DP_RETRAIN : return nvkm_uoutp_mthd_dp_retrain (outp, argv, argc);
case NVIF_OUTP_V0_DP_MST_VCPI: return nvkm_uoutp_mthd_dp_mst_vcpi(outp, argv, argc);
default:
break;
}
return -EINVAL;
}
static int
nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
{
switch (mthd) {
case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc);
case NVIF_OUTP_V0_ACQUIRE : return nvkm_uoutp_mthd_acquire (outp, argv, argc);
case NVIF_OUTP_V0_DP_AUX_PWR : return nvkm_uoutp_mthd_dp_aux_pwr (outp, argv, argc);
default:
break;
}
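	/* Positive return: not handled here, the caller should retry this
	 * method against the acquired-state table (if an OR is held).
	 */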
return 1;
}
static int
nvkm_uoutp_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_outp *outp = nvkm_uoutp(object);
struct nvkm_disp *disp = outp->disp;
int ret;
mutex_lock(&disp->super.mutex);
ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc);
if (ret <= 0)
goto done;
if (outp->ior)
ret = nvkm_uoutp_mthd_acquired(outp, mthd, argv, argc);
else
ret = -EIO;
done:
mutex_unlock(&disp->super.mutex);
return ret;
}
static void *
nvkm_uoutp_dtor(struct nvkm_object *object)
{
struct nvkm_outp *outp = nvkm_uoutp(object);
struct nvkm_disp *disp = outp->disp;
spin_lock(&disp->client.lock);
outp->object.func = NULL;
spin_unlock(&disp->client.lock);
return NULL;
}
static const struct nvkm_object_func
nvkm_uoutp = {
.dtor = nvkm_uoutp_dtor,
.mthd = nvkm_uoutp_mthd,
};
int
nvkm_uoutp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
struct nvkm_outp *outt, *outp = NULL;
union nvif_outp_args *args = argv;
int ret;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
list_for_each_entry(outt, &disp->outps, head) {
if (outt->index == args->v0.id) {
outp = outt;
break;
}
}
if (!outp)
return -EINVAL;
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!outp->object.func) {
nvkm_object_ctor(&nvkm_uoutp, oclass, &outp->object);
*pobject = &outp->object;
ret = 0;
}
spin_unlock(&disp->client.lock);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static const struct nvkm_ior_func
gp100_sor = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
},
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
.hdmi = &gm200_sor_hdmi,
.dp = &gm200_sor_dp,
.hda = &gf119_sor_hda,
};
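/* Which SORs have HDA wired up appears to be advertised by firmware:
 * if bit 30 of the scratch word at 0x08a15c is set, that word holds a
 * per-SOR bitmask, otherwise the value at 0x10ebb0 (shifted down by 8)
 * is used instead.
 */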
int
gp100_sor_new(struct nvkm_disp *disp, int id)
{
struct nvkm_device *device = disp->engine.subdev.device;
u32 hda;
if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
hda = nvkm_rd32(device, 0x10ebb0) >> 8;
return nvkm_ior_new_(&gp100_sor, disp, SOR, id, hda & BIT(id));
}
static const struct nvkm_disp_func
gp100_disp = {
.oneinit = nv50_disp_oneinit,
.init = gf119_disp_init,
.fini = gf119_disp_fini,
.intr = gf119_disp_intr,
.intr_error = gf119_disp_intr_error,
.super = gf119_disp_super,
.uevent = &gf119_disp_chan_uevent,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
.sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
.root = { 0,0,GP100_DISP },
.user = {
{{0,0,GK104_DISP_CURSOR }, nvkm_disp_chan_new, &gf119_disp_curs },
{{0,0,GK104_DISP_OVERLAY }, nvkm_disp_chan_new, &gf119_disp_oimm },
{{0,0,GK110_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, &gf119_disp_base },
{{0,0,GP100_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gk104_disp_core },
{{0,0,GK104_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gk104_disp_ovly },
{}
},
};
int
gp100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&gp100_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"
#include <drm/display/drm_dp.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <nvif/event.h>
/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
* the x86 option ROM. However, the relevant VBIOS table versions weren't modified,
* so we're unable to detect this in a nice way.
*/
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
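/* State carried through a single link-training attempt.  'repeater'
 * counts down from the number of LTTPRs to 0, with 0 meaning the sink
 * itself; stat[]/conf[] shadow the DPCD lane status and adjust-request
 * fields read from (and written back to) whichever device is currently
 * being trained.
 */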
struct lt_state {
struct nvkm_outp *outp;
int repeaters;
int repeater;
u8 stat[6];
u8 conf[4];
bool pc2;
u8 pc2stat;
u8 pc2conf[2];
};
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
struct nvkm_outp *outp = lt->outp;
u32 addr;
int ret;
usleep_range(delay, delay * 2);
if (lt->repeater)
addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
else
addr = DPCD_LS02;
	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[0], 3);
if (ret)
return ret;
if (lt->repeater)
addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
else
addr = DPCD_LS06;
	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[4], 2);
if (ret)
return ret;
if (pc) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LS0C, &lt->pc2stat, 1);
if (ret)
lt->pc2stat = 0x00;
OUTP_TRACE(outp, "status %6ph pc2 %02x", lt->stat, lt->pc2stat);
} else {
OUTP_TRACE(outp, "status %6ph", lt->stat);
}
return 0;
}
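/* Apply the swing/pre-emphasis adjustments requested by the sink in
 * the last status read, clamping at the source's maximum and flagging
 * MAX_*_REACHED accordingly, then look up the matching drive settings
 * in the VBIOS DP config table and write the lane set back over AUX.
 */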
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
struct nvkm_outp *outp = lt->outp;
struct nvkm_ior *ior = outp->ior;
struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
struct nvbios_dpout info;
struct nvbios_dpcfg ocfg;
u8 ver, hdr, cnt, len;
u32 addr;
u32 data;
int ret, i;
for (i = 0; i < ior->dp.nr; i++) {
u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
u8 lpre = (lane & 0x0c) >> 2;
u8 lvsw = (lane & 0x03) >> 0;
u8 hivs = 3 - lpre;
u8 hipe = 3;
u8 hipc = 3;
if (lpc2 >= hipc)
lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
if (lpre >= hipe) {
lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
lvsw = hivs = 3 - (lpre & 3);
} else
if (lvsw >= hivs) {
lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
}
lt->conf[i] = (lpre << 3) | lvsw;
lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
OUTP_TRACE(outp, "config lane %d %02x %02x", i, lt->conf[i], lpc2);
if (lt->repeater != lt->repeaters)
continue;
data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
&ver, &hdr, &cnt, &len, &info);
if (!data)
continue;
data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3, lpre & 3,
&ver, &hdr, &cnt, &len, &ocfg);
if (!data)
continue;
ior->func->dp->drive(ior, i, ocfg.pc, ocfg.dc, ocfg.pe, ocfg.tx_pu);
}
if (lt->repeater)
addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
else
addr = DPCD_LC03(0);
ret = nvkm_wraux(outp->dp.aux, addr, lt->conf, 4);
if (ret)
return ret;
if (pc) {
ret = nvkm_wraux(outp->dp.aux, DPCD_LC0F, lt->pc2conf, 2);
if (ret)
return ret;
}
return 0;
}
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
struct nvkm_outp *outp = lt->outp;
u32 addr;
u8 sink_tp;
OUTP_TRACE(outp, "training pattern %d", pattern);
outp->ior->func->dp->pattern(outp->ior, pattern);
if (lt->repeater)
addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
else
addr = DPCD_LC02;
nvkm_rdaux(outp->dp.aux, addr, &sink_tp, 1);
sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
sink_tp |= (pattern != 4) ? pattern : 7;
if (pattern != 0)
sink_tp |= DPCD_LC02_SCRAMBLING_DISABLE;
else
sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
nvkm_wraux(outp->dp.aux, addr, &sink_tp, 1);
}
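/* Channel-equalisation phase.  The training pattern (TPS4/3/2) is the
 * best one both ends support; the wait before each status poll comes
 * from the DPCD AUX read interval field in units of 4ms (e.g. a raw
 * value of 2 gives 8000us), falling back to 400us when it reads zero.
 */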
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
struct nvkm_i2c_aux *aux = lt->outp->dp.aux;
bool eq_done = false, cr_done = true;
int tries = 0, usec = 0, i;
u8 data;
if (lt->repeater) {
if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
nvkm_dp_train_pattern(lt, 4);
} else {
if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
lt->outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
nvkm_dp_train_pattern(lt, 4);
else
if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
lt->outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
nvkm_dp_train_pattern(lt, 3);
else
nvkm_dp_train_pattern(lt, 2);
usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
}
do {
if ((tries &&
nvkm_dp_train_drive(lt, lt->pc2)) ||
nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
break;
eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
for (i = 0; i < lt->outp->ior->dp.nr && eq_done; i++) {
u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
if (!(lane & DPCD_LS02_LANE0_CR_DONE))
cr_done = false;
if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
!(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
eq_done = false;
}
} while (!eq_done && cr_done && ++tries <= 5);
return eq_done ? 0 : -1;
}
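/* Clock-recovery phase: drive TPS1 and adjust levels up to five times,
 * restarting the count whenever the sink requests a different voltage
 * swing, and giving up early if maximum swing is hit without CR.
 */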
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
bool cr_done = false, abort = false;
int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
int tries = 0, usec = 0, i;
nvkm_dp_train_pattern(lt, 1);
if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
do {
if (nvkm_dp_train_drive(lt, false) ||
nvkm_dp_train_sense(lt, false, usec ? usec : 100))
break;
cr_done = true;
for (i = 0; i < lt->outp->ior->dp.nr; i++) {
u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
cr_done = false;
if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
abort = true;
break;
}
}
if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
tries = 0;
}
} while (!cr_done && !abort && ++tries < 5);
return cr_done ? 0 : -1;
}
static int
nvkm_dp_train_link(struct nvkm_outp *outp, int rate)
{
struct nvkm_ior *ior = outp->ior;
struct lt_state lt = {
.outp = outp,
.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED,
};
u8 sink[2], data;
int ret;
OUTP_DBG(outp, "training %dx%02x", ior->dp.nr, ior->dp.bw);
/* Select LTTPR non-transparent mode if we have a valid configuration,
* use transparent mode otherwise.
*/
if (outp->dp.lttpr[0] >= 0x14) {
data = DPCD_LTTPR_MODE_TRANSPARENT;
nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
if (outp->dp.lttprs) {
data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
lt.repeaters = outp->dp.lttprs;
}
}
/* Set desired link configuration on the sink. */
sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
sink[1] = ior->dp.nr;
if (ior->dp.ef)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
if (ret)
return ret;
if (outp->dp.rate[rate].dpcd >= 0) {
ret = nvkm_rdaux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
if (ret)
return ret;
sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
sink[0] |= outp->dp.rate[rate].dpcd;
ret = nvkm_wraux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
if (ret)
return ret;
}
/* Attempt to train the link in this configuration. */
for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
if (lt.repeater)
OUTP_DBG(outp, "training LTTPR%d", lt.repeater);
else
OUTP_DBG(outp, "training sink");
memset(lt.stat, 0x00, sizeof(lt.stat));
		ret = nvkm_dp_train_cr(&lt);
		if (ret == 0)
			ret = nvkm_dp_train_eq(&lt);
		nvkm_dp_train_pattern(&lt, 0);
}
return ret;
}
static int
nvkm_dp_train_links(struct nvkm_outp *outp, int rate)
{
struct nvkm_ior *ior = outp->ior;
struct nvkm_disp *disp = outp->disp;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_bios *bios = subdev->device->bios;
u32 lnkcmp;
int ret;
OUTP_DBG(outp, "programming link for %dx%02x", ior->dp.nr, ior->dp.bw);
/* Intersect misc. capabilities of the OR and sink. */
if (disp->engine.subdev.device->chipset < 0x110)
outp->dp.dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
if (disp->engine.subdev.device->chipset < 0xd0)
outp->dp.dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
if (AMPERE_IED_HACK(disp) && (lnkcmp = outp->dp.info.script[0])) {
/* Execute BeforeLinkTraining script from DP Info table. */
while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
lnkcmp += 3;
lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
nvbios_init(&outp->disp->engine.subdev, lnkcmp,
init.outp = &outp->info;
init.or = ior->id;
init.link = ior->asy.link;
);
}
/* Set desired link configuration on the source. */
if ((lnkcmp = outp->dp.info.lnkcmp)) {
if (outp->dp.version < 0x30) {
while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
lnkcmp += 4;
lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
} else {
while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
lnkcmp += 3;
lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
}
nvbios_init(subdev, lnkcmp,
init.outp = &outp->info;
init.or = ior->id;
init.link = ior->asy.link;
);
}
ret = ior->func->dp->links(ior, outp->dp.aux);
if (ret) {
if (ret < 0) {
OUTP_ERR(outp, "train failed with %d", ret);
return ret;
}
return 0;
}
ior->func->dp->power(ior, ior->dp.nr);
/* Attempt to train the link in this configuration. */
return nvkm_dp_train_link(outp, rate);
}
static void
nvkm_dp_train_fini(struct nvkm_outp *outp)
{
/* Execute AfterLinkTraining script from DP Info table. */
nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[1],
init.outp = &outp->info;
init.or = outp->ior->id;
init.link = outp->ior->asy.link;
);
}
static void
nvkm_dp_train_init(struct nvkm_outp *outp)
{
/* Execute EnableSpread/DisableSpread script from DP Info table. */
if (outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[2],
init.outp = &outp->info;
init.or = outp->ior->id;
init.link = outp->ior->asy.link;
);
} else {
nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[3],
init.outp = &outp->info;
init.or = outp->ior->id;
init.link = outp->ior->asy.link;
);
}
if (!AMPERE_IED_HACK(outp->disp)) {
/* Execute BeforeLinkTraining script from DP Info table. */
nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[0],
init.outp = &outp->info;
init.or = outp->ior->id;
init.link = outp->ior->asy.link;
);
}
}
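/* Train the link.  Any lane count/bandwidth fixed at ACQUIRE time is
 * honoured first; otherwise every (lane count, rate) pair that still
 * covers the payload is tried, lanes halving and rates descending.
 * For example, with 4 lanes and dataKBps = 445500 this walks 4xHBR2,
 * 4xHBR, 4xRBR (4 * 162000 >= 445500), then 2xHBR2, and so on until
 * one trains.
 */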
static int
nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
{
struct nvkm_ior *ior = outp->ior;
int ret = -EINVAL, nr, rate;
u8 pwr;
/* Retraining link? Skip source configuration, it can mess up the active modeset. */
if (atomic_read(&outp->dp.lt.done)) {
for (rate = 0; rate < outp->dp.rates; rate++) {
if (outp->dp.rate[rate].rate == ior->dp.bw * 27000)
				return nvkm_dp_train_link(outp, rate);
}
WARN_ON(1);
return -EINVAL;
}
/* Ensure sink is not in a low-power state. */
if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
pwr &= ~DPCD_SC00_SET_POWER;
pwr |= DPCD_SC00_SET_POWER_D0;
nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
}
}
ior->dp.mst = outp->dp.lt.mst;
ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
ior->dp.nr = 0;
/* Link training. */
OUTP_DBG(outp, "training");
nvkm_dp_train_init(outp);
/* Validate and train at configuration requested (if any) on ACQUIRE. */
if (outp->dp.lt.nr) {
for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
for (rate = 0; nr == outp->dp.lt.nr && rate < outp->dp.rates; rate++) {
if (outp->dp.rate[rate].rate / 27000 == outp->dp.lt.bw) {
ior->dp.bw = outp->dp.rate[rate].rate / 27000;
ior->dp.nr = nr;
ret = nvkm_dp_train_links(outp, rate);
}
}
}
}
/* Otherwise, loop through all valid link configurations that support the data rate. */
for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
/* Program selected link configuration. */
ior->dp.bw = outp->dp.rate[rate].rate / 27000;
ior->dp.nr = nr;
ret = nvkm_dp_train_links(outp, rate);
}
}
}
/* Finish up. */
nvkm_dp_train_fini(outp);
if (ret < 0)
OUTP_ERR(outp, "training failed");
else
OUTP_DBG(outp, "training done");
atomic_set(&outp->dp.lt.done, 1);
return ret;
}
void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
/* Execute DisableLT script from DP Info Table. */
nvbios_init(&ior->disp->engine.subdev, outp->dp.info.script[4],
init.outp = &outp->info;
init.or = ior->id;
init.link = ior->arm.link;
);
}
static void
nvkm_dp_release(struct nvkm_outp *outp)
{
/* Prevent link from being retrained if sink sends an IRQ. */
atomic_set(&outp->dp.lt.done, 0);
outp->ior->dp.nr = 0;
}
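/* Check the active link still satisfies the heads now attached, and
 * retrain if not.  The payload estimate is pixel clock times the OR
 * depth (bits per pixel) summed over heads: e.g. one 148500kHz head at
 * 24bpp gives datakbps = 3564000, i.e. dataKBps = 445500, well within
 * 2 HBR2 lanes (2 * 540000 KB/s).
 */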
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
struct nvkm_ior *ior = outp->ior;
struct nvkm_head *head;
bool retrain = true;
u32 datakbps = 0;
u32 dataKBps;
u32 linkKBps;
u8 stat[3];
int ret, i;
mutex_lock(&outp->dp.mutex);
/* Check that link configuration meets current requirements. */
list_for_each_entry(head, &outp->disp->heads, head) {
if (ior->asy.head & (1 << head->id)) {
u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
datakbps += khz * head->asy.or.depth;
}
}
linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
dataKBps = DIV_ROUND_UP(datakbps, 8);
OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
OUTP_DBG(outp, "link requirements changed");
goto done;
}
/* Check that link is still trained. */
ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
if (ret) {
OUTP_DBG(outp, "failed to read link status, assuming no sink");
goto done;
}
if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
for (i = 0; i < ior->dp.nr; i++) {
u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
!(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
OUTP_DBG(outp, "lane %d not equalised", lane);
goto done;
}
}
retrain = false;
} else {
OUTP_DBG(outp, "no inter-lane alignment");
}
done:
if (retrain || !atomic_read(&outp->dp.lt.done))
ret = nvkm_dp_train(outp, dataKBps);
mutex_unlock(&outp->dp.mutex);
return ret;
}
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
{
u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
int i, j, k;
if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
sink_rates, sizeof(sink_rates)))
return false;
for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
break;
if (rate > outp->info.dpconf.link_bw * 27000) {
OUTP_DBG(outp, "rate %d !outp", rate);
continue;
}
for (j = 0; j < outp->dp.rates; j++) {
if (rate > outp->dp.rate[j].rate) {
for (k = outp->dp.rates; k > j; k--)
outp->dp.rate[k] = outp->dp.rate[k - 1];
break;
}
}
outp->dp.rate[j].dpcd = i / 2;
outp->dp.rate[j].rate = rate;
outp->dp.rates++;
}
for (i = 0; i < outp->dp.rates; i++)
OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);
return outp->dp.rates != 0;
}
/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
* converted to work inside nvkm. This is a temporary holdover until we start
* passing the drm_dp_aux device through NVKM
*/
static int
nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
{
struct nvkm_i2c_aux *aux = outp->dp.aux;
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
int ret;
ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
/*
* Prior to DP1.3 the bit represented by
* DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
* If it is set DP_DPCD_REV at 0000h could be at a value less than
* the true capability of the panel. The only way to check is to
* then compare 0000h and 2200h.
*/
if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
if (ret < 0)
return ret;
if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
return 0;
}
if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
return 0;
memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
return 0;
}
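/* Power the AUX channel up/down and (re)probe sink capabilities.  The
 * LTTPR repeater-count field is one-hot encoded, 0x80 meaning one
 * repeater down through 0x01 meaning eight, hence the switch below.
 */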
void
nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
{
struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
struct nvkm_i2c_aux *aux = outp->dp.aux;
if (auxpwr && !outp->dp.aux_pwr) {
/* eDP panels need powering on by us (if the VBIOS doesn't default it
* to on) before doing any AUX channel transactions. LVDS panel power
* is handled by the SOR itself, and not required for LVDS DDC.
*/
if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
if (power == 0) {
nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
outp->dp.aux_pwr_pu = true;
}
/* We delay here unconditionally, even if already powered,
			 * because some laptop panels have a significant resume
			 * delay before the panel begins responding.
*
* This is likely a bit of a hack, but no better idea for
* handling this at the moment.
*/
msleep(300);
}
OUTP_DBG(outp, "aux power -> always");
nvkm_i2c_aux_monitor(aux, true);
outp->dp.aux_pwr = true;
/* Detect any LTTPRs before reading DPCD receiver caps. */
if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
switch (outp->dp.lttpr[2]) {
case 0x80: outp->dp.lttprs = 1; break;
case 0x40: outp->dp.lttprs = 2; break;
case 0x20: outp->dp.lttprs = 3; break;
case 0x10: outp->dp.lttprs = 4; break;
case 0x08: outp->dp.lttprs = 5; break;
case 0x04: outp->dp.lttprs = 6; break;
case 0x02: outp->dp.lttprs = 7; break;
case 0x01: outp->dp.lttprs = 8; break;
default:
/* Unknown LTTPR count, we'll switch to transparent mode. */
WARN_ON(1);
outp->dp.lttprs = 0;
break;
}
} else {
/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
}
if (!nvkm_dp_read_dpcd_caps(outp)) {
const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
const u8 *rate;
int rate_max;
outp->dp.rates = 0;
outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
if (outp->dp.lttprs && outp->dp.lttpr[4])
outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);
rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
rate_max = min(rate_max, outp->info.dpconf.link_bw);
if (outp->dp.lttprs && outp->dp.lttpr[1])
rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);
if (!nvkm_dp_enable_supported_link_rates(outp)) {
for (rate = rates; *rate; rate++) {
if (*rate > rate_max)
continue;
if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
break;
outp->dp.rate[outp->dp.rates].dpcd = -1;
outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
outp->dp.rates++;
}
}
}
} else
if (!auxpwr && outp->dp.aux_pwr) {
OUTP_DBG(outp, "aux power -> demand");
nvkm_i2c_aux_monitor(aux, false);
outp->dp.aux_pwr = false;
atomic_set(&outp->dp.lt.done, 0);
/* Restore eDP panel GPIO to its prior state if we changed it, as
* it could potentially interfere with other outputs.
*/
if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
if (outp->dp.aux_pwr_pu) {
nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
outp->dp.aux_pwr_pu = false;
}
}
}
}
static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
nvkm_dp_enable(outp, false);
}
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
nvkm_dp_enable(outp, outp->dp.enabled);
}
static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
return outp;
}
static const struct nvkm_outp_func
nvkm_dp_func = {
.dtor = nvkm_dp_dtor,
.init = nvkm_dp_init,
.fini = nvkm_dp_fini,
.acquire = nvkm_dp_acquire,
.release = nvkm_dp_release,
.disable = nvkm_dp_disable,
};
int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c = device->i2c;
struct nvkm_outp *outp;
u8 hdr, cnt, len;
u32 data;
int ret;
ret = nvkm_outp_new_(&nvkm_dp_func, disp, index, dcbE, poutp);
outp = *poutp;
if (ret)
return ret;
if (dcbE->location == 0)
outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
else
outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
if (!outp->dp.aux) {
OUTP_ERR(outp, "no aux");
return -EINVAL;
}
/* bios data is not optional */
data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
&outp->dp.version, &hdr, &cnt, &len, &outp->dp.info);
if (!data) {
OUTP_ERR(outp, "no bios dp data");
return -EINVAL;
}
OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);
mutex_init(&outp->dp.mutex);
atomic_set(&outp->dp.lt.done, 0);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/vga.h>
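/* Legacy VGA register helpers.  Pre-NV50, the per-head VGA ports are
 * mapped into MMIO at 0x601000 + head * 0x2000 (CRTC/attribute ports)
 * and 0x0c0000 + head * 0x2000 (sequencer/graphics ports); NV50 and
 * later expose a single set at 0x601000.
 */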
u8
nvkm_rdport(struct nvkm_device *device, int head, u16 port)
{
if (device->card_type >= NV_50)
return nvkm_rd08(device, 0x601000 + port);
if (port == 0x03c0 || port == 0x03c1 || /* AR */
port == 0x03c2 || port == 0x03da || /* INP0 */
port == 0x03d4 || port == 0x03d5) /* CR */
return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
if (port == 0x03c2 || port == 0x03cc || /* MISC */
port == 0x03c4 || port == 0x03c5 || /* SR */
port == 0x03ce || port == 0x03cf) { /* GR */
if (device->card_type < NV_40)
head = 0; /* CR44 selects head */
return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
}
return 0x00;
}
void
nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
{
if (device->card_type >= NV_50)
nvkm_wr08(device, 0x601000 + port, data);
else
if (port == 0x03c0 || port == 0x03c1 || /* AR */
port == 0x03c2 || port == 0x03da || /* INP0 */
port == 0x03d4 || port == 0x03d5) /* CR */
nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
else
if (port == 0x03c2 || port == 0x03cc || /* MISC */
port == 0x03c4 || port == 0x03c5 || /* SR */
port == 0x03ce || port == 0x03cf) { /* GR */
if (device->card_type < NV_40)
head = 0; /* CR44 selects head */
nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
}
}
u8
nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
{
nvkm_wrport(device, head, 0x03c4, index);
return nvkm_rdport(device, head, 0x03c5);
}
void
nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
{
nvkm_wrport(device, head, 0x03c4, index);
nvkm_wrport(device, head, 0x03c5, value);
}
u8
nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
{
nvkm_wrport(device, head, 0x03ce, index);
return nvkm_rdport(device, head, 0x03cf);
}
void
nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
{
nvkm_wrport(device, head, 0x03ce, index);
nvkm_wrport(device, head, 0x03cf, value);
}
u8
nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
{
nvkm_wrport(device, head, 0x03d4, index);
return nvkm_rdport(device, head, 0x03d5);
}
void
nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
{
nvkm_wrport(device, head, 0x03d4, index);
nvkm_wrport(device, head, 0x03d5, value);
}
u8
nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
{
if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
return 0x00;
}
void
nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
{
if (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
}
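/* Lock or unlock the extended CRTC registers via the magic values
 * written to CR1F (CR3F on NV50+): 0x57 unlocks, 0x99 locks.  The
 * register presumably reads back zero while locked, which is how the
 * prior state is inferred and returned for later restore.
 */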
bool
nvkm_lockvgac(struct nvkm_device *device, bool lock)
{
bool locked = !nvkm_rdvgac(device, 0, 0x1f);
u8 data = lock ? 0x99 : 0x57;
if (device->card_type < NV_50)
nvkm_wrvgac(device, 0, 0x1f, data);
else
nvkm_wrvgac(device, 0, 0x3f, data);
if (device->chipset == 0x11) {
if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
nvkm_wrvgac(device, 1, 0x1f, data);
}
return locked;
}
/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
* it affects only the 8 bit vga io regs, which we access using mmio at
* 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
* in general, the set value of cr44 does not matter: reg access works as
* expected and values can be set for the appropriate head by using a 0x2000
* offset as required
* however:
* a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
* cr44 must be set to 0 or 3 for accessing values on the correct head
* through the common 0xc03c* addresses
* b) in tied mode (4) head B is programmed to the values set on head A, and
* access using the head B addresses can have strange results, ergo we leave
* tied mode in init once we know to what cr44 should be restored on exit
*
* the owner parameter is slightly abused:
* 0 and 1 are treated as head values and so the set value is (owner * 3)
* other values are treated as literal values to set
*/
u8
nvkm_rdvgaowner(struct nvkm_device *device)
{
if (device->card_type < NV_50) {
if (device->chipset == 0x11) {
u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
if (tied == 0) {
u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
if (slA && !tvA) return 0x00;
if (slB && !tvB) return 0x03;
if (slA) return 0x00;
if (slB) return 0x03;
return 0x00;
}
return 0x04;
}
return nvkm_rdvgac(device, 0, 0x44);
}
return 0x00;
}
void
nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
{
if (device->card_type < NV_50) {
u8 owner = (select == 1) ? 3 : select;
if (device->chipset == 0x11) {
/* workaround hw lockup bug */
nvkm_rdvgac(device, 0, 0x1f);
nvkm_rdvgac(device, 1, 0x1f);
}
nvkm_wrvgac(device, 0, 0x44, owner);
if (device->chipset == 0x11) {
nvkm_wrvgac(device, 0, 0x2e, owner);
nvkm_wrvgac(device, 0, 0x2e, owner);
}
}
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "head.h"
#include "ior.h"
#include <nvif/class.h>
static const struct nvkm_ior_func
mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
static int
mcp77_sor_new(struct nvkm_disp *disp, int id)
{
return nvkm_ior_new_(&mcp77_sor, disp, SOR, id, false);
}
static const struct nvkm_disp_func
mcp77_disp = {
.oneinit = nv50_disp_oneinit,
.init = nv50_disp_init,
.fini = nv50_disp_fini,
.intr = nv50_disp_intr,
.super = nv50_disp_super,
.uevent = &nv50_disp_chan_uevent,
.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
.sor = { .cnt = g94_sor_cnt, .new = mcp77_sor_new },
.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
.root = { 0,0,GT206_DISP },
.user = {
{{0,0, G82_DISP_CURSOR }, nvkm_disp_chan_new, & nv50_disp_curs },
{{0,0, G82_DISP_OVERLAY }, nvkm_disp_chan_new, & nv50_disp_oimm },
{{0,0,GT200_DISP_BASE_CHANNEL_DMA }, nvkm_disp_chan_new, & g84_disp_base },
{{0,0,GT206_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, & g94_disp_core },
		{{0,0,GT200_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &gt200_disp_ovly },
{}
},
};
int
mcp77_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
return nvkm_disp_new_(&mcp77_disp, device, type, inst, pdisp);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "runl.h"
#include <nvif/class.h>
static int
g98_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl;
runl = nvkm_runl_new(fifo, 0, 0, 0);
if (IS_ERR(runl))
return PTR_ERR(runl);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_GR, 0);
nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0);
nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_CE, 0);
nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0);
nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_SEC, 0);
nvkm_runl_add(runl, 6, fifo->func->engn, NVKM_ENGINE_MSVLD, 0);
return 0;
}
static const struct nvkm_fifo_func
g98_fifo = {
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = g98_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.nonstall = &g84_fifo_nonstall,
.runl = &nv50_runl,
.engn = &g84_engn,
.engn_sw = &nv50_engn_sw,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, G82_CHANNEL_GPFIFO }, &g84_chan },
};
int
g98_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&g98_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/g98.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "regsnv04.h"
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <nvif/class.h>
static int
nv40_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 128;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_wo32(ramfc, base + 0x3c, 0x0001ffff);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv40_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 2, 28, 0x18, 28, 0x002058 },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{ 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
{ 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
{ 32, 0, 0x40, 0, 0x0032e4 },
{ 32, 0, 0x44, 0, 0x0032e8 },
{ 32, 0, 0x4c, 0, 0x002088 },
{ 32, 0, 0x50, 0, 0x003300 },
{ 32, 0, 0x54, 0, 0x00330c },
{}
},
.write = nv40_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
.bar = 0,
.base = 0xc00000,
.size = 0x001000,
};
static const struct nvkm_chan_func
nv40_chan = {
.inst = &nv04_chan_inst,
.userd = &nv40_chan_userd,
.ramfc = &nv40_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
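/* RAMHT context entries encode where an object lives: bits 23 and up
 * hold the channel id and bits 20-22 the engine id, so e.g. channel 3
 * on engine 1 yields a context word of 0x01900000.
 */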
static int
nv40_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
u32 context = chan->id << 23 | engn->id << 20;
int hash;
mutex_lock(&fifo->mutex);
hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
mutex_unlock(&fifo->mutex);
return hash;
}
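/* Bind (or unbind, when cctx is NULL) an engine context to a channel.
 * The instance pointer is written both to the channel's RAMFC slot and,
 * if that channel is currently resident, to the live PFIFO register
 * (0x32e0 for GR, 0x330c for MPEG), with context switching briefly
 * disabled around the update.
 */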
static void
nv40_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_memory *ramfc = device->imem->ramfc;
u32 inst = 0x00000000, reg, ctx;
int chid;
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_GR:
reg = 0x0032e0;
ctx = 0x38;
break;
case NVKM_ENGINE_MPEG:
if (WARN_ON(device->chipset < 0x44))
return;
reg = 0x00330c;
ctx = 0x54;
break;
default:
WARN_ON(1);
return;
}
if (cctx)
inst = cctx->vctx->inst->addr >> 4;
spin_lock_irq(&fifo->lock);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
chid = nvkm_rd32(device, 0x003204) & (fifo->chid->nr - 1);
if (chid == chan->id)
nvkm_wr32(device, reg, inst);
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, chan->ramfc_offset + ctx, inst);
nvkm_done(ramfc);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irq(&fifo->lock);
}
static const struct nvkm_engn_func
nv40_engn = {
.bind = nv40_ectx_bind,
.ramht_add = nv40_eobj_ramht_add,
.ramht_del = nv04_eobj_ramht_del,
};
static const struct nvkm_engn_func
nv40_engn_sw = {
.ramht_add = nv40_eobj_ramht_add,
.ramht_del = nv04_eobj_ramht_del,
};
static void
nv40_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_fb *fb = device->fb;
struct nvkm_instmem *imem = device->imem;
struct nvkm_ramht *ramht = imem->ramht;
struct nvkm_memory *ramro = imem->ramro;
struct nvkm_memory *ramfc = imem->ramfc;
nvkm_wr32(device, 0x002040, 0x000000ff);
nvkm_wr32(device, 0x002044, 0x2101ffff);
nvkm_wr32(device, 0x002058, 0x00000001);
nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((ramht->bits - 9) << 16) |
(ramht->gpuobj->addr >> 8));
nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
nvkm_wr32(device, 0x002230, 0x00000001);
fallthrough;
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
case 0x48:
nvkm_wr32(device, 0x002220, 0x00030002);
break;
default:
nvkm_wr32(device, 0x002230, 0x00000000);
nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
nvkm_memory_addr(ramfc)) >> 16) |
0x00030000);
break;
}
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
static const struct nvkm_fifo_func
nv40_fifo = {
.chid_nr = nv10_fifo_chid_nr,
.chid_ctor = nv04_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv40_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
.engn = &nv40_engn,
.engn_sw = &nv40_engn_sw,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, NV40_CHANNEL_DMA }, &nv40_chan },
};
int
nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&nv40_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "regsnv04.h"
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/sw.h>
#include <nvif/class.h>
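/* Halt a channel. If it's currently resident in PFIFO CACHE1, save the
* per-channel register state back into its RAMFC entry (as described by
* the channel class' layout table) and load a null context in its
* place, then take the channel out of DMA mode.
*/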
void
nv04_chan_stop(struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc;
const struct nvkm_ramfc_layout *c;
unsigned long flags;
u32 data = chan->ramfc_offset;
u32 chid;
/* prevent fifo context switches */
spin_lock_irqsave(&fifo->lock, flags);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
if (chid == chan->id) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = chan->func->ramfc->layout;
nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
nvkm_done(fctx);
c = chan->func->ramfc->layout;
do {
nvkm_wr32(device, c->regp, 0x00000000);
} while ((++c)->bits);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
/* restore normal operation, after disabling dma mode */
nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->lock, flags);
}
void
nv04_chan_start(struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id));
spin_unlock_irqrestore(&fifo->lock, flags);
}
void
nv04_chan_ramfc_clear(struct nvkm_chan *chan)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout;
nvkm_kmap(ramfc);
do {
nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000);
} while ((++c)->bits);
nvkm_done(ramfc);
}
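/* Fill in the channel's 32-byte RAMFC entry: DMA_PUT/DMA_GET both start
* at 'offset', DMA_INSTANCE points at the push buffer ctxdma, and
* DMA_FETCH gets the default trigger/size/max-requests configuration.
*/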
static int
nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 32;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv04_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
},
.write = nv04_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
const struct nvkm_chan_func_userd
nv04_chan_userd = {
.bar = 0,
.base = 0x800000,
.size = 0x010000,
};
const struct nvkm_chan_func_inst
nv04_chan_inst = {
.size = 0x1000,
};
static const struct nvkm_chan_func
nv04_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv04_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
const struct nvkm_cgrp_func
nv04_cgrp = {
};
void
nv04_eobj_ramht_del(struct nvkm_chan *chan, int hash)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
mutex_lock(&fifo->mutex);
nvkm_ramht_remove(imem->ramht, hash);
mutex_unlock(&fifo->mutex);
}
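/* Hash an engine object into RAMHT so the puller can resolve its handle;
* the context word encodes valid (bit 31), channel ID and engine ID.
*/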
static int
nv04_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
u32 context = 0x80000000 | chan->id << 24 | engn->id << 16;
int hash;
mutex_lock(&fifo->mutex);
hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
mutex_unlock(&fifo->mutex);
return hash;
}
const struct nvkm_engn_func
nv04_engn = {
.ramht_add = nv04_eobj_ramht_add,
.ramht_del = nv04_eobj_ramht_del,
};
void
nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
__acquires(fifo->lock)
{
struct nvkm_device *device = fifo->engine.subdev.device;
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
*pflags = flags;
nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
/* In some cases the puller may be left in an inconsistent state
* if you try to stop it while it's busy translating handles.
* Sometimes you get a CACHE_ERROR, sometimes it just fails
* silently, sending incorrect instance offsets to PGRAPH after
* it's started up again.
*
* To avoid this, we invalidate the most recently calculated
* instance.
*/
nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
break;
);
if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
void
nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
__releases(fifo->lock)
{
struct nvkm_device *device = fifo->engine.subdev.device;
unsigned long flags = *pflags;
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
spin_unlock_irqrestore(&fifo->lock, flags);
}
const struct nvkm_runl_func
nv04_runl = {
};
static const char *
nv_dma_state_err(u32 state)
{
static const char * const desc[] = {
"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
};
return desc[(state >> 29) & 0x7];
}
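/* Try to execute a method in software. Method 0x0000 (bind) switches
* the subchannel's engine to SW, 0x0180-0x01fc translate the handle to
* an instance address first, and the rest are passed to the SW class
* if the subchannel isn't bound to a HW engine.
*/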
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
struct nvkm_sw *sw = device->sw;
const int subc = (addr & 0x0000e000) >> 13;
const int mthd = (addr & 0x00001ffc);
const u32 mask = 0x0000000f << (subc * 4);
u32 engine = nvkm_rd32(device, 0x003280);
bool handled = false;
switch (mthd) {
case 0x0000 ... 0x0000: /* subchannel's engine -> software */
nvkm_wr32(device, 0x003280, (engine &= ~mask));
fallthrough;
case 0x0180 ... 0x01fc: /* handle -> instance */
data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
fallthrough;
case 0x0100 ... 0x017c:
case 0x0200 ... 0x1ffc: /* pass method down to sw */
if (!(engine & mask) && sw)
handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
break;
default:
break;
}
return handled;
}
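/* CACHE_ERROR: the puller hit something it couldn't handle. Try to
* execute the method in software; failing that, log it. Either way,
* skip past the offending entry by advancing CACHE1 GET and restart
* the puller.
*/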
static void
nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_chan *chan;
unsigned long flags;
u32 pull0 = nvkm_rd32(device, 0x003250);
u32 mthd, data;
int ptr;
/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
* G80 chips, but CACHE1 isn't big enough for this much data. Tests
* show that it wraps around to the start at GET=0x800. No clue as to
* why.
*/
ptr = (get & 0x7ff) >> 2;
if (device->card_type < NV_40) {
mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
} else {
mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
}
if (!(pull0 & 0x00000100) ||
!nv04_fifo_swmthd(device, chid, mthd, data)) {
chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
nvkm_error(subdev, "CACHE_ERROR - "
"ch %d [%s] subc %d mthd %04x data %08x\n",
chid, chan ? chan->name : "unknown",
(mthd >> 13) & 7, mthd & 0x1ffc, data);
nvkm_chan_put(&chan, flags);
}
nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
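/* DMA_PUSHER: the pusher choked while fetching commands. Log its state
* and resynchronise GET to PUT (including the high/IB words on NV50)
* so the channel can make progress again.
*/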
static void
nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 dma_get = nvkm_rd32(device, 0x003244);
u32 dma_put = nvkm_rd32(device, 0x003240);
u32 push = nvkm_rd32(device, 0x003220);
u32 state = nvkm_rd32(device, 0x003228);
struct nvkm_chan *chan;
unsigned long flags;
const char *name;
chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
name = chan ? chan->name : "unknown";
if (device->card_type == NV_50) {
u32 ho_get = nvkm_rd32(device, 0x003328);
u32 ho_put = nvkm_rd32(device, 0x003320);
u32 ib_get = nvkm_rd32(device, 0x003334);
u32 ib_put = nvkm_rd32(device, 0x003330);
nvkm_error(subdev, "DMA_PUSHER - "
"ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
"ib_put %08x state %08x (err: %s) push %08x\n",
chid, name, ho_get, dma_get, ho_put, dma_put,
ib_get, ib_put, state, nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
nvkm_wr32(device, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
nvkm_wr32(device, 0x003244, dma_put);
nvkm_wr32(device, 0x003328, ho_put);
} else
if (ib_get != ib_put)
nvkm_wr32(device, 0x003334, ib_put);
} else {
nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
"state %08x (err: %s) push %08x\n",
chid, name, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
nvkm_wr32(device, 0x003244, dma_put);
}
nvkm_chan_put(&chan, flags);
nvkm_wr32(device, 0x003228, 0x00000000);
nvkm_wr32(device, 0x003220, 0x00000001);
nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
irqreturn_t
nv04_fifo_intr(struct nvkm_inth *inth)
{
struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
u32 reassign, chid, get, sem;
reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
nv04_fifo_intr_cache_error(fifo, chid, get);
stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
nv04_fifo_intr_dma_pusher(fifo, chid);
stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (stat & NV_PFIFO_INTR_SEMAPHORE) {
stat &= ~NV_PFIFO_INTR_SEMAPHORE;
nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (device->card_type == NV_50) {
if (stat & 0x00000010) {
stat &= ~0x00000010;
nvkm_wr32(device, 0x002100, 0x00000010);
}
if (stat & 0x40000000) {
nvkm_wr32(device, 0x002100, 0x40000000);
nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
stat &= ~0x40000000;
}
}
if (stat) {
nvkm_warn(subdev, "intr %08x\n", stat);
nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
}
nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
return IRQ_HANDLED;
}
void
nv04_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
struct nvkm_ramht *ramht = imem->ramht;
struct nvkm_memory *ramro = imem->ramro;
struct nvkm_memory *ramfc = imem->ramfc;
nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((ramht->bits - 9) << 16) |
(ramht->gpuobj->addr >> 8));
nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
int
nv04_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl;
runl = nvkm_runl_new(fifo, 0, 0, 0);
if (IS_ERR(runl))
return PTR_ERR(runl);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
nvkm_runl_add(runl, 1, fifo->func->engn , NVKM_ENGINE_GR, 0);
nvkm_runl_add(runl, 2, fifo->func->engn , NVKM_ENGINE_MPEG, 0); /* NV31- */
return 0;
}
int
nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
/* The last CHID is reserved by HW as a "channel invalid" marker. */
return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid);
}
static int
nv04_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 16;
}
static const struct nvkm_fifo_func
nv04_fifo = {
.chid_nr = nv04_fifo_chid_nr,
.chid_ctor = nv04_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
.engn = &nv04_engn,
.engn_sw = &nv04_engn,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan },
};
int
nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&nv04_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <subdev/fault.h>
#include <nvif/class.h>
const struct nvkm_chan_func
gm107_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
.stop = gk104_chan_stop,
.preempt = gk110_chan_preempt,
};
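/* GM107 runlist entries carry the channel's instance block address in
* the second word, where GK104 wrote zero padding.
*/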
static void
gm107_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
}
const struct nvkm_runl_func
gm107_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gm107_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.fault_clear = gk104_runl_fault_clear,
.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
gm107_fifo_mmu_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x02, "CAPTURE" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
{ 0x07, "HOST0" },
{ 0x08, "HOST1" },
{ 0x09, "HOST2" },
{ 0x0a, "HOST3" },
{ 0x0b, "HOST4" },
{ 0x0c, "HOST5" },
{ 0x0d, "HOST6" },
{ 0x0e, "HOST7" },
{ 0x0f, "HOSTSR" },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
{}
};
const struct nvkm_fifo_func_mmu_fault
gm107_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gf100_fifo_mmu_fault_access,
.engine = gm107_fifo_mmu_fault_engine,
.reason = gk104_fifo_mmu_fault_reason,
.hubclient = gk104_fifo_mmu_fault_hubclient,
.gpcclient = gk104_fifo_mmu_fault_gpcclient,
};
void
gm107_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
{
struct nvkm_device *device = fifo->engine.subdev.device;
u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
struct nvkm_fault_data info;
info.inst = (u64)inst << 12;
info.addr = ((u64)vahi << 32) | valo;
info.time = 0;
info.engine = unit;
info.valid = 1;
info.gpc = (type & 0x1f000000) >> 24;
info.client = (type & 0x00003f00) >> 8;
info.access = (type & 0x00000080) >> 7;
info.hub = (type & 0x00000040) >> 6;
info.reason = (type & 0x0000000f);
nvkm_fifo_fault(fifo, &info);
}
static int
gm107_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 2048;
}
static const struct nvkm_fifo_func
gm107_fifo = {
.chid_nr = gm107_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_B }, &gm107_chan },
};
int
gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gm107_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/top.h>
#include <nvif/class.h>
#include <nvif/if900d.h>
void
gk104_chan_stop(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
}
void
gk104_chan_start(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
}
void
gk104_chan_unbind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);
}
void
gk104_chan_bind_inst(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);
}
void
gk104_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_runl *runl = chan->cgrp->runl;
struct nvkm_device *device = runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
gk104_chan_bind_inst(chan);
}
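/* On Kepler, RAMFC state lives inside the channel's instance block.
* Point it at USERD, program the GPFIFO base and size (limit2 is log2
* of the entry count), and fill in the remaining (mostly magic)
* defaults.
*/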
static int
gk104_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x10, 0x0000face);
nvkm_wo32(chan->inst, 0x30, 0xfffff902);
nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x84, 0x20400000);
nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x9c, 0x00000100);
nvkm_wo32(chan->inst, 0xac, 0x0000001f);
nvkm_wo32(chan->inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0xe8, chan->id);
nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->inst);
return 0;
}
const struct nvkm_chan_func_ramfc
gk104_chan_ramfc = {
.write = gk104_chan_ramfc_write,
.devm = 0xfff,
.priv = true,
};
const struct nvkm_chan_func_userd
gk104_chan_userd = {
.bar = 1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
static const struct nvkm_chan_func
gk104_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
.stop = gk104_chan_stop,
.preempt = gf100_chan_preempt,
};
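/* Write an engine's context pointer(s) into the channel's instance
* block. NVDEC/NVENC update a second slot as well; the 4 OR'd into the
* address presumably acts as a valid/target flag.
*/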
static void
gk104_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
u32 ptr0, ptr1 = 0;
u64 addr = 0ULL;
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_SW : return;
case NVKM_ENGINE_GR : ptr0 = 0x0210; break;
case NVKM_ENGINE_SEC : ptr0 = 0x0220; break;
case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
case NVKM_ENGINE_VIC : ptr0 = 0x0280; break;
case NVKM_ENGINE_MSENC : ptr0 = 0x0290; break;
case NVKM_ENGINE_NVDEC :
ptr1 = 0x0270;
ptr0 = 0x0210;
break;
case NVKM_ENGINE_NVENC :
if (!engn->engine->subdev.inst)
ptr1 = 0x0290;
ptr0 = 0x0210;
break;
default:
WARN_ON(1);
return;
}
if (cctx) {
addr = cctx->vctx->vma->addr;
addr |= 4ULL;
}
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
if (ptr1) {
nvkm_wo32(chan->inst, ptr1 + 0, lower_32_bits(addr));
nvkm_wo32(chan->inst, ptr1 + 4, upper_32_bits(addr));
}
nvkm_done(chan->inst);
}
int
gk104_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
struct gf100_vmm_map_v0 args = { .priv = 1 };
int ret;
ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
if (ret)
return ret;
return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, &args, sizeof(args));
}
/* TODO: clean this up */
struct gk104_engn_status {
bool busy;
bool faulted;
bool chsw;
bool save;
bool load;
struct {
bool tsg;
u32 id;
} prev, next, *chan;
};
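/* Decode an engine's status register (0x002640 + id * 8) into busy,
* faulted and context-switch state, and work out which channel/TSG
* context (previous or next) is the relevant one.
*/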
static void
gk104_engn_status(struct nvkm_engn *engn, struct gk104_engn_status *status)
{
u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08));
status->busy = !!(stat & 0x80000000);
status->faulted = !!(stat & 0x40000000);
status->next.tsg = !!(stat & 0x10000000);
status->next.id = (stat & 0x0fff0000) >> 16;
status->chsw = !!(stat & 0x00008000);
status->save = !!(stat & 0x00004000);
status->load = !!(stat & 0x00002000);
status->prev.tsg = !!(stat & 0x00001000);
status->prev.id = (stat & 0x00000fff);
status->chan = NULL;
if (status->busy && status->chsw) {
if (status->load && status->save) {
if (nvkm_engine_chsw_load(engn->engine))
status->chan = &status->next;
else
status->chan = &status->prev;
} else
if (status->load) {
status->chan = &status->next;
} else {
status->chan = &status->prev;
}
} else
if (status->load) {
status->chan = &status->prev;
}
ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s",
stat, status->busy, status->faulted, status->chsw, status->save, status->load,
status->prev.tsg ? "tsg" : "ch", status->prev.id,
status->chan == &status->prev ? "*" : " ",
status->next.tsg ? "tsg" : "ch", status->next.id,
status->chan == &status->next ? "*" : " ");
}
int
gk104_engn_cxid(struct nvkm_engn *engn, bool *cgid)
{
struct gk104_engn_status status;
gk104_engn_status(engn, &status);
if (status.chan) {
*cgid = status.chan->tsg;
return status.chan->id;
}
return -ENODEV;
}
bool
gk104_engn_chsw(struct nvkm_engn *engn)
{
struct gk104_engn_status status;
gk104_engn_status(engn, &status);
if (status.busy && status.chsw)
return true;
return false;
}
const struct nvkm_engn_func
gk104_engn = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
.ctor = gk104_ectx_ctor,
.bind = gk104_ectx_bind,
};
const struct nvkm_engn_func
gk104_engn_ce = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
};
bool
gk104_runq_idle(struct nvkm_runq *runq)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000);
}
static const struct nvkm_bitfield
gk104_runq_intr_1_names[] = {
{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
{ 0x00000002, "HCE_RE_ALIGNB" },
{ 0x00000004, "HCE_PRIV" },
{ 0x00000008, "HCE_ILLEGAL_MTHD" },
{ 0x00000010, "HCE_ILLEGAL_CLASS" },
{}
};
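/* Second PBDMA interrupt group: host copy engine (HCE) errors, plus
* bit 31 (CTXNOTVALID), which chipsets may handle via a hook before
* it's logged.
*/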
static bool
gk104_runq_intr_1(struct nvkm_runq *runq)
{
struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
char msg[128];
if (stat & 0x80000000) {
if (runq->func->intr_1_ctxnotvalid &&
runq->func->intr_1_ctxnotvalid(runq, chid))
stat &= ~0x80000000;
}
if (stat) {
nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
runq->id, stat, msg, chid,
nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
}
nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
return true;
}
const struct nvkm_bitfield
gk104_runq_intr_0_names[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
{ 0x00000004, "MEMACK_EXTRA" },
{ 0x00000008, "MEMDAT_TIMEOUT" },
{ 0x00000010, "MEMDAT_EXTRA" },
{ 0x00000020, "MEMFLUSH" },
{ 0x00000040, "MEMOP" },
{ 0x00000080, "LBCONNECT" },
{ 0x00000100, "LBREQ" },
{ 0x00000200, "LBACK_TIMEOUT" },
{ 0x00000400, "LBACK_EXTRA" },
{ 0x00000800, "LBDAT_TIMEOUT" },
{ 0x00001000, "LBDAT_EXTRA" },
{ 0x00002000, "GPFIFO" },
{ 0x00004000, "GPPTR" },
{ 0x00008000, "GPENTRY" },
{ 0x00010000, "GPCRC" },
{ 0x00020000, "PBPTR" },
{ 0x00040000, "PBENTRY" },
{ 0x00080000, "PBCRC" },
{ 0x00100000, "XBARCONNECT" },
{ 0x00200000, "METHOD" },
{ 0x00400000, "METHODCRC" },
{ 0x00800000, "DEVICE" },
{ 0x02000000, "SEMAPHORE" },
{ 0x04000000, "ACQUIRE" },
{ 0x08000000, "PRI" },
{ 0x20000000, "NO_CTXSW_SEG" },
{ 0x40000000, "PBSEG" },
{ 0x80000000, "SIGNATURE" },
{}
};
bool
gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
bool intr0 = gf100_runq_intr(runq, NULL);
bool intr1 = gk104_runq_intr_1(runq);
return intr0 || intr1;
}
void
gk104_runq_init(struct nvkm_runq *runq)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
gf100_runq_init(runq);
nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
}
static u32
gk104_runq_runm(struct nvkm_runq *runq)
{
return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
}
const struct nvkm_runq_func
gk104_runq = {
.init = gk104_runq_init,
.intr = gk104_runq_intr,
.intr_0_names = gk104_runq_intr_0_names,
.idle = gk104_runq_idle,
};
void
gk104_runl_fault_clear(struct nvkm_runl *runl)
{
nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id));
}
void
gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
}
void
gk104_runl_block(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
}
bool
gk104_runl_pending(struct nvkm_runl *runl)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
}
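/* Submit a runlist update to HW: write the buffer's aperture/address,
* then the runlist ID and entry count to kick off processing.
*/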
void
gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
int target;
switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
return;
}
spin_lock_irq(&fifo->lock);
nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
spin_unlock_irq(&fifo->lock);
}
void
gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, 0x00000000);
}
static const struct nvkm_runl_func
gk104_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_chan = gk104_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.fault_clear = gk104_runl_fault_clear,
.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
gk104_fifo_mmu_fault_engine[] = {
{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
{ 0x01, "DISPLAY" },
{ 0x02, "CAPTURE" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "SCHED" },
{ 0x07, "HOST0" },
{ 0x08, "HOST1" },
{ 0x09, "HOST2" },
{ 0x0a, "HOST3" },
{ 0x0b, "HOST4" },
{ 0x0c, "HOST5" },
{ 0x0d, "HOST6" },
{ 0x0e, "HOST7" },
{ 0x0f, "HOSTSR" },
{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PERF" },
{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
{ 0x02, "PTE" },
{ 0x03, "VA_LIMIT_VIOLATION" },
{ 0x04, "UNBOUND_INST_BLOCK" },
{ 0x05, "PRIV_VIOLATION" },
{ 0x06, "RO_VIOLATION" },
{ 0x07, "WO_VIOLATION" },
{ 0x08, "PITCH_MASK_VIOLATION" },
{ 0x09, "WORK_CREATION" },
{ 0x0a, "UNSUPPORTED_APERTURE" },
{ 0x0b, "COMPRESSION_FAILURE" },
{ 0x0c, "UNSUPPORTED_KIND" },
{ 0x0d, "REGION_VIOLATION" },
{ 0x0e, "BOTH_PTES_VALID" },
{ 0x0f, "INFO_TYPE_POISONED" },
{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
{ 0x02, "CE1" },
{ 0x03, "DNISO" },
{ 0x04, "FE" },
{ 0x05, "FECS" },
{ 0x06, "HOST" },
{ 0x07, "HOST_CPU" },
{ 0x08, "HOST_CPU_NB" },
{ 0x09, "ISO" },
{ 0x0a, "MMU" },
{ 0x0b, "MSPDEC" },
{ 0x0c, "MSPPP" },
{ 0x0d, "MSVLD" },
{ 0x0e, "NISO" },
{ 0x0f, "P2P" },
{ 0x10, "PD" },
{ 0x11, "PERF" },
{ 0x12, "PMU" },
{ 0x13, "RASTERTWOD" },
{ 0x14, "SCC" },
{ 0x15, "SCC_NB" },
{ 0x16, "SEC" },
{ 0x17, "SSYNC" },
{ 0x18, "GR_CE" },
{ 0x19, "CE2" },
{ 0x1a, "XV" },
{ 0x1b, "MMU_NB" },
{ 0x1c, "MSENC" },
{ 0x1d, "DFALCON" },
{ 0x1e, "SKED" },
{ 0x1f, "AFALCON" },
{}
};
const struct nvkm_enum
gk104_fifo_mmu_fault_gpcclient[] = {
{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
{ 0x0c, "RAST" },
{ 0x0d, "GCC" },
{ 0x0e, "GPCCS" },
{ 0x0f, "PROP_0" },
{ 0x10, "PROP_1" },
{ 0x11, "PROP_2" },
{ 0x12, "PROP_3" },
{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
{ 0x1f, "GPM" },
{ 0x20, "LTP_UTLB_0" },
{ 0x21, "LTP_UTLB_1" },
{ 0x22, "LTP_UTLB_2" },
{ 0x23, "LTP_UTLB_3" },
{ 0x24, "GPC_RGG_UTLB" },
{}
};
const struct nvkm_fifo_func_mmu_fault
gk104_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gf100_fifo_mmu_fault_access,
.engine = gk104_fifo_mmu_fault_engine,
.reason = gk104_fifo_mmu_fault_reason,
.hubclient = gk104_fifo_mmu_fault_hubclient,
.gpcclient = gk104_fifo_mmu_fault_gpcclient,
};
static const struct nvkm_enum
gk104_fifo_intr_bind_reason[] = {
{ 0x01, "BIND_NOT_UNBOUND" },
{ 0x02, "SNOOP_WITHOUT_BAR1" },
{ 0x03, "UNBIND_WHILE_RUNNING" },
{ 0x05, "INVALID_RUNLIST" },
{ 0x06, "INVALID_CTX_TGT" },
{ 0x0b, "UNBIND_WHILE_PARKED" },
{}
};
void
gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
u32 intr = nvkm_rd32(subdev->device, 0x00252c);
u32 code = intr & 0x000000ff;
const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);
nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
void
gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x00256c);
nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
nvkm_wr32(device, 0x00256c, stat);
}
static void
gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
u32 stat = nvkm_rd32(subdev->device, 0x00259c);
nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
void
gk104_fifo_intr_runlist(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_runl *runl;
u32 mask = nvkm_rd32(device, 0x002a00);
nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
nvkm_wr32(device, 0x002a00, BIT(runl->id));
}
}
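/* Top-level PFIFO interrupt dispatch. Any bits left unhandled are
* logged and then masked off to avoid an interrupt storm.
*/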
irqreturn_t
gk104_fifo_intr(struct nvkm_inth *inth)
{
struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x002140);
u32 stat = nvkm_rd32(device, 0x002100) & mask;
if (stat & 0x00000001) {
gk104_fifo_intr_bind(fifo);
nvkm_wr32(device, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000010) {
nvkm_error(subdev, "PIO_ERROR\n");
nvkm_wr32(device, 0x002100, 0x00000010);
stat &= ~0x00000010;
}
if (stat & 0x00000100) {
gf100_fifo_intr_sched(fifo);
nvkm_wr32(device, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
gk104_fifo_intr_chsw(fifo);
nvkm_wr32(device, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
if (stat & 0x00800000) {
nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
nvkm_wr32(device, 0x002100, 0x00800000);
stat &= ~0x00800000;
}
if (stat & 0x01000000) {
nvkm_error(subdev, "LB_ERROR\n");
nvkm_wr32(device, 0x002100, 0x01000000);
stat &= ~0x01000000;
}
if (stat & 0x08000000) {
gk104_fifo_intr_dropped_fault(fifo);
nvkm_wr32(device, 0x002100, 0x08000000);
stat &= ~0x08000000;
}
if (stat & 0x10000000) {
gf100_fifo_intr_mmu_fault(fifo);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
if (gf100_fifo_intr_pbdma(fifo))
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
gk104_fifo_intr_runlist(fifo);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
nvkm_wr32(device, 0x002100, 0x80000000);
nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
stat &= ~0x80000000;
}
if (stat) {
nvkm_error(subdev, "INTR %08x\n", stat);
spin_lock(&fifo->lock);
nvkm_mask(device, 0x002140, stat, 0x00000000);
spin_unlock(&fifo->lock);
nvkm_wr32(device, 0x002100, stat);
}
return IRQ_HANDLED;
}
void
gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_wr32(device, 0x000204, mask);
nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}
void
gk104_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
if (fifo->func->chan.func->userd->bar == 1)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
}
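/* Construct runlists from PTOP device info: one nvkm_runl per runlist
* ID, with the PBDMAs (runqs) that service it attached, and each engine
* (plus SW alongside GR) registered on its runlist.
*/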
int
gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_top_device *tdev;
struct nvkm_runl *runl;
struct nvkm_runq *runq;
const struct nvkm_engn_func *func;
nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
if (!runl) {
runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
if (IS_ERR(runl))
return PTR_ERR(runl);
nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
return -ENOMEM;
runl->runq[runl->runq_nr++] = runq;
}
}
if (tdev->engine < 0)
continue;
switch (tdev->type) {
case NVKM_ENGINE_CE:
func = fifo->func->engn_ce;
break;
case NVKM_ENGINE_GR:
nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
fallthrough;
default:
func = fifo->func->engn;
break;
}
nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
}
return 0;
}
int
gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 4096;
}
static const struct nvkm_fifo_func
gk104_fifo = {
.chid_nr = gk104_fifo_chid_nr,
.chid_ctor = gf100_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk104_runl,
.runq = &gk104_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan },
};
int
gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gk104_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "chid.h"
#include "runl.h"
#include "priv.h"
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>
#include <nvif/if0020.h>
const struct nvkm_event_func
nvkm_chan_event = {
};
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
struct nvkm_engine *engine = engn->engine;
if (!engn->func->bind)
return;
CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);
/* Prevent any channel in the channel group from being rescheduled, and
* kick them off the host and any engine(s) they're loaded on.
*/
if (cgrp->hw)
nvkm_runl_block(runl);
else
nvkm_chan_block(chan);
nvkm_chan_preempt(chan, true);
/* Update context pointer. */
engn->func->bind(engn, cctx, chan);
/* Resume normal operation. */
if (cgrp->hw)
nvkm_runl_allow(runl);
else
nvkm_chan_allow(chan);
}
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
struct nvkm_cctx *cctx = *pcctx;
if (cctx) {
struct nvkm_engn *engn = cctx->vctx->ectx->engn;
if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
list_del(&cctx->head);
kfree(cctx);
mutex_unlock(&chan->cgrp->mutex);
}
*pcctx = NULL;
}
}
int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
struct nvkm_client *client)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_vctx *vctx;
struct nvkm_cctx *cctx;
int ret;
/* Look for an existing channel context for this engine+VEID. */
mutex_lock(&cgrp->mutex);
cctx = nvkm_list_find(cctx, &chan->cctxs, head,
cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
if (cctx) {
refcount_inc(&cctx->refs);
*pcctx = cctx;
mutex_unlock(&chan->cgrp->mutex);
return 0;
}
/* Nope - create a fresh one. But, sub-context first. */
ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
if (ret) {
CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
goto done;
}
/* Now, create the channel context - to track engine binding. */
CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
nvkm_cgrp_vctx_put(cgrp, &vctx);
ret = -ENOMEM;
goto done;
}
cctx->vctx = vctx;
refcount_set(&cctx->refs, 1);
refcount_set(&cctx->uses, 0);
list_add_tail(&cctx->head, &chan->cctxs);
done:
mutex_unlock(&cgrp->mutex);
return ret;
}
int
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
{
struct nvkm_runl *runl = chan->cgrp->runl;
CHAN_TRACE(chan, "preempt");
chan->func->preempt(chan);
if (!wait)
return 0;
return nvkm_runl_preempt_wait(runl);
}
int
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
{
int ret;
if (!chan->func->preempt)
return 0;
mutex_lock(&chan->cgrp->runl->mutex);
ret = nvkm_chan_preempt_locked(chan, wait);
mutex_unlock(&chan->cgrp->runl->mutex);
return ret;
}
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
if (list_empty(&chan->head))
return;
CHAN_TRACE(chan, "remove");
if (!--cgrp->chan_nr) {
runl->cgrp_nr--;
list_del(&cgrp->head);
}
runl->chan_nr--;
list_del_init(&chan->head);
atomic_set(&runl->changed, 1);
}
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
struct nvkm_runl *runl = chan->cgrp->runl;
mutex_lock(&runl->mutex);
if (preempt && chan->func->preempt)
nvkm_chan_preempt_locked(chan, true);
nvkm_chan_remove_locked(chan);
nvkm_runl_update_locked(runl, true);
mutex_unlock(&runl->mutex);
}
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_runl *runl = cgrp->runl;
mutex_lock(&runl->mutex);
if (WARN_ON(!list_empty(&chan->head))) {
mutex_unlock(&runl->mutex);
return;
}
CHAN_TRACE(chan, "insert");
list_add_tail(&chan->head, &cgrp->chans);
runl->chan_nr++;
if (!cgrp->chan_nr++) {
list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
runl->cgrp_nr++;
}
atomic_set(&runl->changed, 1);
nvkm_runl_update_locked(runl, true);
mutex_unlock(&runl->mutex);
}
static void
nvkm_chan_block_locked(struct nvkm_chan *chan)
{
CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
if (atomic_inc_return(&chan->blocked) == 1)
chan->func->stop(chan);
}
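/* Mark a channel as errored: block it, optionally preempt it off HW,
* and notify CHAN_EVENT_ERRORED listeners, but only on the first
* error.
*/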
void
nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
{
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
if (atomic_inc_return(&chan->errored) == 1) {
CHAN_ERROR(chan, "errored - disabling channel");
nvkm_chan_block_locked(chan);
if (preempt)
chan->func->preempt(chan);
nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
}
spin_unlock_irqrestore(&chan->lock, flags);
}
void
nvkm_chan_block(struct nvkm_chan *chan)
{
spin_lock_irq(&chan->lock);
nvkm_chan_block_locked(chan);
spin_unlock_irq(&chan->lock);
}
void
nvkm_chan_allow(struct nvkm_chan *chan)
{
spin_lock_irq(&chan->lock);
CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
if (atomic_dec_and_test(&chan->blocked))
chan->func->start(chan);
spin_unlock_irq(&chan->lock);
}
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
struct nvkm_chan *chan = *pchan;
if (!chan)
return;
if (chan->func->ramfc->clear)
chan->func->ramfc->clear(chan);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
nvkm_gpuobj_del(&chan->cache);
nvkm_gpuobj_del(&chan->ramfc);
nvkm_memory_unref(&chan->userd.mem);
if (chan->cgrp) {
nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
nvkm_cgrp_unref(&chan->cgrp);
}
if (chan->vmm) {
nvkm_vmm_part(chan->vmm, chan->inst->memory);
nvkm_vmm_unref(&chan->vmm);
}
nvkm_gpuobj_del(&chan->push);
nvkm_gpuobj_del(&chan->inst);
kfree(chan);
}
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
struct nvkm_chan *chan = *pchan;
if (!chan)
return;
*pchan = NULL;
spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}
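/* Find the channel whose instance block matches 'inst'. On success,
* the cgrp spinlock is held and must be dropped via nvkm_chan_put().
*/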
struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
struct nvkm_fifo *fifo = engine->subdev.device->fifo;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
struct nvkm_chan *chan;
nvkm_runl_foreach(runl, fifo) {
nvkm_runl_foreach_engn(engn, runl) {
if (engine == &fifo->engine || engn->engine == engine) {
chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
if (chan || engn->engine == engine)
return chan;
}
}
}
return NULL;
}
struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
struct nvkm_fifo *fifo = engine->subdev.device->fifo;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
nvkm_runl_foreach(runl, fifo) {
nvkm_runl_foreach_engn(engn, runl) {
if (fifo->chid || engn->engine == engine)
return nvkm_runl_chan_get_chid(runl, id, pirqflags);
}
}
return NULL;
}
int
nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
{
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_chan *chan;
int ret;
/* Validate arguments against class requirements. */
if ((runq && runq >= runl->func->runqs) ||
(!func->inst->vmm != !vmm) ||
((func->userd->bar < 0) == !userd) ||
(!func->ramfc->ctxdma != !dmaobj) ||
((func->ramfc->devm < devm) && devm != BIT(0)) ||
(!func->ramfc->priv && priv)) {
RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
"push:%d:%p devm:%08x:%08x priv:%d:%d",
runl->func->runqs, runq, func->inst->vmm, vmm,
func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
func->ramfc->devm, devm, func->ramfc->priv, priv);
return -EINVAL;
}
if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
chan->func = func;
strscpy(chan->name, name, sizeof(chan->name));
chan->runq = runq;
chan->id = -1;
spin_lock_init(&chan->lock);
atomic_set(&chan->blocked, 1);
atomic_set(&chan->errored, 0);
INIT_LIST_HEAD(&chan->cctxs);
INIT_LIST_HEAD(&chan->head);
/* Join channel group.
*
* GK110 and newer support channel groups (aka TSGs), where individual channels
* share a timeslice and engine context(s).
*
* As such, engine contexts are tracked in nvkm_cgrp and we need them even when
* channels aren't in an API channel group, and on HW that doesn't support TSGs.
*/
if (!cgrp) {
ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
if (ret) {
RUNL_DEBUG(runl, "cgrp %d", ret);
return ret;
}
cgrp = chan->cgrp;
} else {
if (cgrp->runl != runl || cgrp->vmm != vmm) {
RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
return -EINVAL;
}
chan->cgrp = nvkm_cgrp_ref(cgrp);
}
/* Allocate instance block. */
ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
&chan->inst);
if (ret) {
RUNL_DEBUG(runl, "inst %d", ret);
return ret;
}
/* Initialise virtual address-space. */
if (func->inst->vmm) {
if (WARN_ON(vmm->mmu != device->mmu))
return -EINVAL;
ret = nvkm_vmm_join(vmm, chan->inst->memory);
if (ret) {
RUNL_DEBUG(runl, "vmm %d", ret);
return ret;
}
chan->vmm = nvkm_vmm_ref(vmm);
}
/* Allocate HW ctxdma for push buffer. */
if (func->ramfc->ctxdma) {
ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
if (ret) {
RUNL_DEBUG(runl, "bind %d", ret);
return ret;
}
}
/* Allocate channel ID. */
chan->id = nvkm_chid_get(runl->chid, chan);
if (chan->id < 0) {
RUNL_ERROR(runl, "!chids");
return -ENOSPC;
}
if (cgrp->id < 0)
cgrp->id = chan->id;
/* Initialise USERD. */
if (func->userd->bar < 0) {
if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
RUNL_DEBUG(runl, "ouserd %llx", ouserd);
return -EINVAL;
}
ret = nvkm_memory_kmap(userd, &chan->userd.mem);
if (ret) {
RUNL_DEBUG(runl, "userd %d", ret);
return ret;
}
chan->userd.base = ouserd;
} else {
chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
chan->userd.base = chan->id * chan->func->userd->size;
}
if (chan->func->userd->clear)
chan->func->userd->clear(chan);
/* Initialise RAMFC. */
ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
if (ret) {
RUNL_DEBUG(runl, "ramfc %d", ret);
return ret;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uchan(p) container_of((p), struct nvkm_uchan, object)
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>
#include <nvif/if0020.h>
struct nvkm_uchan {
struct nvkm_object object;
struct nvkm_chan *chan;
};
static int
nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_runl *runl = chan->cgrp->runl;
union nvif_chan_event_args *args = argv;
if (!uevent)
return 0;
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
switch (args->v0.type) {
case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, runl->id,
NVKM_FIFO_NONSTALL_EVENT, NULL);
case NVIF_CHAN_EVENT_V0_KILLED:
return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
NVKM_CHAN_EVENT_ERRORED, NULL);
default:
break;
}
return -ENOSYS;
}
struct nvkm_uobj {
struct nvkm_oproxy oproxy;
struct nvkm_chan *chan;
struct nvkm_cctx *cctx;
int hash;
};
static int
nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
{
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
struct nvkm_chan *chan = uobj->chan;
struct nvkm_cctx *cctx = uobj->cctx;
struct nvkm_ectx *ectx = cctx->vctx->ectx;
if (!ectx->object)
return 0;
/* Unbind engine context from channel, if no longer required. */
if (refcount_dec_and_mutex_lock(&cctx->uses, &chan->cgrp->mutex)) {
nvkm_chan_cctx_bind(chan, ectx->engn, NULL);
if (refcount_dec_and_test(&ectx->uses))
nvkm_object_fini(ectx->object, false);
mutex_unlock(&chan->cgrp->mutex);
}
return 0;
}
static int
nvkm_uchan_object_init_0(struct nvkm_oproxy *oproxy)
{
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
struct nvkm_chan *chan = uobj->chan;
struct nvkm_cctx *cctx = uobj->cctx;
struct nvkm_ectx *ectx = cctx->vctx->ectx;
int ret = 0;
if (!ectx->object)
return 0;
/* Bind engine context to channel, if it hasn't been already. */
if (!refcount_inc_not_zero(&cctx->uses)) {
mutex_lock(&chan->cgrp->mutex);
if (!refcount_inc_not_zero(&cctx->uses)) {
if (!refcount_inc_not_zero(&ectx->uses)) {
ret = nvkm_object_init(ectx->object);
if (ret == 0)
refcount_set(&ectx->uses, 1);
}
if (ret == 0) {
nvkm_chan_cctx_bind(chan, ectx->engn, cctx);
refcount_set(&cctx->uses, 1);
}
}
mutex_unlock(&chan->cgrp->mutex);
}
return ret;
}
static void
nvkm_uchan_object_dtor(struct nvkm_oproxy *oproxy)
{
struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
struct nvkm_engn *engn;
if (!uobj->cctx)
return;
engn = uobj->cctx->vctx->ectx->engn;
if (engn->func->ramht_del)
engn->func->ramht_del(uobj->chan, uobj->hash);
nvkm_chan_cctx_put(uobj->chan, &uobj->cctx);
}
static const struct nvkm_oproxy_func
nvkm_uchan_object = {
.dtor[1] = nvkm_uchan_object_dtor,
.init[0] = nvkm_uchan_object_init_0,
.fini[1] = nvkm_uchan_object_fini_1,
};
static int
nvkm_uchan_object_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_chan *chan = nvkm_uchan(oclass->parent)->chan;
struct nvkm_cgrp *cgrp = chan->cgrp;
struct nvkm_engn *engn;
struct nvkm_uobj *uobj;
int ret;
/* Look up host engine state for the target engine. */
engn = nvkm_runl_find_engn(engn, cgrp->runl, engn->engine == oclass->engine);
if (WARN_ON(!engn))
return -EINVAL;
/* Allocate SW object. */
if (!(uobj = kzalloc(sizeof(*uobj), GFP_KERNEL)))
return -ENOMEM;
nvkm_oproxy_ctor(&nvkm_uchan_object, oclass, &uobj->oproxy);
uobj->chan = chan;
*pobject = &uobj->oproxy.base;
/* Ref. channel context for target engine. */
ret = nvkm_chan_cctx_get(chan, engn, &uobj->cctx, oclass->client);
if (ret)
return ret;
/* Allocate HW object. */
ret = oclass->base.ctor(&(const struct nvkm_oclass) {
.base = oclass->base,
.engn = oclass->engn,
.handle = oclass->handle,
.object = oclass->object,
.client = oclass->client,
.parent = uobj->cctx->vctx->ectx->object ?: oclass->parent,
.engine = engn->engine,
}, argv, argc, &uobj->oproxy.object);
if (ret)
return ret;
if (engn->func->ramht_add) {
uobj->hash = engn->func->ramht_add(engn, uobj->oproxy.object, uobj->chan);
if (uobj->hash < 0)
return uobj->hash;
}
return 0;
}
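/* Enumerate the object classes available to a channel by walking each engine
 * on its runlist. On runlists with multiple runqueues, only the CE serving
 * this channel's runqueue is exposed.
 */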
static int
nvkm_uchan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_engn *engn;
int ret, runq = 0;
nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
struct nvkm_engine *engine = engn->engine;
int c = 0;
/* Each runqueue, on runlists with multiple, has its own LCE. */
if (engn->runl->func->runqs) {
if (engine->subdev.type == NVKM_ENGINE_CE) {
if (chan->runq != runq++)
continue;
}
}
oclass->engine = engine;
oclass->base.oclass = 0;
if (engine->func->fifo.sclass) {
ret = engine->func->fifo.sclass(oclass, index);
if (oclass->base.oclass) {
if (!oclass->base.ctor)
oclass->base.ctor = nvkm_object_new;
oclass->ctor = nvkm_uchan_object_new;
return 0;
}
index -= ret;
continue;
}
while (engine->func->sclass[c].oclass) {
if (c++ == index) {
oclass->base = engine->func->sclass[index];
if (!oclass->base.ctor)
oclass->base.ctor = nvkm_object_new;
oclass->ctor = nvkm_uchan_object_new;
return 0;
}
}
index -= c;
}
return -EINVAL;
}
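/* Map the channel's USERD through BAR, where supported, so usermode can
 * access submission state directly.
 */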
static int
nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
if (chan->func->userd->bar < 0)
return -ENOSYS;
*type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, chan->func->userd->bar) +
chan->func->userd->base + chan->userd.base;
*size = chan->func->userd->size;
return 0;
}
static int
nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
nvkm_chan_block(chan);
nvkm_chan_remove(chan, true);
if (chan->func->unbind)
chan->func->unbind(chan);
return 0;
}
static int
nvkm_uchan_init(struct nvkm_object *object)
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
if (atomic_read(&chan->errored))
return 0;
if (chan->func->bind)
chan->func->bind(chan);
nvkm_chan_allow(chan);
nvkm_chan_insert(chan);
return 0;
}
static void *
nvkm_uchan_dtor(struct nvkm_object *object)
{
struct nvkm_uchan *uchan = nvkm_uchan(object);
nvkm_chan_del(&uchan->chan);
return uchan;
}
static const struct nvkm_object_func
nvkm_uchan = {
.dtor = nvkm_uchan_dtor,
.init = nvkm_uchan_init,
.fini = nvkm_uchan_fini,
.map = nvkm_uchan_map,
.sclass = nvkm_uchan_sclass,
.uevent = nvkm_uchan_uevent,
};
int
nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
union nvif_chan_args *args = argv;
struct nvkm_runl *runl;
struct nvkm_vmm *vmm = NULL;
struct nvkm_dmaobj *ctxdma = NULL;
struct nvkm_memory *userd = NULL;
struct nvkm_uchan *uchan;
struct nvkm_chan *chan;
int ret;
if (argc < sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
argc -= sizeof(args->v0);
if (args->v0.namelen != argc)
return -EINVAL;
/* Lookup objects referenced in args. */
runl = nvkm_runl_get(fifo, args->v0.runlist, 0);
if (!runl)
return -EINVAL;
if (args->v0.vmm) {
vmm = nvkm_uvmm_search(oclass->client, args->v0.vmm);
if (IS_ERR(vmm))
return PTR_ERR(vmm);
}
if (args->v0.ctxdma) {
ctxdma = nvkm_dmaobj_search(oclass->client, args->v0.ctxdma);
if (IS_ERR(ctxdma)) {
ret = PTR_ERR(ctxdma);
goto done;
}
}
if (args->v0.huserd) {
userd = nvkm_umem_search(oclass->client, args->v0.huserd);
if (IS_ERR(userd)) {
ret = PTR_ERR(userd);
userd = NULL;
goto done;
}
}
/* Allocate channel. */
if (!(uchan = kzalloc(sizeof(*uchan), GFP_KERNEL))) {
ret = -ENOMEM;
goto done;
}
nvkm_object_ctor(&nvkm_uchan, oclass, &uchan->object);
*pobject = &uchan->object;
ret = nvkm_chan_new_(fifo->func->chan.func, runl, args->v0.runq, cgrp, args->v0.name,
args->v0.priv != 0, args->v0.devm, vmm, ctxdma, args->v0.offset,
args->v0.length, userd, args->v0.ouserd, &uchan->chan);
if (ret)
goto done;
chan = uchan->chan;
/* Return channel info to caller. */
if (chan->func->doorbell_handle)
args->v0.token = chan->func->doorbell_handle(chan);
else
args->v0.token = ~0;
args->v0.chid = chan->id;
switch (nvkm_memory_target(chan->inst->memory)) {
case NVKM_MEM_TARGET_INST: args->v0.aper = NVIF_CHAN_V0_INST_APER_INST; break;
case NVKM_MEM_TARGET_VRAM: args->v0.aper = NVIF_CHAN_V0_INST_APER_VRAM; break;
case NVKM_MEM_TARGET_HOST: args->v0.aper = NVIF_CHAN_V0_INST_APER_HOST; break;
case NVKM_MEM_TARGET_NCOH: args->v0.aper = NVIF_CHAN_V0_INST_APER_NCOH; break;
default:
WARN_ON(1);
ret = -EFAULT;
break;
}
args->v0.inst = nvkm_memory_addr(chan->inst->memory);
done:
nvkm_memory_unref(&userd);
nvkm_vmm_unref(&vmm);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <engine/sw.h>
#include <nvif/class.h>
void
gf100_chan_preempt(struct nvkm_chan *chan)
{
nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id);
}
static void
gf100_chan_stop(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
}
static void
gf100_chan_start(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x003004 + (chan->id * 8), 0x001f0001);
}
static void gf100_fifo_intr_engine(struct nvkm_fifo *);
static void
gf100_chan_unbind(struct nvkm_chan *chan)
{
struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
/*TODO: Is this cargo-culted, or necessary? RM does *something* here... Why? */
gf100_fifo_intr_engine(fifo);
nvkm_wr32(device, 0x003000 + (chan->id * 8), 0x00000000);
}
static void
gf100_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12);
}
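/* Fill the channel's RAMFC (host state in the instance block). The known
 * fields are the USERD address (0x08/0x0c) and GPFIFO base/limit (0x48/0x4c,
 * limit2 being log2 of the 8-byte entry count); the remaining magic values
 * appear to be fixed default state.
 */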
static int
gf100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x10, 0x0000face);
nvkm_wo32(chan->inst, 0x30, 0xfffff902);
nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x54, 0x00000002);
nvkm_wo32(chan->inst, 0x84, 0x20400000);
nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x9c, 0x00000100);
nvkm_wo32(chan->inst, 0xa4, 0x1f1f1f1f);
nvkm_wo32(chan->inst, 0xa8, 0x1f1f1f1f);
nvkm_wo32(chan->inst, 0xac, 0x0000001f);
nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
nvkm_done(chan->inst);
return 0;
}
static const struct nvkm_chan_func_ramfc
gf100_chan_ramfc = {
.write = gf100_chan_ramfc_write,
.devm = 0xfff,
};
void
gf100_chan_userd_clear(struct nvkm_chan *chan)
{
nvkm_kmap(chan->userd.mem);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x040, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x044, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x048, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x04c, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x050, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x058, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x05c, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x060, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x088, 0x00000000);
nvkm_wo32(chan->userd.mem, chan->userd.base + 0x08c, 0x00000000);
nvkm_done(chan->userd.mem);
}
static const struct nvkm_chan_func_userd
gf100_chan_userd = {
.bar = 1,
.size = 0x1000,
.clear = gf100_chan_userd_clear,
};
const struct nvkm_chan_func_inst
gf100_chan_inst = {
.size = 0x1000,
.zero = true,
.vmm = true,
};
static const struct nvkm_chan_func
gf100_chan = {
.inst = &gf100_chan_inst,
.userd = &gf100_chan_userd,
.ramfc = &gf100_chan_ramfc,
.bind = gf100_chan_bind,
.unbind = gf100_chan_unbind,
.start = gf100_chan_start,
.stop = gf100_chan_stop,
.preempt = gf100_chan_preempt,
};
static void
gf100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
u64 addr = 0ULL;
u32 ptr0;
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_SW : return;
case NVKM_ENGINE_GR : ptr0 = 0x0210; break;
case NVKM_ENGINE_CE : ptr0 = 0x0230 + (engn->engine->subdev.inst * 0x10); break;
case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
default:
WARN_ON(1);
return;
}
if (cctx) {
addr = cctx->vctx->vma->addr;
addr |= 4ULL;
}
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
nvkm_done(chan->inst);
}
static int
gf100_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
int ret;
ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
if (ret)
return ret;
return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
}
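/* CTXSW_TIMEOUT recovery kicks a hung engine off its channel by deliberately
 * triggering an MMU fault on it (via 0x002a30). The SCHED interrupt is
 * masked while any deliberate fault is outstanding, presumably so the
 * timeout handler isn't re-entered for faults we caused ourselves.
 */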
bool
gf100_engn_mmu_fault_triggered(struct nvkm_engn *engn)
{
struct nvkm_runl *runl = engn->runl;
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
u32 data = nvkm_rd32(device, 0x002a30 + (engn->id * 4));
ENGN_DEBUG(engn, "%08x: mmu fault triggered", data);
if (!(data & 0x00000100))
return false;
spin_lock(&fifo->lock);
nvkm_mask(device, 0x002a30 + (engn->id * 4), 0x00000100, 0x00000000);
if (atomic_dec_and_test(&runl->rc_triggered))
nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
spin_unlock(&fifo->lock);
return true;
}
void
gf100_engn_mmu_fault_trigger(struct nvkm_engn *engn)
{
struct nvkm_runl *runl = engn->runl;
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
ENGN_DEBUG(engn, "triggering mmu fault on 0x%02x", engn->fault);
spin_lock(&fifo->lock);
if (atomic_inc_return(&runl->rc_triggered) == 1)
nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
nvkm_wr32(device, 0x002100, 0x00000100);
nvkm_wr32(device, 0x002a30 + (engn->id * 4), 0x00000100 | engn->fault);
spin_unlock(&fifo->lock);
}
/*TODO: clean all this up. */
struct gf100_engn_status {
bool busy;
bool save;
bool unk0;
bool unk1;
u8 chid;
};
static void
gf100_engn_status(struct nvkm_engn *engn, struct gf100_engn_status *status)
{
u32 stat = nvkm_rd32(engn->engine->subdev.device, 0x002640 + (engn->id * 4));
status->busy = (stat & 0x10000000);
status->save = (stat & 0x00100000);
status->unk0 = (stat & 0x00004000);
status->unk1 = (stat & 0x00001000);
status->chid = (stat & 0x0000007f);
ENGN_DEBUG(engn, "%08x: busy %d save %d unk0 %d unk1 %d chid %d",
stat, status->busy, status->save, status->unk0, status->unk1, status->chid);
}
static int
gf100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
{
struct gf100_engn_status status;
gf100_engn_status(engn, &status);
if (status.busy) {
*cgid = false;
return status.chid;
}
return -ENODEV;
}
static bool
gf100_engn_chsw(struct nvkm_engn *engn)
{
struct gf100_engn_status status;
gf100_engn_status(engn, &status);
if (status.busy && (status.unk0 || status.unk1))
return true;
return false;
}
static const struct nvkm_engn_func
gf100_engn = {
.chsw = gf100_engn_chsw,
.cxid = gf100_engn_cxid,
.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
.ctor = gf100_ectx_ctor,
.bind = gf100_ectx_bind,
};
const struct nvkm_engn_func
gf100_engn_sw = {
};
static const struct nvkm_bitfield
gf100_runq_intr_0_names[] = {
/* { 0x00008000, "" } seen with null ib push */
{ 0x00200000, "ILLEGAL_MTHD" },
{ 0x00800000, "EMPTY_SUBC" },
{}
};
bool
gf100_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x04010c + (runq->id * 0x2000));
u32 stat = nvkm_rd32(device, 0x040108 + (runq->id * 0x2000)) & mask;
u32 addr = nvkm_rd32(device, 0x0400c0 + (runq->id * 0x2000));
u32 data = nvkm_rd32(device, 0x0400c4 + (runq->id * 0x2000));
u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & runq->fifo->chid->mask;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
struct nvkm_chan *chan;
unsigned long flags;
char msg[128];
if (stat & 0x00800000) {
if (device->sw) {
if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
show &= ~0x00800000;
}
}
if (show) {
nvkm_snprintbf(msg, sizeof(msg), runq->func->intr_0_names, show);
chan = nvkm_chan_get_chid(&runq->fifo->engine, chid, &flags);
nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x\n",
runq->id, show, msg, chid, chan ? chan->inst->addr : 0,
chan ? chan->name : "unknown", subc, mthd, data);
/*TODO: use proper procedure for clearing each exception / debug output */
if ((stat & 0xc67fe000) && chan)
nvkm_chan_error(chan, true);
nvkm_chan_put(&chan, flags);
}
nvkm_wr32(device, 0x0400c0 + (runq->id * 0x2000), 0x80600008);
nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), stat);
return true;
}
void
gf100_runq_init(struct nvkm_runq *runq)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
nvkm_mask(device, 0x04013c + (runq->id * 0x2000), 0x10000100, 0x00000000);
nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), 0xffffffff); /* INTR */
nvkm_wr32(device, 0x04010c + (runq->id * 0x2000), 0xfffffeff); /* INTREN */
}
static const struct nvkm_runq_func
gf100_runq = {
.init = gf100_runq_init,
.intr = gf100_runq_intr,
.intr_0_names = gf100_runq_intr_0_names,
};
bool
gf100_runl_preempt_pending(struct nvkm_runl *runl)
{
return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000;
}
static void
gf100_runl_fault_clear(struct nvkm_runl *runl)
{
nvkm_mask(runl->fifo->engine.subdev.device, 0x00262c, 0x00000000, 0x00000000);
}
static void
gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
}
static void
gf100_runl_block(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
}
static bool
gf100_runl_pending(struct nvkm_runl *runl)
{
return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000;
}
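/* Submit a new runlist: 0x002270 takes the target aperture and 4KiB-aligned
 * base address, 0x002274 the entry count. The purpose of the 0x01f00000
 * bits is unclear.
 */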
static void
gf100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
int target;
switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
return;
}
nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
nvkm_wr32(device, 0x002274, 0x01f00000 | count);
}
static void
gf100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->id);
nvkm_wo32(memory, offset + 4, 0x00000004);
}
static const struct nvkm_runl_func
gf100_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_chan = gf100_runl_insert_chan,
.commit = gf100_runl_commit,
.wait = nv50_runl_wait,
.pending = gf100_runl_pending,
.block = gf100_runl_block,
.allow = gf100_runl_allow,
.fault_clear = gf100_runl_fault_clear,
.preempt_pending = gf100_runl_preempt_pending,
};
static void
gf100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x80000000);
spin_unlock_irqrestore(&fifo->lock, flags);
}
static void
gf100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x00000000);
spin_unlock_irqrestore(&fifo->lock, flags);
}
const struct nvkm_event_func
gf100_fifo_nonstall = {
.init = gf100_fifo_nonstall_allow,
.fini = gf100_fifo_nonstall_block,
};
static const struct nvkm_enum
gf100_fifo_mmu_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x07, "PFIFO" },
{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
{ 0x13, "PCOUNTER" },
{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
{ 0x17, "PMU" },
{}
};
static const struct nvkm_enum
gf100_fifo_mmu_fault_reason[] = {
{ 0x00, "PT_NOT_PRESENT" },
{ 0x01, "PT_TOO_SHORT" },
{ 0x02, "PAGE_NOT_PRESENT" },
{ 0x03, "VM_LIMIT_EXCEEDED" },
{ 0x04, "NO_CHANNEL" },
{ 0x05, "PAGE_SYSTEM_ONLY" },
{ 0x06, "PAGE_READ_ONLY" },
{ 0x0a, "COMPRESSED_SYSRAM" },
{ 0x0c, "INVALID_STORAGE_TYPE" },
{}
};
static const struct nvkm_enum
gf100_fifo_mmu_fault_hubclient[] = {
{ 0x01, "PCOPY0" },
{ 0x02, "PCOPY1" },
{ 0x04, "DISPATCH" },
{ 0x05, "CTXCTL" },
{ 0x06, "PFIFO" },
{ 0x07, "BAR_READ" },
{ 0x08, "BAR_WRITE" },
{ 0x0b, "PVP" },
{ 0x0c, "PMSPPP" },
{ 0x0d, "PMSVLD" },
{ 0x11, "PCOUNTER" },
{ 0x12, "PMU" },
{ 0x14, "CCACHE" },
{ 0x15, "CCACHE_POST" },
{}
};
static const struct nvkm_enum
gf100_fifo_mmu_fault_gpcclient[] = {
{ 0x01, "TEX" },
{ 0x0c, "ESETUP" },
{ 0x0e, "CTXCTL" },
{ 0x0f, "PROP" },
{}
};
const struct nvkm_enum
gf100_fifo_mmu_fault_access[] = {
{ 0x00, "READ" },
{ 0x01, "WRITE" },
{}
};
void
gf100_fifo_mmu_fault_recover(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
const struct nvkm_enum *er, *ee, *ec, *ea;
struct nvkm_engine *engine = NULL;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
struct nvkm_chan *chan;
unsigned long flags;
char ct[8] = "HUB/";
/* Lookup engine by MMU fault ID. */
nvkm_runl_foreach(runl, fifo) {
engn = nvkm_runl_find_engn(engn, runl, engn->fault == info->engine);
if (engn) {
/* Fault triggered by CTXSW_TIMEOUT recovery procedure. */
if (engn->func->mmu_fault_triggered &&
engn->func->mmu_fault_triggered(engn)) {
nvkm_runl_rc_engn(runl, engn);
return;
}
engine = engn->engine;
break;
}
}
er = nvkm_enum_find(fifo->func->mmu_fault->reason, info->reason);
ee = nvkm_enum_find(fifo->func->mmu_fault->engine, info->engine);
if (info->hub) {
ec = nvkm_enum_find(fifo->func->mmu_fault->hubclient, info->client);
} else {
ec = nvkm_enum_find(fifo->func->mmu_fault->gpcclient, info->client);
snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
}
ea = nvkm_enum_find(fifo->func->mmu_fault->access, info->access);
/* Handle BAR faults. */
if (ee && ee->data2) {
switch (ee->data2) {
case NVKM_SUBDEV_BAR:
nvkm_bar_bar1_reset(device);
break;
case NVKM_SUBDEV_INSTMEM:
nvkm_bar_bar2_reset(device);
break;
case NVKM_ENGINE_IFB:
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
break;
default:
break;
}
}
chan = nvkm_chan_get_inst(&fifo->engine, info->inst, &flags);
nvkm_error(subdev,
"fault %02x [%s] at %016llx engine %02x [%s] client %02x "
"[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
info->access, ea ? ea->name : "", info->addr,
info->engine, ee ? ee->name : engine ? engine->subdev.name : "",
info->client, ct, ec ? ec->name : "",
info->reason, er ? er->name : "",
chan ? chan->id : -1, info->inst, chan ? chan->name : "unknown");
/* Handle host/engine faults. */
if (chan)
nvkm_runl_rc_cgrp(chan->cgrp);
nvkm_chan_put(&chan, flags);
}
static const struct nvkm_fifo_func_mmu_fault
gf100_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gf100_fifo_mmu_fault_access,
.engine = gf100_fifo_mmu_fault_engine,
.reason = gf100_fifo_mmu_fault_reason,
.hubclient = gf100_fifo_mmu_fault_hubclient,
.gpcclient = gf100_fifo_mmu_fault_gpcclient,
};
void
gf100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo, u32 engm)
{
struct nvkm_runl *runl;
struct nvkm_engn *engn, *engn2;
bool cgid, cgid2;
int id, id2;
nvkm_runl_foreach(runl, fifo) {
/* Stop the runlist, and go through all engines serving it. */
nvkm_runl_block(runl);
nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id)) {
/* Determine what channel (group) the engine is on. */
id = engn->func->cxid(engn, &cgid);
if (id >= 0) {
/* Trigger MMU fault on any engine(s) on that channel (group). */
nvkm_runl_foreach_engn_cond(engn2, runl, engn2->func->cxid) {
id2 = engn2->func->cxid(engn2, &cgid2);
if (cgid2 == cgid && id2 == id)
engn2->func->mmu_fault_trigger(engn2);
}
}
}
nvkm_runl_allow(runl); /* HW will keep runlist blocked via ERROR_SCHED_DISABLE. */
}
}
static void
gf100_fifo_intr_sched_ctxsw(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl;
struct nvkm_engn *engn;
u32 engm = 0;
/* Look for any engines that are busy, and awaiting chsw ack. */
nvkm_runl_foreach(runl, fifo) {
nvkm_runl_foreach_engn_cond(engn, runl, engn->func->chsw) {
if (WARN_ON(engn->fault < 0) || !engn->func->chsw(engn))
continue;
engm |= BIT(engn->id);
}
}
if (!engm)
return;
fifo->func->intr_ctxsw_timeout(fifo, engm);
}
static const struct nvkm_enum
gf100_fifo_intr_sched_names[] = {
{ 0x0a, "CTXSW_TIMEOUT" },
{}
};
void
gf100_fifo_intr_sched(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 intr = nvkm_rd32(device, 0x00254c);
u32 code = intr & 0x000000ff;
const struct nvkm_enum *en;
en = nvkm_enum_find(gf100_fifo_intr_sched_names, code);
nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
switch (code) {
case 0x0a:
gf100_fifo_intr_sched_ctxsw(fifo);
break;
default:
break;
}
}
void
gf100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
{
struct nvkm_device *device = fifo->engine.subdev.device;
u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
struct nvkm_fault_data info;
info.inst = (u64)inst << 12;
info.addr = ((u64)vahi << 32) | valo;
info.time = 0;
info.engine = unit;
info.valid = 1;
info.gpc = (type & 0x1f000000) >> 24;
info.client = (type & 0x00001f00) >> 8;
info.access = (type & 0x00000080) >> 7;
info.hub = (type & 0x00000040) >> 6;
info.reason = (type & 0x0000000f);
nvkm_fifo_fault(fifo, &info);
}
void
gf100_fifo_intr_mmu_fault(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
unsigned long mask = nvkm_rd32(device, 0x00259c);
int unit;
for_each_set_bit(unit, &mask, 32) {
fifo->func->intr_mmu_fault_unit(fifo, unit);
nvkm_wr32(device, 0x00259c, BIT(unit));
}
}
bool
gf100_fifo_intr_pbdma(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_runq *runq;
u32 mask = nvkm_rd32(device, 0x0025a0);
bool handled = false;
nvkm_runq_foreach_cond(runq, fifo, mask & BIT(runq->id)) {
if (runq->func->intr(runq, NULL))
handled = true;
nvkm_wr32(device, 0x0025a0, BIT(runq->id));
}
return handled;
}
static void
gf100_fifo_intr_runlist(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 intr = nvkm_rd32(device, 0x002a00);
if (intr & 0x10000000) {
nvkm_wr32(device, 0x002a00, 0x10000000);
intr &= ~0x10000000;
}
if (intr) {
nvkm_error(subdev, "RUNLIST %08x\n", intr);
nvkm_wr32(device, 0x002a00, intr);
}
}
static void
gf100_fifo_intr_engine_unit(struct nvkm_fifo *fifo, int engn)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
u32 inte = nvkm_rd32(device, 0x002628);
u32 unkn;
nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
ints &= ~1;
}
if (ints) {
nvkm_error(subdev, "ENGINE %d %d %01x", engn, unkn, ints);
nvkm_mask(device, 0x002628, ints, 0);
}
}
}
static void
gf100_fifo_intr_engine(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
u32 mask = nvkm_rd32(device, 0x0025a4);
while (mask) {
u32 unit = __ffs(mask);
gf100_fifo_intr_engine_unit(fifo, unit);
mask &= ~(1 << unit);
}
}
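/* Top-level PFIFO interrupt dispatch. Bits handled below: 0x00000100
 * SCHED_ERROR, 0x10000000 MMU fault, 0x20000000 PBDMA, 0x40000000 runlist,
 * 0x80000000 engine (non-stall). Anything unrecognised is masked off to
 * avoid an interrupt storm.
 */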
static irqreturn_t
gf100_fifo_intr(struct nvkm_inth *inth)
{
struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x002140);
u32 stat = nvkm_rd32(device, 0x002100) & mask;
if (stat & 0x00000001) {
u32 intr = nvkm_rd32(device, 0x00252c);
nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
nvkm_wr32(device, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000100) {
gf100_fifo_intr_sched(fifo);
nvkm_wr32(device, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
u32 intr = nvkm_rd32(device, 0x00256c);
nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
nvkm_wr32(device, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
if (stat & 0x01000000) {
u32 intr = nvkm_rd32(device, 0x00258c);
nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
nvkm_wr32(device, 0x002100, 0x01000000);
stat &= ~0x01000000;
}
if (stat & 0x10000000) {
gf100_fifo_intr_mmu_fault(fifo);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
if (gf100_fifo_intr_pbdma(fifo))
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
gf100_fifo_intr_runlist(fifo);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
gf100_fifo_intr_engine(fifo);
stat &= ~0x80000000;
}
if (stat) {
nvkm_error(subdev, "INTR %08x\n", stat);
spin_lock(&fifo->lock);
nvkm_mask(device, 0x002140, stat, 0x00000000);
spin_unlock(&fifo->lock);
nvkm_wr32(device, 0x002100, stat);
}
return IRQ_HANDLED;
}
static void
gf100_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
struct nvkm_device *device = fifo->engine.subdev.device;
/* Enable PBDMAs. */
nvkm_wr32(device, 0x000204, mask);
nvkm_wr32(device, 0x002204, mask);
/* Assign engines to PBDMAs. */
if ((mask & 7) == 7) {
nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
}
nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}
static void
gf100_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}
static int
gf100_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl;
runl = nvkm_runl_new(fifo, 0, 0, 0);
if (IS_ERR(runl))
return PTR_ERR(runl);
nvkm_runl_add(runl, 0, fifo->func->engn, NVKM_ENGINE_GR, 0);
nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0);
nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0);
nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_MSVLD, 0);
nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_CE, 0);
nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_CE, 1);
nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
return 0;
}
int
gf100_fifo_runq_nr(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
u32 save;
/* Determine number of PBDMAs by checking valid enable bits. */
save = nvkm_mask(device, 0x000204, 0xffffffff, 0xffffffff);
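/* nvkm_mask() returns the register's previous contents: the first call
 * saved the original enables while setting every bit, this one reads back
 * which bits stuck (one per PBDMA present) and restores the saved value.
 */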
save = nvkm_mask(device, 0x000204, 0xffffffff, save);
return hweight32(save);
}
int
gf100_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->chid);
}
static const struct nvkm_fifo_func
gf100_fifo = {
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = gf100_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gf100_fifo_runl_ctor,
.init = gf100_fifo_init,
.init_pbdmas = gf100_fifo_init_pbdmas,
.intr = gf100_fifo_intr,
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gf100_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gf100_runl,
.runq = &gf100_runq,
.engn = &gf100_engn,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, FERMI_CHANNEL_GPFIFO }, &gf100_chan },
};
int
gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gf100_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <subdev/instmem.h>
#include "regsnv04.h"
#include <nvif/class.h>
static int
nv10_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 32;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
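/* The layout table maps each RAMFC field to the PFIFO register it shadows,
 * which the common NV04 code uses to save/restore channel state across a
 * context switch.
 */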
static const struct nvkm_chan_func_ramfc
nv10_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{}
},
.write = nv10_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func
nv10_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv10_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
int
nv10_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 32;
}
static const struct nvkm_fifo_func
nv10_fifo = {
.chid_nr = nv10_fifo_chid_nr,
.chid_ctor = nv04_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv04_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
.engn = &nv04_engn,
.engn_sw = &nv04_engn,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, NV10_CHANNEL_DMA }, &nv10_chan },
};
int
nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&nv10_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "priv.h"
#include <core/gpuobj.h>
#include <subdev/mmu.h>
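/* Engine (ectx) and sub- (vctx) contexts are shared between all channels in
 * a group, and refcounted so that HW state is created on first use and torn
 * down along with its last user.
 */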
static void
nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
{
struct nvkm_ectx *ectx = *pectx;
if (ectx) {
struct nvkm_engn *engn = ectx->engn;
if (refcount_dec_and_test(&ectx->refs)) {
CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
nvkm_object_del(&ectx->object);
list_del(&ectx->head);
kfree(ectx);
}
*pectx = NULL;
}
}
static int
nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
struct nvkm_chan *chan, struct nvkm_client *client)
{
struct nvkm_engine *engine = engn->engine;
struct nvkm_oclass cclass = {
.client = client,
.engine = engine,
};
struct nvkm_ectx *ectx;
int ret = 0;
/* Look for an existing context for this engine in the channel group. */
ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
if (ectx) {
refcount_inc(&ectx->refs);
*pectx = ectx;
return 0;
}
/* Nope - create a fresh one. */
CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
if (!(ectx = *pectx = kzalloc(sizeof(*ectx), GFP_KERNEL)))
return -ENOMEM;
ectx->engn = engn;
refcount_set(&ectx->refs, 1);
refcount_set(&ectx->uses, 0);
list_add_tail(&ectx->head, &cgrp->ectxs);
/* Allocate the HW structures. */
if (engine->func->fifo.cclass)
ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
else if (engine->func->cclass)
ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);
if (ret)
nvkm_cgrp_ectx_put(cgrp, pectx);
return ret;
}
void
nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
{
struct nvkm_vctx *vctx = *pvctx;
if (vctx) {
struct nvkm_engn *engn = vctx->ectx->engn;
if (refcount_dec_and_test(&vctx->refs)) {
CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
nvkm_vmm_put(vctx->vmm, &vctx->vma);
nvkm_gpuobj_del(&vctx->inst);
nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
if (vctx->vmm) {
atomic_dec(&vctx->vmm->engref[engn->engine->subdev.type]);
nvkm_vmm_unref(&vctx->vmm);
}
list_del(&vctx->head);
kfree(vctx);
}
*pvctx = NULL;
}
}
int
nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_chan *chan,
struct nvkm_vctx **pvctx, struct nvkm_client *client)
{
struct nvkm_ectx *ectx;
struct nvkm_vctx *vctx;
int ret;
/* Look for an existing sub-context for this engine+VEID in the channel group. */
vctx = nvkm_list_find(vctx, &cgrp->vctxs, head,
vctx->ectx->engn == engn && vctx->vmm == chan->vmm);
if (vctx) {
refcount_inc(&vctx->refs);
*pvctx = vctx;
return 0;
}
/* Nope - create a fresh one. But, engine context first. */
ret = nvkm_cgrp_ectx_get(cgrp, engn, &ectx, chan, client);
if (ret) {
CGRP_ERROR(cgrp, "ectx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
return ret;
}
/* Now, create the sub-context. */
CGRP_TRACE(cgrp, "ctor vctx %d[%s]", engn->id, engn->engine->subdev.name);
if (!(vctx = *pvctx = kzalloc(sizeof(*vctx), GFP_KERNEL))) {
nvkm_cgrp_ectx_put(cgrp, &ectx);
return -ENOMEM;
}
vctx->ectx = ectx;
vctx->vmm = nvkm_vmm_ref(chan->vmm);
refcount_set(&vctx->refs, 1);
list_add_tail(&vctx->head, &cgrp->vctxs);
/* MMU on some GPUs needs to know engine usage for TLB invalidation. */
if (vctx->vmm)
atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
/* Allocate the HW structures. */
if (engn->func->bind) {
ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
if (ret == 0 && engn->func->ctor)
ret = engn->func->ctor(engn, vctx);
}
if (ret)
nvkm_cgrp_vctx_put(cgrp, pvctx);
return ret;
}
static void
nvkm_cgrp_del(struct kref *kref)
{
struct nvkm_cgrp *cgrp = container_of(kref, typeof(*cgrp), kref);
struct nvkm_runl *runl = cgrp->runl;
if (runl->cgid)
nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);
mutex_destroy(&cgrp->mutex);
nvkm_vmm_unref(&cgrp->vmm);
kfree(cgrp);
}
void
nvkm_cgrp_unref(struct nvkm_cgrp **pcgrp)
{
struct nvkm_cgrp *cgrp = *pcgrp;
if (!cgrp)
return;
kref_put(&cgrp->kref, nvkm_cgrp_del);
*pcgrp = NULL;
}
struct nvkm_cgrp *
nvkm_cgrp_ref(struct nvkm_cgrp *cgrp)
{
if (cgrp)
kref_get(&cgrp->kref);
return cgrp;
}
void
nvkm_cgrp_put(struct nvkm_cgrp **pcgrp, unsigned long irqflags)
{
struct nvkm_cgrp *cgrp = *pcgrp;
if (!cgrp)
return;
*pcgrp = NULL;
spin_unlock_irqrestore(&cgrp->lock, irqflags);
}
int
nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bool hw,
struct nvkm_cgrp **pcgrp)
{
struct nvkm_cgrp *cgrp;
if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))
return -ENOMEM;
cgrp->func = runl->fifo->func->cgrp.func;
strscpy(cgrp->name, name, sizeof(cgrp->name));
cgrp->runl = runl;
cgrp->vmm = nvkm_vmm_ref(vmm);
cgrp->hw = hw;
cgrp->id = -1;
kref_init(&cgrp->kref);
INIT_LIST_HEAD(&cgrp->chans);
cgrp->chan_nr = 0;
spin_lock_init(&cgrp->lock);
INIT_LIST_HEAD(&cgrp->ectxs);
INIT_LIST_HEAD(&cgrp->vctxs);
mutex_init(&cgrp->mutex);
atomic_set(&cgrp->rc, NVKM_CGRP_RC_NONE);
if (runl->cgid) {
cgrp->id = nvkm_chid_get(runl->cgid, cgrp);
if (cgrp->id < 0) {
RUNL_ERROR(runl, "!cgids");
nvkm_cgrp_unref(pcgrp);
return -ENOSPC;
}
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "runl.h"
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/vfn.h>
#include <nvif/class.h>
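/* Turing moves work submission to a doorbell in the usermode (VFN) region;
 * the token written there encodes the runlist and channel IDs.
 */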
static u32
tu102_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->id << 16) | chan->id;
}
static void
tu102_chan_start(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
gk104_chan_start(chan);
nvkm_wr32(device, device->vfn->addr.user + 0x0090, chan->func->doorbell_handle(chan));
}
static const struct nvkm_chan_func
tu102_chan = {
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &gv100_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = tu102_chan_start,
.stop = gk104_chan_stop,
.preempt = gk110_chan_preempt,
.doorbell_handle = tu102_chan_doorbell_handle,
};
static bool
tu102_runl_pending(struct nvkm_runl *runl)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
return nvkm_rd32(device, 0x002b0c + (runl->id * 0x10)) & 0x00008000;
}
static void
tu102_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
/*XXX: target? */
nvkm_wr32(device, 0x002b00 + (runl->id * 0x10), lower_32_bits(addr));
nvkm_wr32(device, 0x002b04 + (runl->id * 0x10), upper_32_bits(addr));
nvkm_wr32(device, 0x002b08 + (runl->id * 0x10), count);
}
static const struct nvkm_runl_func
tu102_runl = {
.runqs = 2,
.size = 16,
.update = nv50_runl_update,
.insert_cgrp = gv100_runl_insert_cgrp,
.insert_chan = gv100_runl_insert_chan,
.commit = tu102_runl_commit,
.wait = nv50_runl_wait,
.pending = tu102_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.preempt = gv100_runl_preempt,
.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
tu102_fifo_mmu_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "PTP" },
{ 0x06, "PWR_PMU" },
{ 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x09, "PERF" },
{ 0x1f, "PHYSICAL" },
{ 0x20, "HOST0" },
{ 0x21, "HOST1" },
{ 0x22, "HOST2" },
{ 0x23, "HOST3" },
{ 0x24, "HOST4" },
{ 0x25, "HOST5" },
{ 0x26, "HOST6" },
{ 0x27, "HOST7" },
{ 0x28, "HOST8" },
{ 0x29, "HOST9" },
{ 0x2a, "HOST10" },
{ 0x2b, "HOST11" },
{ 0x2c, "HOST12" },
{ 0x2d, "HOST13" },
{ 0x2e, "HOST14" },
{ 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{}
};
const struct nvkm_fifo_func_mmu_fault
tu102_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gv100_fifo_mmu_fault_access,
.engine = tu102_fifo_mmu_fault_engine,
.reason = gv100_fifo_mmu_fault_reason,
.hubclient = gv100_fifo_mmu_fault_hubclient,
.gpcclient = gv100_fifo_mmu_fault_gpcclient,
};
void
tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *engn, u32 info)
{
struct nvkm_runl *runl = engn->runl;
struct nvkm_cgrp *cgrp;
unsigned long flags;
/* Check that engine hasn't become unstuck since timeout raised. */
ENGN_DEBUG(engn, "CTXSW_TIMEOUT %08x", info);
if (info & 0xc0000000)
return;
/* Determine channel group the engine is stuck on, and schedule recovery. */
switch (info & 0x0000c000) {
case 0x00004000: /* LOAD */
cgrp = nvkm_runl_cgrp_get_cgid(runl, (info & 0x3fff0000) >> 16, &flags);
break;
case 0x00008000: /* SAVE */
case 0x0000c000: /* SWITCH */
cgrp = nvkm_runl_cgrp_get_cgid(runl, info & 0x00003fff, &flags);
break;
default:
cgrp = NULL;
break;
}
if (!WARN_ON(!cgrp)) {
nvkm_runl_rc_cgrp(cgrp);
nvkm_cgrp_put(&cgrp, flags);
}
}
static void
tu102_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
u32 engm = nvkm_rd32(device, 0x002a30);
u32 info;
nvkm_runl_foreach(runl, fifo) {
nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id)) {
info = nvkm_rd32(device, 0x003200 + (engn->id * 4));
tu102_fifo_intr_ctxsw_timeout_info(engn, info);
}
}
nvkm_wr32(device, 0x002a30, engm);
}
static void
tu102_fifo_intr_sched(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
u32 intr = nvkm_rd32(subdev->device, 0x00254c);
u32 code = intr & 0x000000ff;
nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
}
static irqreturn_t
tu102_fifo_intr(struct nvkm_inth *inth)
{
struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = nvkm_rd32(device, 0x002140);
u32 stat = nvkm_rd32(device, 0x002100) & mask;
if (stat & 0x00000001) {
gk104_fifo_intr_bind(fifo);
nvkm_wr32(device, 0x002100, 0x00000001);
stat &= ~0x00000001;
}
if (stat & 0x00000002) {
tu102_fifo_intr_ctxsw_timeout(fifo);
stat &= ~0x00000002;
}
if (stat & 0x00000100) {
tu102_fifo_intr_sched(fifo);
nvkm_wr32(device, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x00010000) {
gk104_fifo_intr_chsw(fifo);
nvkm_wr32(device, 0x002100, 0x00010000);
stat &= ~0x00010000;
}
if (stat & 0x20000000) {
if (gf100_fifo_intr_pbdma(fifo))
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
gk104_fifo_intr_runlist(fifo);
stat &= ~0x40000000;
}
if (stat & 0x80000000) {
nvkm_wr32(device, 0x002100, 0x80000000);
nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
stat &= ~0x80000000;
}
if (stat) {
nvkm_error(subdev, "INTR %08x\n", stat);
spin_lock(&fifo->lock);
nvkm_mask(device, 0x002140, stat, 0x00000000);
spin_unlock(&fifo->lock);
nvkm_wr32(device, 0x002100, stat);
}
return IRQ_HANDLED;
}
static void
tu102_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
/* Not directly related to PBDMAs, but enables the doorbell to function. */
nvkm_mask(fifo->engine.subdev.device, 0xb65000, 0x80000000, 0x80000000);
}
static const struct nvkm_fifo_func
tu102_fifo = {
.chid_nr = gm200_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gm200_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = tu102_fifo_init_pbdmas,
.intr = tu102_fifo_intr,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &tu102_runl,
.runq = &gv100_runq,
.engn = &gv100_engn,
.engn_ce = &gv100_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
.chan = {{ 0, 0, TURING_CHANNEL_GPFIFO_A }, &tu102_chan },
};
int
tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include <core/ramht.h>
#include <subdev/timer.h>
#include <nvif/class.h>
void
nv50_eobj_ramht_del(struct nvkm_chan *chan, int hash)
{
nvkm_ramht_remove(chan->ramht, hash);
}
int
nv50_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
{
return nvkm_ramht_insert(chan->ramht, eobj, 0, 4, eobj->handle, engn->id << 20);
}
void
nv50_chan_stop(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
}
void
nv50_chan_start(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x80000000);
}
void
nv50_chan_unbind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x002600 + (chan->id * 4), 0x00000000);
}
static void
nv50_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 12);
}
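/* NV50 carves the channel's RAMFC, engine context table, page directory and
 * RAMHT all out of its 64KiB instance block.
 */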
static int
nv50_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
const u32 limit2 = ilog2(length / 8);
int ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->inst, &chan->ramfc);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->inst, &chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
if (ret)
return ret;
ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
if (ret)
return ret;
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_done(chan->ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv50_chan_ramfc = {
.write = nv50_chan_ramfc_write,
.ctxdma = true,
.devm = 0xfff,
};
const struct nvkm_chan_func_userd
nv50_chan_userd = {
.bar = 0,
.base = 0xc00000,
.size = 0x002000,
};
const struct nvkm_chan_func_inst
nv50_chan_inst = {
.size = 0x10000,
.vmm = true,
};
static const struct nvkm_chan_func
nv50_chan = {
.inst = &nv50_chan_inst,
.userd = &nv50_chan_userd,
.ramfc = &nv50_chan_ramfc,
.bind = nv50_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
.stop = nv50_chan_stop,
};
static void
nv50_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u64 start = 0, limit = 0;
u32 flags = 0, ptr0, save;
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_GR : ptr0 = 0x0000; break;
case NVKM_ENGINE_MPEG : ptr0 = 0x0060; break;
default:
WARN_ON(1);
return;
}
if (!cctx) {
/* HW bug workaround:
*
* PFIFO will hang forever if the connected engines don't report
* that they've processed the context switch request.
*
* In order for the kickoff to work, we need to ensure all the
* connected engines are in a state where they can answer.
*
* Newer chipsets don't seem to suffer from this issue, and well,
* there's also a "ignore these engines" bitmask reg we can use
* if we hit the issue there..
*/
save = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
/* Tell engines to save out contexts. */
nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
break;
);
nvkm_wr32(device, 0x00b860, save);
} else {
flags = 0x00190000;
start = cctx->vctx->inst->addr;
limit = start + cctx->vctx->inst->size - 1;
}
nvkm_kmap(chan->eng);
nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
lower_32_bits(start));
nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
nvkm_done(chan->eng);
}
static const struct nvkm_engn_func
nv50_engn = {
.bind = nv50_ectx_bind,
.ramht_add = nv50_eobj_ramht_add,
.ramht_del = nv50_eobj_ramht_del,
};
const struct nvkm_engn_func
nv50_engn_sw = {
.ramht_add = nv50_eobj_ramht_add,
.ramht_del = nv50_eobj_ramht_del,
};
static bool
nv50_runl_pending(struct nvkm_runl *runl)
{
return nvkm_rd32(runl->fifo->engine.subdev.device, 0x0032ec) & 0x00000100;
}
int
nv50_runl_wait(struct nvkm_runl *runl)
{
struct nvkm_fifo *fifo = runl->fifo;
nvkm_msec(fifo->engine.subdev.device, fifo->timeout.chan_msec,
if (!nvkm_runl_update_pending(runl))
return 0;
usleep_range(1, 2);
);
return -ETIMEDOUT;
}
static void
nv50_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
nvkm_wr32(device, 0x0032f4, addr >> 12);
nvkm_wr32(device, 0x0032ec, count);
}
static void
nv50_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset, chan->id);
}
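/* Runlist memory is sized for two worst-case lists, and successive updates
 * are appended so a new list can be written while HW may still be fetching
 * the previous one. When space runs out, wait for the pending update to
 * complete before rewinding to the start.
 */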
static struct nvkm_memory *
nv50_runl_alloc(struct nvkm_runl *runl, u32 *offset)
{
const u32 segment = ALIGN((runl->cgrp_nr + runl->chan_nr) * runl->func->size, 0x1000);
const u32 maxsize = (runl->cgid ? runl->cgid->nr : 0) + runl->chid->nr;
int ret;
if (unlikely(!runl->mem)) {
ret = nvkm_memory_new(runl->fifo->engine.subdev.device, NVKM_MEM_TARGET_INST,
maxsize * 2 * runl->func->size, 0, false, &runl->mem);
if (ret) {
RUNL_ERROR(runl, "alloc %d\n", ret);
return ERR_PTR(ret);
}
} else {
if (runl->offset + segment >= nvkm_memory_size(runl->mem)) {
ret = runl->func->wait(runl);
if (ret) {
RUNL_DEBUG(runl, "rewind timeout");
return ERR_PTR(ret);
}
runl->offset = 0;
}
}
*offset = runl->offset;
runl->offset += segment;
return runl->mem;
}
int
nv50_runl_update(struct nvkm_runl *runl)
{
struct nvkm_memory *memory;
struct nvkm_cgrp *cgrp;
struct nvkm_chan *chan;
u32 start, offset, count;
/*TODO: prio, interleaving. */
RUNL_TRACE(runl, "RAMRL: update cgrps:%d chans:%d", runl->cgrp_nr, runl->chan_nr);
memory = nv50_runl_alloc(runl, &start);
if (IS_ERR(memory))
return PTR_ERR(memory);
RUNL_TRACE(runl, "RAMRL: update start:%08x", start);
offset = start;
nvkm_kmap(memory);
nvkm_runl_foreach_cgrp(cgrp, runl) {
if (cgrp->hw) {
CGRP_TRACE(cgrp, " RAMRL+%08x: chans:%d", offset, cgrp->chan_nr);
runl->func->insert_cgrp(cgrp, memory, offset);
offset += runl->func->size;
}
nvkm_cgrp_foreach_chan(chan, cgrp) {
CHAN_TRACE(chan, "RAMRL+%08x: [%s]", offset, chan->name);
runl->func->insert_chan(chan, memory, offset);
offset += runl->func->size;
}
}
nvkm_done(memory);
/*TODO: look into using features on newer HW to guarantee forward progress. */
list_rotate_left(&runl->cgrps);
count = (offset - start) / runl->func->size;
RUNL_TRACE(runl, "RAMRL: commit start:%08x count:%d", start, count);
runl->func->commit(runl, memory, start, count);
return 0;
}
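/* Rotating the cgrp list on each update yields a simple round-robin
 * ordering between updates, partially compensating for the missing
 * priority/interleaving support noted in the TODO above.
 */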
const struct nvkm_runl_func
nv50_runl = {
.size = 4,
.update = nv50_runl_update,
.insert_chan = nv50_runl_insert_chan,
.commit = nv50_runl_commit,
.wait = nv50_runl_wait,
.pending = nv50_runl_pending,
};
void
nv50_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl = nvkm_runl_first(fifo);
struct nvkm_device *device = fifo->engine.subdev.device;
int i;
nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
nvkm_wr32(device, 0x002044, 0x01003fff);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0xbfffffff);
for (i = 0; i < 128; i++)
nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
atomic_set(&runl->changed, 1);
runl->func->update(runl);
nvkm_wr32(device, 0x003200, 0x00000001);
nvkm_wr32(device, 0x003250, 0x00000001);
nvkm_wr32(device, 0x002500, 0x00000001);
}
int
nv50_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
/* CHID 0 is unusable (some kind of PIO channel?), 127 is "channel invalid". */
return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 1, nr - 2, &fifo->chid);
}
int
nv50_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 128;
}
static const struct nvkm_fifo_func
nv50_fifo = {
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv50_runl,
.engn = &nv50_engn,
.engn_sw = &nv50_engn_sw,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, NV50_CHANNEL_GPFIFO }, &nv50_chan },
};
int
nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include <core/memory.h>
#include <subdev/timer.h>
#include <nvif/class.h>
void
gk110_chan_preempt(struct nvkm_chan *chan)
{
struct nvkm_cgrp *cgrp = chan->cgrp;
if (cgrp->hw) {
cgrp->func->preempt(cgrp);
return;
}
gf100_chan_preempt(chan);
}
const struct nvkm_chan_func
gk110_chan = {
.inst = &gf100_chan_inst,
.userd = &gk104_chan_userd,
.ramfc = &gk104_chan_ramfc,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
.stop = gk104_chan_stop,
.preempt = gk110_chan_preempt,
};
static void
gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)
{
nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);
}
const struct nvkm_cgrp_func
gk110_cgrp = {
.preempt = gk110_cgrp_preempt,
};
void
gk110_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
(3 << 14) | 0x00002000 | cgrp->id);
nvkm_wo32(memory, offset + 4, 0x00000000);
}
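/* A rough decode of the TSG entry built above, inferred from the shifts
 * rather than documentation: bits 31:26 channel count, bits 25:18
 * runlist timeslice timeout (128), bits 17:14 timeslice scale (3),
 * bit 13 entry-type = TSG, bits 11:0 the group ID.
 */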
const struct nvkm_runl_func
gk110_runl = {
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gk104_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.fault_clear = gk104_runl_fault_clear,
.preempt_pending = gf100_runl_preempt_pending,
};
int
gk110_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
int ret;
ret = nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->cgid);
if (ret)
return ret;
return gf100_fifo_chid_ctor(fifo, nr);
}
static const struct nvkm_fifo_func
gk110_fifo = {
.chid_nr = gk104_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk104_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_B }, &gk110_chan },
};
int
gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gk110_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_ucgrp(p) container_of((p), struct nvkm_ucgrp, object)
#include "priv.h"
#include "cgrp.h"
#include "runl.h"
#include <subdev/mmu.h>
#include <nvif/if0021.h>
struct nvkm_ucgrp {
struct nvkm_object object;
struct nvkm_cgrp *cgrp;
};
static int
nvkm_ucgrp_chan_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_cgrp *cgrp = nvkm_ucgrp(oclass->parent)->cgrp;
return nvkm_uchan_new(cgrp->runl->fifo, cgrp, oclass, argv, argc, pobject);
}
static int
nvkm_ucgrp_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
struct nvkm_cgrp *cgrp = nvkm_ucgrp(object)->cgrp;
struct nvkm_fifo *fifo = cgrp->runl->fifo;
const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
int c = 0;
/* *_CHANNEL_GPFIFO_* */
if (chan->user.oclass) {
if (c++ == index) {
oclass->base = chan->user;
oclass->ctor = nvkm_ucgrp_chan_new;
return 0;
}
}
return -EINVAL;
}
static void *
nvkm_ucgrp_dtor(struct nvkm_object *object)
{
struct nvkm_ucgrp *ucgrp = nvkm_ucgrp(object);
nvkm_cgrp_unref(&ucgrp->cgrp);
return ucgrp;
}
static const struct nvkm_object_func
nvkm_ucgrp = {
.dtor = nvkm_ucgrp_dtor,
.sclass = nvkm_ucgrp_sclass,
};
int
nvkm_ucgrp_new(struct nvkm_fifo *fifo, const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
union nvif_cgrp_args *args = argv;
struct nvkm_runl *runl;
struct nvkm_vmm *vmm;
struct nvkm_ucgrp *ucgrp;
int ret;
if (argc < sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
argc -= sizeof(args->v0);
if (args->v0.namelen != argc)
return -EINVAL;
/* Lookup objects referenced in args. */
runl = nvkm_runl_get(fifo, args->v0.runlist, 0);
if (!runl)
return -EINVAL;
vmm = nvkm_uvmm_search(oclass->client, args->v0.vmm);
if (IS_ERR(vmm))
return PTR_ERR(vmm);
/* Allocate channel group. */
if (!(ucgrp = kzalloc(sizeof(*ucgrp), GFP_KERNEL))) {
ret = -ENOMEM;
goto done;
}
nvkm_object_ctor(&nvkm_ucgrp, oclass, &ucgrp->object);
*pobject = &ucgrp->object;
ret = nvkm_cgrp_new(runl, args->v0.name, vmm, true, &ucgrp->cgrp);
if (ret)
goto done;
/* Return channel group info to caller. */
args->v0.cgid = ucgrp->cgrp->id;
done:
nvkm_vmm_unref(&vmm);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/top.h>
#include <subdev/vfn.h>
#include <nvif/class.h>
static u32
ga100_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->doorbell << 16) | chan->id;
}
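/* The handle used to ring a channel's doorbell packs the runlist's
 * doorbell index (read from the runlist's doorbell config during probe,
 * see ga100_runl_new()) into the high half and the channel ID into the
 * low half.
 */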
static void
ga100_chan_stop(struct nvkm_chan *chan)
{
struct nvkm_runl *runl = chan->cgrp->runl;
nvkm_wr32(runl->fifo->engine.subdev.device, runl->chan + (chan->id * 4), 0x00000003);
}
static void
ga100_chan_start(struct nvkm_chan *chan)
{
struct nvkm_runl *runl = chan->cgrp->runl;
struct nvkm_device *device = runl->fifo->engine.subdev.device;
const int gfid = 0;
nvkm_wr32(device, runl->chan + (chan->id * 4), 0x00000002);
nvkm_wr32(device, runl->addr + 0x0090, (gfid << 16) | chan->id); /* INTERNAL_DOORBELL. */
}
static void
ga100_chan_unbind(struct nvkm_chan *chan)
{
struct nvkm_runl *runl = chan->cgrp->runl;
nvkm_wr32(runl->fifo->engine.subdev.device, runl->chan + (chan->id * 4), 0xffffffff);
}
static int
ga100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x010, 0x0000face);
nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
nvkm_wo32(chan->inst, 0x048, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x04c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x084, 0x20400000);
nvkm_wo32(chan->inst, 0x094, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0x0e8, chan->id);
nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | chan->cgrp->runl->nonstall.vector);
nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
nvkm_done(chan->inst);
return 0;
}
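/* Compared with gv100_chan_ramfc_write(), Ampere drops the USERD
 * pointer words from the instance block (USERD comes via the runlist
 * entry instead) and points 0x0f8 at the runlist's non-stall interrupt
 * vector.  Individual field meanings are inferred from driver history
 * rather than documented.
 */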
static const struct nvkm_chan_func_ramfc
ga100_chan_ramfc = {
.write = ga100_chan_ramfc_write,
.devm = 0xfff,
.priv = true,
};
const struct nvkm_chan_func
ga100_chan = {
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &ga100_chan_ramfc,
.unbind = ga100_chan_unbind,
.start = ga100_chan_start,
.stop = ga100_chan_stop,
.preempt = gk110_chan_preempt,
.doorbell_handle = ga100_chan_doorbell_handle,
};
static void
ga100_cgrp_preempt(struct nvkm_cgrp *cgrp)
{
struct nvkm_runl *runl = cgrp->runl;
nvkm_wr32(runl->fifo->engine.subdev.device, runl->addr + 0x098, 0x01000000 | cgrp->id);
}
const struct nvkm_cgrp_func
ga100_cgrp = {
.preempt = ga100_cgrp_preempt,
};
static int
ga100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
{
struct nvkm_runl *runl = engn->runl;
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u32 stat = nvkm_rd32(device, runl->addr + 0x200 + engn->id * 0x40);
ENGN_DEBUG(engn, "status %08x", stat);
*cgid = true;
switch ((stat & 0x0000e000) >> 13) {
case 0 /* INVALID */: return -ENODEV;
case 1 /* VALID */:
case 5 /* SAVE */: return (stat & 0x00000fff);
case 6 /* LOAD */: return (stat & 0x0fff0000) >> 16;
case 7 /* SWITCH */:
if (nvkm_engine_chsw_load(engn->engine))
return (stat & 0x0fff0000) >> 16;
return (stat & 0x00000fff);
default:
WARN_ON(1);
break;
}
return -ENODEV;
}
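/* Bits 15:13 of the engine status encode the ctxsw state; the current
 * channel/TSG ID sits in bits 11:0 and the next ID in bits 27:16, which
 * is why LOAD/SWITCH read the upper field while VALID/SAVE read the
 * lower one.
 */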
static int
ga100_engn_nonstall(struct nvkm_engn *engn)
{
struct nvkm_engine *engine = engn->engine;
if (WARN_ON(!engine->func->nonstall))
return -EINVAL;
return engine->func->nonstall(engine);
}
const struct nvkm_engn_func
ga100_engn = {
.nonstall = ga100_engn_nonstall,
.cxid = ga100_engn_cxid,
.ctor = gk104_ectx_ctor,
.bind = gv100_ectx_bind,
};
const struct nvkm_engn_func
ga100_engn_ce = {
.nonstall = ga100_engn_nonstall,
.cxid = ga100_engn_cxid,
.ctor = gv100_ectx_ce_ctor,
.bind = gv100_ectx_ce_bind,
};
static bool
ga100_runq_idle(struct nvkm_runq *runq)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
return !(nvkm_rd32(device, 0x04015c + (runq->id * 0x800)) & 0x0000e000);
}
static bool
ga100_runq_intr_1(struct nvkm_runq *runq, struct nvkm_runl *runl)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
u32 inte = nvkm_rd32(device, 0x040180 + (runq->id * 0x800));
u32 intr = nvkm_rd32(device, 0x040148 + (runq->id * 0x800));
u32 stat = intr & inte;
if (!stat) {
RUNQ_DEBUG(runq, "inte1 %08x %08x", intr, inte);
return false;
}
if (stat & 0x80000000) {
u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x0800)) & runl->chid->mask;
struct nvkm_chan *chan;
unsigned long flags;
RUNQ_ERROR(runq, "CTXNOTVALID chid:%d", chid);
chan = nvkm_runl_chan_get_chid(runl, chid, &flags);
if (chan) {
nvkm_chan_error(chan, true);
nvkm_chan_put(&chan, flags);
}
nvkm_mask(device, 0x0400ac + (runq->id * 0x800), 0x00030000, 0x00030000);
stat &= ~0x80000000;
}
if (stat) {
RUNQ_ERROR(runq, "intr1 %08x", stat);
nvkm_wr32(device, 0x0401a0 + (runq->id * 0x800), stat);
}
nvkm_wr32(device, 0x040148 + (runq->id * 0x800), intr);
return true;
}
static bool
ga100_runq_intr_0(struct nvkm_runq *runq, struct nvkm_runl *runl)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
u32 inte = nvkm_rd32(device, 0x040170 + (runq->id * 0x800));
u32 intr = nvkm_rd32(device, 0x040108 + (runq->id * 0x800));
u32 stat = intr & inte;
if (!stat) {
RUNQ_DEBUG(runq, "inte0 %08x %08x", intr, inte);
return false;
}
/*TODO: expand on this when fixing up gf100's version. */
if (stat & 0xc6afe000) {
u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x0800)) & runl->chid->mask;
struct nvkm_chan *chan;
unsigned long flags;
RUNQ_ERROR(runq, "intr0 %08x", stat);
chan = nvkm_runl_chan_get_chid(runl, chid, &flags);
if (chan) {
nvkm_chan_error(chan, true);
nvkm_chan_put(&chan, flags);
}
stat &= ~0xc6afe000;
}
if (stat) {
RUNQ_ERROR(runq, "intr0 %08x", stat);
nvkm_wr32(device, 0x040190 + (runq->id * 0x800), stat);
}
nvkm_wr32(device, 0x040108 + (runq->id * 0x800), intr);
return true;
}
static bool
ga100_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *runl)
{
bool intr0 = ga100_runq_intr_0(runq, runl);
bool intr1 = ga100_runq_intr_1(runq, runl);
return intr0 || intr1;
}
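/* Both interrupt trees are evaluated unconditionally (no short-circuit),
 * so INTR_1 sources are serviced even when INTR_0 already had something
 * pending in the same pass.
 */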
static void
ga100_runq_init(struct nvkm_runq *runq)
{
struct nvkm_device *device = runq->fifo->engine.subdev.device;
nvkm_wr32(device, 0x040108 + (runq->id * 0x800), 0xffffffff); /* INTR_0 */
nvkm_wr32(device, 0x040148 + (runq->id * 0x800), 0xffffffff); /* INTR_1 */
nvkm_wr32(device, 0x040170 + (runq->id * 0x800), 0xffffffff); /* INTR_0_EN_SET_TREE */
nvkm_wr32(device, 0x040180 + (runq->id * 0x800), 0xffffffff); /* INTR_1_EN_SET_TREE */
}
const struct nvkm_runq_func
ga100_runq = {
.init = ga100_runq_init,
.intr = ga100_runq_intr,
.idle = ga100_runq_idle,
};
static bool
ga100_runl_preempt_pending(struct nvkm_runl *runl)
{
return nvkm_rd32(runl->fifo->engine.subdev.device, runl->addr + 0x098) & 0x00100000;
}
static void
ga100_runl_preempt(struct nvkm_runl *runl)
{
nvkm_wr32(runl->fifo->engine.subdev.device, runl->addr + 0x098, 0x00000000);
}
static void
ga100_runl_allow(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x094, 0x00000001, 0x00000000);
}
static void
ga100_runl_block(struct nvkm_runl *runl, u32 engm)
{
nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x094, 0x00000001, 0x00000001);
}
static bool
ga100_runl_pending(struct nvkm_runl *runl)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
return nvkm_rd32(device, runl->addr + 0x08c) & 0x00008000;
}
static void
ga100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u64 addr = nvkm_memory_addr(memory) + start;
nvkm_wr32(device, runl->addr + 0x080, lower_32_bits(addr));
nvkm_wr32(device, runl->addr + 0x084, upper_32_bits(addr));
nvkm_wr32(device, runl->addr + 0x088, count);
}
static irqreturn_t
ga100_runl_intr(struct nvkm_inth *inth)
{
struct nvkm_runl *runl = container_of(inth, typeof(*runl), inth);
struct nvkm_engn *engn;
struct nvkm_device *device = runl->fifo->engine.subdev.device;
u32 inte = nvkm_rd32(device, runl->addr + 0x120);
u32 intr = nvkm_rd32(device, runl->addr + 0x100);
u32 stat = intr & inte;
u32 info;
if (!stat) {
RUNL_DEBUG(runl, "inte %08x %08x", intr, inte);
return IRQ_NONE;
}
if (stat & 0x00000007) {
nvkm_runl_foreach_engn_cond(engn, runl, stat & BIT(engn->id)) {
info = nvkm_rd32(device, runl->addr + 0x224 + (engn->id * 0x40));
tu102_fifo_intr_ctxsw_timeout_info(engn, info);
nvkm_wr32(device, runl->addr + 0x100, BIT(engn->id));
stat &= ~BIT(engn->id);
}
}
if (stat & 0x00000300) {
nvkm_wr32(device, runl->addr + 0x100, stat & 0x00000300);
stat &= ~0x00000300;
}
if (stat & 0x00010000) {
if (runl->runq[0]) {
if (runl->runq[0]->func->intr(runl->runq[0], runl))
stat &= ~0x00010000;
}
}
if (stat & 0x00020000) {
if (runl->runq[1]) {
if (runl->runq[1]->func->intr(runl->runq[1], runl))
stat &= ~0x00020000;
}
}
if (stat) {
RUNL_ERROR(runl, "intr %08x", stat);
nvkm_wr32(device, runl->addr + 0x140, stat);
}
nvkm_wr32(device, runl->addr + 0x180, 0x00000001);
return IRQ_HANDLED;
}
static void
ga100_runl_fini(struct nvkm_runl *runl)
{
nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x300, 0x80000000, 0x00000000);
nvkm_inth_block(&runl->inth);
}
static void
ga100_runl_init(struct nvkm_runl *runl)
{
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_runq *runq;
struct nvkm_device *device = fifo->engine.subdev.device;
int i;
/* Submit NULL runlist and preempt. */
nvkm_wr32(device, runl->addr + 0x088, 0x00000000);
runl->func->preempt(runl);
/* Enable doorbell. */
nvkm_mask(device, runl->addr + 0x300, 0x80000000, 0x80000000);
nvkm_wr32(device, runl->addr + 0x100, 0xffffffff); /* INTR_0 */
nvkm_wr32(device, runl->addr + 0x140, 0xffffffff); /* INTR_0_EN_CLEAR_TREE(0) */
nvkm_wr32(device, runl->addr + 0x120, 0x000f1307); /* INTR_0_EN_SET_TREE(0) */
nvkm_wr32(device, runl->addr + 0x148, 0xffffffff); /* INTR_0_EN_CLEAR_TREE(1) */
nvkm_wr32(device, runl->addr + 0x128, 0x00000000); /* INTR_0_EN_SET_TREE(1) */
/* Init PBDMA(s). */
for (i = 0; i < runl->runq_nr; i++) {
runq = runl->runq[i];
runq->func->init(runq);
}
nvkm_inth_allow(&runl->inth);
}
const struct nvkm_runl_func
ga100_runl = {
.init = ga100_runl_init,
.fini = ga100_runl_fini,
.size = 16,
.update = nv50_runl_update,
.insert_cgrp = gv100_runl_insert_cgrp,
.insert_chan = gv100_runl_insert_chan,
.commit = ga100_runl_commit,
.wait = nv50_runl_wait,
.pending = ga100_runl_pending,
.block = ga100_runl_block,
.allow = ga100_runl_allow,
.preempt = ga100_runl_preempt,
.preempt_pending = ga100_runl_preempt_pending,
};
static int
ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prunl)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_top_device *tdev;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
u32 chcfg = nvkm_rd32(device, addr + 0x004);
u32 chnum = 1 << (chcfg & 0x0000000f);
u32 chaddr = (chcfg & 0xfffffff0);
u32 dbcfg = nvkm_rd32(device, addr + 0x008);
u32 vector = nvkm_rd32(device, addr + 0x160);
int i, ret;
runl = nvkm_runl_new(fifo, id, addr, chnum);
if (IS_ERR(runl))
return PTR_ERR(runl);
*prunl = runl;
for (i = 0; i < 2; i++) {
u32 pbcfg = nvkm_rd32(device, addr + 0x010 + (i * 0x04));
if (pbcfg & 0x80000000) {
runl->runq[runl->runq_nr] =
nvkm_runq_new(fifo, ((pbcfg & 0x03fffc00) - 0x040000) / 0x800);
if (!runl->runq[runl->runq_nr]) {
RUNL_ERROR(runl, "runq %d", runl->runq_nr);
return -ENOMEM;
}
runl->runq_nr++;
}
}
nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist == runl->addr) {
if (tdev->engine < 0) {
RUNL_DEBUG(runl, "engn !top");
return -EINVAL;
}
engn = nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
fifo->func->engn_ce : fifo->func->engn,
tdev->type, tdev->inst);
if (!engn)
return -EINVAL;
if (!engn->engine->func->nonstall) {
RUNL_DEBUG(runl, "engn %s !nonstall", engn->engine->subdev.name);
return -EINVAL;
}
}
if (list_empty(&runl->engns)) {
RUNL_DEBUG(runl, "!engns");
return -EINVAL;
}
ret = nvkm_inth_add(&device->vfn->intr, vector & 0x00000fff, NVKM_INTR_PRIO_NORMAL,
&fifo->engine.subdev, ga100_runl_intr, &runl->inth);
if (ret) {
RUNL_ERROR(runl, "inth %d", ret);
return ret;
}
runl->chan = chaddr;
runl->doorbell = dbcfg >> 16;
return 0;
}
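/* The per-runlist layout is probed from the runlist's own registers:
 * +0x004 gives log2(channel count) in its low nibble and the
 * channel-control RAM address in the remaining bits, +0x008 the
 * doorbell config, +0x010/+0x014 the PBDMA configs, and +0x160 the
 * stalling interrupt vector.  The register names used in the comments
 * here are informal.
 */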
static irqreturn_t
ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
{
struct nvkm_runl *runl = container_of(inth, typeof(*runl), nonstall.inth);
nvkm_event_ntfy(&runl->fifo->nonstall.event, runl->id, NVKM_FIFO_NONSTALL_EVENT);
return IRQ_HANDLED;
}
static void
ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
nvkm_inth_block(&runl->nonstall.inth);
}
static void
ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
nvkm_inth_allow(&runl->nonstall.inth);
}
const struct nvkm_event_func
ga100_fifo_nonstall = {
.init = ga100_fifo_nonstall_allow,
.fini = ga100_fifo_nonstall_block,
};
int
ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_vfn *vfn = subdev->device->vfn;
struct nvkm_runl *runl;
int ret, nr = 0;
nvkm_runl_foreach(runl, fifo) {
struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
runl->nonstall.vector = engn->func->nonstall(engn);
if (runl->nonstall.vector < 0) {
RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
return runl->nonstall.vector;
}
ret = nvkm_inth_add(&vfn->intr, runl->nonstall.vector, NVKM_INTR_PRIO_NORMAL,
subdev, ga100_fifo_nonstall_intr, &runl->nonstall.inth);
if (ret)
return ret;
nr = max(nr, runl->id + 1);
}
return nr;
}
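/* On success this returns the number of non-stall event indices needed
 * (highest runlist ID + 1), which nvkm_fifo_oneinit() passes straight
 * through to nvkm_event_init().
 */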
int
ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_top_device *tdev;
struct nvkm_runl *runl;
int id = 0, ret;
nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
runl = nvkm_runl_get(fifo, -1, tdev->runlist);
if (!runl) {
ret = ga100_runl_new(fifo, id++, tdev->runlist, &runl);
if (ret) {
if (runl)
nvkm_runl_del(runl);
continue;
}
}
}
return 0;
}
static const struct nvkm_fifo_func
ga100_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
.engn = &ga100_engn,
.engn_ce = &ga100_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true },
.chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan },
};
int
ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <nvif/class.h>
int
gm200_fifo_runq_nr(struct nvkm_fifo *fifo)
{
return nvkm_rd32(fifo->engine.subdev.device, 0x002004) & 0x000000ff;
}
int
gm200_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return nvkm_rd32(fifo->engine.subdev.device, 0x002008);
}
static const struct nvkm_fifo_func
gm200_fifo = {
.chid_nr = gm200_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gm200_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gm107_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gm107_runl,
.runq = &gk208_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
.chan = {{ 0, 0, MAXWELL_CHANNEL_GPFIFO_A }, &gm107_chan },
};
int
gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gm200_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "runq.h"
#include "priv.h"
void
nvkm_runq_del(struct nvkm_runq *runq)
{
list_del(&runq->head);
kfree(runq);
}
struct nvkm_runq *
nvkm_runq_new(struct nvkm_fifo *fifo, int pbid)
{
struct nvkm_runq *runq;
if (!(runq = kzalloc(sizeof(*runq), GFP_KERNEL)))
return NULL;
runq->func = fifo->func->runq;
runq->fifo = fifo;
runq->id = pbid;
list_add_tail(&runq->head, &fifo->runqs);
return runq;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <nvif/cl0080.h>
#include <nvif/unpack.h>
bool
nvkm_fifo_ctxsw_in_progress(struct nvkm_engine *engine)
{
struct nvkm_runl *runl;
struct nvkm_engn *engn;
nvkm_runl_foreach(runl, engine->subdev.device->fifo) {
nvkm_runl_foreach_engn(engn, runl) {
if (engn->engine == engine)
return engn->func->chsw ? engn->func->chsw(engn) : false;
}
}
return false;
}
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
return fifo->func->pause(fifo, flags);
}
void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
return fifo->func->start(fifo, flags);
}
void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
return fifo->func->mmu_fault->recover(fifo, info);
}
static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
if (oclass->engn == &fifo->func->cgrp.user)
return nvkm_ucgrp_new(fifo, oclass, argv, argc, pobject);
if (oclass->engn == &fifo->func->chan.user)
return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);
WARN_ON(1);
return -ENOSYS;
}
static const struct nvkm_device_oclass
nvkm_fifo_class = {
.ctor = nvkm_fifo_class_new,
};
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
const struct nvkm_fifo_func_cgrp *cgrp = &fifo->func->cgrp;
const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
int c = 0;
/* *_CHANNEL_GROUP_* */
if (cgrp->user.oclass) {
if (c++ == index) {
oclass->base = cgrp->user;
oclass->engn = &fifo->func->cgrp.user;
*class = &nvkm_fifo_class;
return 0;
}
}
/* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
if (chan->user.oclass) {
if (c++ == index) {
oclass->base = chan->user;
oclass->engn = &fifo->func->chan.user;
*class = &nvkm_fifo_class;
return 0;
}
}
return c;
}
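/* This appears to follow the usual nvkm sclass convention: return 0 and
 * fill *class when the index lands on a valid entry, otherwise return
 * the total number of classes exposed so the caller knows when to stop
 * iterating.
 */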
static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
struct nvkm_runl *runl;
nvkm_inth_block(&fifo->engine.subdev.inth);
nvkm_runl_foreach(runl, fifo)
nvkm_runl_fini(runl);
return 0;
}
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
struct nvkm_runq *runq;
struct nvkm_runl *runl;
u32 mask = 0;
if (fifo->func->init_pbdmas) {
nvkm_runq_foreach(runq, fifo)
mask |= BIT(runq->id);
fifo->func->init_pbdmas(fifo, mask);
nvkm_runq_foreach(runq, fifo)
runq->func->init(runq);
}
nvkm_runl_foreach(runl, fifo) {
if (runl->func->init)
runl->func->init(runl);
}
if (fifo->func->init)
fifo->func->init(fifo);
nvkm_inth_allow(&fifo->engine.subdev.inth);
return 0;
}
static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
struct nvkm_runl *runl;
struct nvkm_engn *engn;
int ret;
ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
if (ret)
return ret;
switch (mthd) {
case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
case NV_DEVICE_HOST_RUNLISTS:
*data = 0;
nvkm_runl_foreach(runl, fifo)
*data |= BIT(runl->id);
return 0;
case NV_DEVICE_HOST_RUNLIST_ENGINES:
runl = nvkm_runl_get(fifo, *data, 0);
if (runl) {
*data = 0;
nvkm_runl_foreach_engn(engn, runl) {
#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_DMAOBJ:
break;
CASE(SW );
CASE(GR );
CASE(MPEG );
CASE(ME );
CASE(CIPHER);
CASE(BSP );
CASE(VP );
CASE(CE );
CASE(SEC );
CASE(MSVLD );
CASE(MSPDEC);
CASE(MSPPP );
CASE(MSENC );
CASE(VIC );
CASE(SEC2 );
CASE(NVDEC );
CASE(NVENC );
default:
WARN_ON(1);
break;
}
#undef CASE
}
return 0;
}
return -EINVAL;
case NV_DEVICE_HOST_RUNLIST_CHANNELS:
if (!fifo->chid) {
runl = nvkm_runl_get(fifo, *data, 0);
if (runl) {
*data = runl->chid->nr;
return 0;
}
}
return -EINVAL;
default:
break;
}
return -ENOSYS;
}
static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
struct nvkm_subdev *subdev = &engine->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_fifo *fifo = nvkm_fifo(engine);
struct nvkm_runl *runl;
struct nvkm_engn *engn;
int ret, nr, i;
/* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
if (fifo->func->chid_nr) {
ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
if (ret)
return ret;
}
/* Create runqueues for each PBDMA. */
if (fifo->func->runq_nr) {
for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
if (!nvkm_runq_new(fifo, i))
return -ENOMEM;
}
}
/* Create runlists. */
ret = fifo->func->runl_ctor(fifo);
if (ret)
return ret;
nvkm_runl_foreach(runl, fifo) {
RUNL_DEBUG(runl, "chan:%06x", runl->chan);
nvkm_runl_foreach_engn(engn, runl) {
ENGN_DEBUG(engn, "");
}
}
/* Register interrupt handler. */
if (fifo->func->intr) {
ret = nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
subdev, fifo->func->intr, &subdev->inth);
if (ret) {
nvkm_error(subdev, "intr %d\n", ret);
return ret;
}
}
/* Initialise non-stall intr handling. */
if (fifo->func->nonstall) {
if (fifo->func->nonstall_ctor) {
ret = fifo->func->nonstall_ctor(fifo);
if (ret < 0) {
nvkm_error(subdev, "nonstall %d\n", ret);
return ret;
}
} else {
ret = 1;
}
ret = nvkm_event_init(fifo->func->nonstall, &fifo->engine.subdev, 1, ret,
&fifo->nonstall.event);
if (ret)
return ret;
}
/* Allocate USERD + BAR1 polling area. */
if (fifo->func->chan.func->userd->bar == 1) {
struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
fifo->func->chan.func->userd->size, 0, true,
&fifo->userd.mem);
if (ret)
return ret;
ret = nvkm_vmm_get(bar1, 12, nvkm_memory_size(fifo->userd.mem), &fifo->userd.bar1);
if (ret)
return ret;
ret = nvkm_memory_map(fifo->userd.mem, 0, bar1, fifo->userd.bar1, NULL, 0);
if (ret)
return ret;
}
return 0;
}
static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}
static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
struct nvkm_fifo *fifo = nvkm_fifo(engine);
struct nvkm_runl *runl, *runt;
struct nvkm_runq *runq, *rtmp;
if (fifo->userd.bar1)
nvkm_vmm_put(nvkm_bar_bar1_vmm(engine->subdev.device), &fifo->userd.bar1);
nvkm_memory_unref(&fifo->userd.mem);
list_for_each_entry_safe(runl, runt, &fifo->runls, head)
nvkm_runl_del(runl);
list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
nvkm_runq_del(runq);
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
return fifo;
}
static const struct nvkm_engine_func
nvkm_fifo = {
.dtor = nvkm_fifo_dtor,
.preinit = nvkm_fifo_preinit,
.oneinit = nvkm_fifo_oneinit,
.info = nvkm_fifo_info,
.init = nvkm_fifo_init,
.fini = nvkm_fifo_fini,
.base.sclass = nvkm_fifo_class_get,
};
int
nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
struct nvkm_fifo *fifo;
if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
fifo->func = func;
INIT_LIST_HEAD(&fifo->runqs);
INIT_LIST_HEAD(&fifo->runls);
/*TODO: Needs to be >CTXSW_TIMEOUT, so RC can recover before this is hit.
* CTXSW_TIMEOUT HW default seems to differ between GPUs, so just a
* large number for now until we support changing it.
*/
fifo->timeout.chan_msec = 10000;
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "regsnv04.h"
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <nvif/class.h>
static int
nv17_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
const u32 base = chan->id * 64;
chan->ramfc_offset = base;
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, base + 0x00, offset);
nvkm_wo32(ramfc, base + 0x04, offset);
nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
nv17_chan_ramfc = {
.layout = (const struct nvkm_ramfc_layout[]) {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
{ 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
{ 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
{ 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
{ 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
{ 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
{ 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
{ 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
{ 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
{ 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
{ 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
{ 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
{}
},
.write = nv17_chan_ramfc_write,
.clear = nv04_chan_ramfc_clear,
.ctxdma = true,
};
static const struct nvkm_chan_func
nv17_chan = {
.inst = &nv04_chan_inst,
.userd = &nv04_chan_userd,
.ramfc = &nv17_chan_ramfc,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
static void
nv17_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_instmem *imem = device->imem;
struct nvkm_ramht *ramht = imem->ramht;
struct nvkm_memory *ramro = imem->ramro;
struct nvkm_memory *ramfc = imem->ramfc;
nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((ramht->bits - 9) << 16) |
(ramht->gpuobj->addr >> 8));
nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
0x00010000);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
}
static const struct nvkm_fifo_func
nv17_fifo = {
.chid_nr = nv10_fifo_chid_nr,
.chid_ctor = nv04_fifo_chid_ctor,
.runl_ctor = nv04_fifo_runl_ctor,
.init = nv17_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.runl = &nv04_runl,
.engn = &nv04_engn,
.engn_sw = &nv04_engn,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, NV17_CHANNEL_DMA }, &nv17_chan },
};
int
nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&nv17_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c |
/*
* Copyright 2020 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "chid.h"
void
nvkm_chid_put(struct nvkm_chid *chid, int id, spinlock_t *data_lock)
{
if (id >= 0) {
spin_lock_irq(&chid->lock);
spin_lock(data_lock);
chid->data[id] = NULL;
spin_unlock(data_lock);
clear_bit(id, chid->used);
spin_unlock_irq(&chid->lock);
}
}
int
nvkm_chid_get(struct nvkm_chid *chid, void *data)
{
int id = -1, cid;
spin_lock_irq(&chid->lock);
cid = find_first_zero_bit(chid->used, chid->nr);
if (cid < chid->nr) {
set_bit(cid, chid->used);
chid->data[cid] = data;
id = cid;
}
spin_unlock_irq(&chid->lock);
return id;
}
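/* IDs are handed out lowest-first from a plain bitmap; taking chid->lock
 * around both the bitmap and the data[] update keeps lookup-by-ID
 * consistent with allocation and release.
 */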
static void
nvkm_chid_del(struct kref *kref)
{
struct nvkm_chid *chid = container_of(kref, typeof(*chid), kref);
nvkm_event_fini(&chid->event);
kvfree(chid->data);
kfree(chid);
}
void
nvkm_chid_unref(struct nvkm_chid **pchid)
{
struct nvkm_chid *chid = *pchid;
if (!chid)
return;
kref_put(&chid->kref, nvkm_chid_del);
*pchid = NULL;
}
struct nvkm_chid *
nvkm_chid_ref(struct nvkm_chid *chid)
{
if (chid)
kref_get(&chid->kref);
return chid;
}
int
nvkm_chid_new(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
int nr, int first, int count, struct nvkm_chid **pchid)
{
struct nvkm_chid *chid;
int id;
if (!(chid = *pchid = kzalloc(struct_size(chid, used, nr), GFP_KERNEL)))
return -ENOMEM;
kref_init(&chid->kref);
chid->nr = nr;
chid->mask = chid->nr - 1;
spin_lock_init(&chid->lock);
if (!(chid->data = kvzalloc(sizeof(*chid->data) * nr, GFP_KERNEL))) {
nvkm_chid_unref(pchid);
return -ENOMEM;
}
for (id = 0; id < first; id++)
__set_bit(id, chid->used);
for (id = first + count; id < nr; id++)
__set_bit(id, chid->used);
return nvkm_event_init(func, subdev, 1, nr, &chid->event);
}
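/* IDs outside [first, first + count) are marked used up-front, which is
 * how callers such as nv50_fifo_chid_ctor() reserve special channel IDs
 * (e.g. CHID 0 and the "channel invalid" ID) without extra bookkeeping.
 */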
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/mmu.h>
#include <nvif/class.h>
static u32
gv100_chan_doorbell_handle(struct nvkm_chan *chan)
{
return chan->id;
}
static int
gv100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u32 limit2 = ilog2(length / 8);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x008, lower_32_bits(userd));
nvkm_wo32(chan->inst, 0x00c, upper_32_bits(userd));
nvkm_wo32(chan->inst, 0x010, 0x0000face);
nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
nvkm_wo32(chan->inst, 0x048, lower_32_bits(offset));
nvkm_wo32(chan->inst, 0x04c, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->inst, 0x084, 0x20400000);
nvkm_wo32(chan->inst, 0x094, 0x30000000 | devm);
nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0x0e8, chan->id);
nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
nvkm_done(chan->inst);
return 0;
}
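/* Unlike the Ampere version, Volta still carries the USERD address in
 * the instance block (0x008/0x00c) and writes a fixed token at 0x0f8,
 * where ga100_chan_ramfc_write() substitutes the per-runlist non-stall
 * interrupt vector.
 */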
const struct nvkm_chan_func_ramfc
gv100_chan_ramfc = {
.write = gv100_chan_ramfc_write,
.devm = 0xfff,
.priv = true,
};
const struct nvkm_chan_func_userd
gv100_chan_userd = {
.bar = -1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
static const struct nvkm_chan_func
gv100_chan = {
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &gv100_chan_ramfc,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
.stop = gk104_chan_stop,
.preempt = gk110_chan_preempt,
.doorbell_handle = gv100_chan_doorbell_handle,
};
void
gv100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
u64 addr = 0ULL;
if (cctx) {
addr = cctx->vctx->vma->addr;
addr |= 4ULL;
}
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x210, lower_32_bits(addr));
nvkm_wo32(chan->inst, 0x214, upper_32_bits(addr));
nvkm_mo32(chan->inst, 0x0ac, 0x00010000, cctx ? 0x00010000 : 0x00000000);
nvkm_done(chan->inst);
}
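/* The low bits OR'd into the context address (4ULL here) are assumed to
 * be a target/valid field rather than part of the address itself, the
 * VMA being page-aligned; bit 16 of 0x0ac then flags whether a context
 * is bound at all.
 */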
const struct nvkm_engn_func
gv100_engn = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.ctor = gk104_ectx_ctor,
.bind = gv100_ectx_bind,
};
void
gv100_ectx_ce_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
const u64 bar2 = cctx ? nvkm_memory_bar2(cctx->vctx->inst->memory) : 0ULL;
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x220, lower_32_bits(bar2));
nvkm_wo32(chan->inst, 0x224, upper_32_bits(bar2));
nvkm_mo32(chan->inst, 0x0ac, 0x00020000, cctx ? 0x00020000 : 0x00000000);
nvkm_done(chan->inst);
}
int
gv100_ectx_ce_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
if (nvkm_memory_bar2(vctx->inst->memory) == ~0ULL)
return -EFAULT;
return 0;
}
const struct nvkm_engn_func
gv100_engn_ce = {
.chsw = gk104_engn_chsw,
.cxid = gk104_engn_cxid,
.ctor = gv100_ectx_ce_ctor,
.bind = gv100_ectx_ce_bind,
};
static bool
gv100_runq_intr_1_ctxnotvalid(struct nvkm_runq *runq, int chid)
{
struct nvkm_fifo *fifo = runq->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_chan *chan;
unsigned long flags;
RUNQ_ERROR(runq, "CTXNOTVALID chid:%d", chid);
chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
if (WARN_ON_ONCE(!chan))
return false;
nvkm_chan_error(chan, true);
nvkm_chan_put(&chan, flags);
nvkm_mask(device, 0x0400ac + (runq->id * 0x2000), 0x00030000, 0x00030000);
nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0x80000000);
return true;
}
const struct nvkm_runq_func
gv100_runq = {
.init = gk208_runq_init,
.intr = gk104_runq_intr,
.intr_0_names = gk104_runq_intr_0_names,
.intr_1_ctxnotvalid = gv100_runq_intr_1_ctxnotvalid,
.idle = gk104_runq_idle,
};
void
gv100_runl_preempt(struct nvkm_runl *runl)
{
nvkm_wr32(runl->fifo->engine.subdev.device, 0x002638, BIT(runl->id));
}
void
gv100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
const u64 user = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
const u64 inst = chan->inst->addr;
nvkm_wo32(memory, offset + 0x0, lower_32_bits(user) | chan->runq << 1);
nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->id);
nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
}
void
gv100_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
nvkm_wo32(memory, offset + 0x8, cgrp->id);
nvkm_wo32(memory, offset + 0xc, 0x00000000);
}
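/* Volta runlist entries grow to 16 bytes.  Inferred layout: for a TSG,
 * word 0 packs the timeslice (timeout 128, scale 3) with an entry-type
 * bit, word 1 holds the channel count and word 2 the group ID; for a
 * channel, words 0-1 hold the USERD address (its low bits reused for
 * the runqueue selector) and words 2-3 the instance address with the
 * CHID in the low 12 bits, both addresses being 4KiB-aligned.
 */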
static const struct nvkm_runl_func
gv100_runl = {
.runqs = 2,
.size = 16,
.update = nv50_runl_update,
.insert_cgrp = gv100_runl_insert_cgrp,
.insert_chan = gv100_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.preempt = gv100_runl_preempt,
.preempt_pending = gf100_runl_preempt_pending,
};
const struct nvkm_enum
gv100_fifo_mmu_fault_gpcclient[] = {
{ 0x00, "T1_0" },
{ 0x01, "T1_1" },
{ 0x02, "T1_2" },
{ 0x03, "T1_3" },
{ 0x04, "T1_4" },
{ 0x05, "T1_5" },
{ 0x06, "T1_6" },
{ 0x07, "T1_7" },
{ 0x08, "PE_0" },
{ 0x09, "PE_1" },
{ 0x0a, "PE_2" },
{ 0x0b, "PE_3" },
{ 0x0c, "PE_4" },
{ 0x0d, "PE_5" },
{ 0x0e, "PE_6" },
{ 0x0f, "PE_7" },
{ 0x10, "RAST" },
{ 0x11, "GCC" },
{ 0x12, "GPCCS" },
{ 0x13, "PROP_0" },
{ 0x14, "PROP_1" },
{ 0x15, "PROP_2" },
{ 0x16, "PROP_3" },
{ 0x17, "GPM" },
{ 0x18, "LTP_UTLB_0" },
{ 0x19, "LTP_UTLB_1" },
{ 0x1a, "LTP_UTLB_2" },
{ 0x1b, "LTP_UTLB_3" },
{ 0x1c, "LTP_UTLB_4" },
{ 0x1d, "LTP_UTLB_5" },
{ 0x1e, "LTP_UTLB_6" },
{ 0x1f, "LTP_UTLB_7" },
{ 0x20, "RGG_UTLB" },
{ 0x21, "T1_8" },
{ 0x22, "T1_9" },
{ 0x23, "T1_10" },
{ 0x24, "T1_11" },
{ 0x25, "T1_12" },
{ 0x26, "T1_13" },
{ 0x27, "T1_14" },
{ 0x28, "T1_15" },
{ 0x29, "TPCCS_0" },
{ 0x2a, "TPCCS_1" },
{ 0x2b, "TPCCS_2" },
{ 0x2c, "TPCCS_3" },
{ 0x2d, "TPCCS_4" },
{ 0x2e, "TPCCS_5" },
{ 0x2f, "TPCCS_6" },
{ 0x30, "TPCCS_7" },
{ 0x31, "PE_8" },
{ 0x32, "PE_9" },
{ 0x33, "TPCCS_8" },
{ 0x34, "TPCCS_9" },
{ 0x35, "T1_16" },
{ 0x36, "T1_17" },
{ 0x37, "T1_18" },
{ 0x38, "T1_19" },
{ 0x39, "PE_10" },
{ 0x3a, "PE_11" },
{ 0x3b, "TPCCS_10" },
{ 0x3c, "TPCCS_11" },
{ 0x3d, "T1_20" },
{ 0x3e, "T1_21" },
{ 0x3f, "T1_22" },
{ 0x40, "T1_23" },
{ 0x41, "PE_12" },
{ 0x42, "PE_13" },
{ 0x43, "TPCCS_12" },
{ 0x44, "TPCCS_13" },
{ 0x45, "T1_24" },
{ 0x46, "T1_25" },
{ 0x47, "T1_26" },
{ 0x48, "T1_27" },
{ 0x49, "PE_14" },
{ 0x4a, "PE_15" },
{ 0x4b, "TPCCS_14" },
{ 0x4c, "TPCCS_15" },
{ 0x4d, "T1_28" },
{ 0x4e, "T1_29" },
{ 0x4f, "T1_30" },
{ 0x50, "T1_31" },
{ 0x51, "PE_16" },
{ 0x52, "PE_17" },
{ 0x53, "TPCCS_16" },
{ 0x54, "TPCCS_17" },
{ 0x55, "T1_32" },
{ 0x56, "T1_33" },
{ 0x57, "T1_34" },
{ 0x58, "T1_35" },
{ 0x59, "PE_18" },
{ 0x5a, "PE_19" },
{ 0x5b, "TPCCS_18" },
{ 0x5c, "TPCCS_19" },
{ 0x5d, "T1_36" },
{ 0x5e, "T1_37" },
{ 0x5f, "T1_38" },
{ 0x60, "T1_39" },
{}
};
const struct nvkm_enum
gv100_fifo_mmu_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
{ 0x02, "CE1" },
{ 0x03, "DNISO" },
{ 0x04, "FE" },
{ 0x05, "FECS" },
{ 0x06, "HOST" },
{ 0x07, "HOST_CPU" },
{ 0x08, "HOST_CPU_NB" },
{ 0x09, "ISO" },
{ 0x0a, "MMU" },
{ 0x0b, "NVDEC" },
{ 0x0d, "NVENC1" },
{ 0x0e, "NISO" },
{ 0x0f, "P2P" },
{ 0x10, "PD" },
{ 0x11, "PERF" },
{ 0x12, "PMU" },
{ 0x13, "RASTERTWOD" },
{ 0x14, "SCC" },
{ 0x15, "SCC_NB" },
{ 0x16, "SEC" },
{ 0x17, "SSYNC" },
{ 0x18, "CE2" },
{ 0x19, "XV" },
{ 0x1a, "MMU_NB" },
{ 0x1b, "NVENC0" },
{ 0x1c, "DFALCON" },
{ 0x1d, "SKED" },
{ 0x1e, "AFALCON" },
{ 0x1f, "DONT_CARE" },
{ 0x20, "HSCE0" },
{ 0x21, "HSCE1" },
{ 0x22, "HSCE2" },
{ 0x23, "HSCE3" },
{ 0x24, "HSCE4" },
{ 0x25, "HSCE5" },
{ 0x26, "HSCE6" },
{ 0x27, "HSCE7" },
{ 0x28, "HSCE8" },
{ 0x29, "HSCE9" },
{ 0x2a, "HSHUB" },
{ 0x2b, "PTP_X0" },
{ 0x2c, "PTP_X1" },
{ 0x2d, "PTP_X2" },
{ 0x2e, "PTP_X3" },
{ 0x2f, "PTP_X4" },
{ 0x30, "PTP_X5" },
{ 0x31, "PTP_X6" },
{ 0x32, "PTP_X7" },
{ 0x33, "NVENC2" },
{ 0x34, "VPR_SCRUBBER0" },
{ 0x35, "VPR_SCRUBBER1" },
{ 0x36, "DWBIF" },
{ 0x37, "FBFALCON" },
{ 0x38, "CE_SHIM" },
{ 0x39, "GSP" },
{}
};
const struct nvkm_enum
gv100_fifo_mmu_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
{ 0x02, "PTE" },
{ 0x03, "VA_LIMIT_VIOLATION" },
{ 0x04, "UNBOUND_INST_BLOCK" },
{ 0x05, "PRIV_VIOLATION" },
{ 0x06, "RO_VIOLATION" },
{ 0x07, "WO_VIOLATION" },
{ 0x08, "PITCH_MASK_VIOLATION" },
{ 0x09, "WORK_CREATION" },
{ 0x0a, "UNSUPPORTED_APERTURE" },
{ 0x0b, "COMPRESSION_FAILURE" },
{ 0x0c, "UNSUPPORTED_KIND" },
{ 0x0d, "REGION_VIOLATION" },
{ 0x0e, "POISONED" },
{ 0x0f, "ATOMIC_VIOLATION" },
{}
};
static const struct nvkm_enum
gv100_fifo_mmu_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "PTP" },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "PWR_PMU" },
{ 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x09, "PERF" },
{ 0x1f, "PHYSICAL" },
{ 0x20, "HOST0" },
{ 0x21, "HOST1" },
{ 0x22, "HOST2" },
{ 0x23, "HOST3" },
{ 0x24, "HOST4" },
{ 0x25, "HOST5" },
{ 0x26, "HOST6" },
{ 0x27, "HOST7" },
{ 0x28, "HOST8" },
{ 0x29, "HOST9" },
{ 0x2a, "HOST10" },
{ 0x2b, "HOST11" },
{ 0x2c, "HOST12" },
{ 0x2d, "HOST13" },
{}
};
const struct nvkm_enum
gv100_fifo_mmu_fault_access[] = {
{ 0x0, "VIRT_READ" },
{ 0x1, "VIRT_WRITE" },
{ 0x2, "VIRT_ATOMIC" },
{ 0x3, "VIRT_PREFETCH" },
{ 0x4, "VIRT_ATOMIC_WEAK" },
{ 0x8, "PHYS_READ" },
{ 0x9, "PHYS_WRITE" },
{ 0xa, "PHYS_ATOMIC" },
{ 0xb, "PHYS_PREFETCH" },
{}
};
static const struct nvkm_fifo_func_mmu_fault
gv100_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gv100_fifo_mmu_fault_access,
.engine = gv100_fifo_mmu_fault_engine,
.reason = gv100_fifo_mmu_fault_reason,
.hubclient = gv100_fifo_mmu_fault_hubclient,
.gpcclient = gv100_fifo_mmu_fault_gpcclient,
};
static void
gv100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo, u32 engm)
{
struct nvkm_runl *runl;
struct nvkm_engn *engn;
nvkm_runl_foreach(runl, fifo) {
nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id))
nvkm_runl_rc_engn(runl, engn);
}
}
static const struct nvkm_fifo_func
gv100_fifo = {
.chid_nr = gm200_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gm200_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_ctxsw_timeout = gv100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gv100_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gv100_runl,
.runq = &gv100_runq,
.engn = &gv100_engn,
.engn_ce = &gv100_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
.chan = {{ 0, 0, VOLTA_CHANNEL_GPFIFO_A }, &gv100_chan },
};
int
gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gv100_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "runq.h"
#include <nvif/class.h>
void
gk208_runq_init(struct nvkm_runq *runq)
{
gk104_runq_init(runq);
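/* 0x000f4240 is 1000000 decimal; this reads like a PBDMA timeout period
 * in clock ticks, but that interpretation is an assumption - the register
 * is otherwise undocumented here.
 */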
nvkm_wr32(runq->fifo->engine.subdev.device, 0x04012c + (runq->id * 0x2000), 0x000f4240);
}
const struct nvkm_runq_func
gk208_runq = {
.init = gk208_runq_init,
.intr = gk104_runq_intr,
.intr_0_names = gk104_runq_intr_0_names,
.idle = gk104_runq_idle,
};
static int
gk208_fifo_chid_nr(struct nvkm_fifo *fifo)
{
return 1024;
}
static const struct nvkm_fifo_func
gk208_fifo = {
.chid_nr = gk208_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk110_chan },
};
int
gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gk208_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "runl.h"
#include <core/ramht.h>
#include <subdev/timer.h>
#include <nvif/class.h>
static void
g84_chan_bind(struct nvkm_chan *chan)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 8);
}
static int
g84_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
const u32 limit2 = ilog2(length / 8);
int ret;
ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->inst, &chan->eng);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->inst, &chan->cache);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->inst, &chan->ramfc);
if (ret)
return ret;
ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
if (ret)
return ret;
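/* Fill in the RAMFC. The field meanings below are inferred from the
 * nv50-era host layout and are assumptions, not documentation: pushbuf
 * DMA node, GP base/limit (limit2 encodes log2 of the 8-byte entry
 * count), interrupt/engine masks, RAMHT configuration, cache and
 * channel instance addresses.
 */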
nvkm_kmap(chan->ramfc);
nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
nvkm_wo32(chan->ramfc, 0x98, chan->inst->addr >> 12);
nvkm_done(chan->ramfc);
return 0;
}
static const struct nvkm_chan_func_ramfc
g84_chan_ramfc = {
.write = g84_chan_ramfc_write,
.ctxdma = true,
.devm = 0xfff,
};
const struct nvkm_chan_func
g84_chan = {
.inst = &nv50_chan_inst,
.userd = &nv50_chan_userd,
.ramfc = &g84_chan_ramfc,
.bind = g84_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
.stop = nv50_chan_stop,
};
static void
g84_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
struct nvkm_device *device = subdev->device;
u64 start = 0, limit = 0;
u32 flags = 0, ptr0, save;
switch (engn->engine->subdev.type) {
case NVKM_ENGINE_GR : ptr0 = 0x0020; break;
case NVKM_ENGINE_VP :
case NVKM_ENGINE_MSPDEC: ptr0 = 0x0040; break;
case NVKM_ENGINE_MPEG :
case NVKM_ENGINE_MSPPP : ptr0 = 0x0060; break;
case NVKM_ENGINE_BSP :
case NVKM_ENGINE_MSVLD : ptr0 = 0x0080; break;
case NVKM_ENGINE_CIPHER:
case NVKM_ENGINE_SEC : ptr0 = 0x00a0; break;
case NVKM_ENGINE_CE : ptr0 = 0x00c0; break;
default:
WARN_ON(1);
return;
}
if (!cctx) {
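/* No context to bind: evict whatever the engine currently has loaded.
 * 0x002520 selects the engine, writing the channel instance to 0x0032fc
 * kicks the unload, and the poll waits for completion - reading back
 * 0xffffffff appears to signal "still busy", an inference rather than
 * documented behaviour.
 */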
save = nvkm_mask(device, 0x002520, 0x0000003f, BIT(engn->id - 1));
nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
break;
);
nvkm_wr32(device, 0x002520, save);
} else {
flags = 0x00190000;
start = cctx->vctx->inst->addr;
limit = start + cctx->vctx->inst->size - 1;
}
nvkm_kmap(chan->eng);
nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
lower_32_bits(start));
nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
nvkm_done(chan->eng);
}
const struct nvkm_engn_func
g84_engn = {
.bind = g84_ectx_bind,
.ramht_add = nv50_eobj_ramht_add,
.ramht_del = nv50_eobj_ramht_del,
};
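/* Bit 30 of 0x002140 appears to gate the non-stalling interrupt; the
 * event framework toggles it as listeners come and go (allow on init,
 * block on fini).
 */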
static void
g84_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x40000000, 0x00000000);
spin_unlock_irqrestore(&fifo->lock, flags);
}
static void
g84_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x40000000, 0x40000000);
spin_unlock_irqrestore(&fifo->lock, flags);
}
const struct nvkm_event_func
g84_fifo_nonstall = {
.init = g84_fifo_nonstall_allow,
.fini = g84_fifo_nonstall_block,
};
static int
g84_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_runl *runl;
runl = nvkm_runl_new(fifo, 0, 0, 0);
if (IS_ERR(runl))
return PTR_ERR(runl);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_GR, 0);
nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MPEG, 0);
nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_ME, 0);
nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_VP, 0);
nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_CIPHER, 0);
nvkm_runl_add(runl, 6, fifo->func->engn, NVKM_ENGINE_BSP, 0);
return 0;
}
static const struct nvkm_fifo_func
g84_fifo = {
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = nv50_fifo_chid_ctor,
.runl_ctor = g84_fifo_runl_ctor,
.init = nv50_fifo_init,
.intr = nv04_fifo_intr,
.pause = nv04_fifo_pause,
.start = nv04_fifo_start,
.nonstall = &g84_fifo_nonstall,
.runl = &nv50_runl,
.engn = &g84_engn,
.engn_sw = &nv50_engn_sw,
.cgrp = {{ }, &nv04_cgrp },
.chan = {{ 0, 0, G82_CHANNEL_GPFIFO }, &g84_chan },
};
int
g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&g84_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "runl.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "priv.h"
#include "runq.h"
#include <core/gpuobj.h>
#include <subdev/timer.h>
#include <subdev/top.h>
static struct nvkm_cgrp *
nvkm_engn_cgrp_get(struct nvkm_engn *engn, unsigned long *pirqflags)
{
struct nvkm_cgrp *cgrp = NULL;
struct nvkm_chan *chan;
bool cgid;
int id;
id = engn->func->cxid(engn, &cgid);
if (id < 0)
return NULL;
if (!cgid) {
chan = nvkm_runl_chan_get_chid(engn->runl, id, pirqflags);
if (chan)
cgrp = chan->cgrp;
} else {
cgrp = nvkm_runl_cgrp_get_cgid(engn->runl, id, pirqflags);
}
WARN_ON(!cgrp);
return cgrp;
}
static void
nvkm_runl_rc(struct nvkm_runl *runl)
{
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_cgrp *cgrp, *gtmp;
struct nvkm_chan *chan, *ctmp;
struct nvkm_engn *engn;
unsigned long flags;
int rc, state, i;
bool reset;
/* Runlist is blocked before scheduling recovery - fetch count. */
BUG_ON(!mutex_is_locked(&runl->mutex));
rc = atomic_xchg(&runl->rc_pending, 0);
if (!rc)
return;
/* Look for channel groups flagged for RC. */
nvkm_runl_foreach_cgrp_safe(cgrp, gtmp, runl) {
state = atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_PENDING, NVKM_CGRP_RC_RUNNING);
if (state == NVKM_CGRP_RC_PENDING) {
/* Disable all channels in them, and remove from runlist. */
nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp) {
nvkm_chan_error(chan, false);
nvkm_chan_remove_locked(chan);
}
}
}
/* On GPUs with runlist preemption, wait for the PBDMA(s) servicing the runlist to go idle. */
if (runl->func->preempt) {
for (i = 0; i < runl->runq_nr; i++) {
struct nvkm_runq *runq = runl->runq[i];
if (runq) {
nvkm_msec(fifo->engine.subdev.device, 2000,
if (runq->func->idle(runq))
break;
);
}
}
}
/* Look for engines that are still on flagged channel groups - reset them. */
nvkm_runl_foreach_engn_cond(engn, runl, engn->func->cxid) {
cgrp = nvkm_engn_cgrp_get(engn, &flags);
if (!cgrp) {
ENGN_DEBUG(engn, "cxid not valid");
continue;
}
reset = atomic_read(&cgrp->rc) == NVKM_CGRP_RC_RUNNING;
nvkm_cgrp_put(&cgrp, flags);
if (!reset) {
ENGN_DEBUG(engn, "cxid not in recovery");
continue;
}
ENGN_DEBUG(engn, "resetting...");
/*TODO: can we do something less catastrophic on failure? */
WARN_ON(nvkm_engine_reset(engn->engine));
}
/* Submit runlist update, and clear any remaining exception state. */
runl->func->update(runl);
if (runl->func->fault_clear)
runl->func->fault_clear(runl);
/* Unblock runlist processing. */
while (rc--)
nvkm_runl_allow(runl);
runl->func->wait(runl);
}
static void
nvkm_runl_rc_runl(struct nvkm_runl *runl)
{
RUNL_ERROR(runl, "rc scheduled");
nvkm_runl_block(runl);
if (runl->func->preempt)
runl->func->preempt(runl);
atomic_inc(&runl->rc_pending);
schedule_work(&runl->work);
}
void
nvkm_runl_rc_cgrp(struct nvkm_cgrp *cgrp)
{
if (atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_NONE, NVKM_CGRP_RC_PENDING) != NVKM_CGRP_RC_NONE)
return;
CGRP_ERROR(cgrp, "rc scheduled");
nvkm_runl_rc_runl(cgrp->runl);
}
void
nvkm_runl_rc_engn(struct nvkm_runl *runl, struct nvkm_engn *engn)
{
struct nvkm_cgrp *cgrp;
unsigned long flags;
/* Look up the channel group currently on the engine. */
cgrp = nvkm_engn_cgrp_get(engn, &flags);
if (!cgrp) {
ENGN_DEBUG(engn, "rc skipped, not on channel");
return;
}
nvkm_runl_rc_cgrp(cgrp);
nvkm_cgrp_put(&cgrp, flags);
}
static void
nvkm_runl_work(struct work_struct *work)
{
struct nvkm_runl *runl = container_of(work, typeof(*runl), work);
mutex_lock(&runl->mutex);
nvkm_runl_rc(runl);
mutex_unlock(&runl->mutex);
}
struct nvkm_chan *
nvkm_runl_chan_get_inst(struct nvkm_runl *runl, u64 inst, unsigned long *pirqflags)
{
struct nvkm_chid *chid = runl->chid;
struct nvkm_chan *chan;
unsigned long flags;
int id;
spin_lock_irqsave(&chid->lock, flags);
for_each_set_bit(id, chid->used, chid->nr) {
chan = chid->data[id];
if (likely(chan)) {
if (chan->inst->addr == inst) {
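/* Lock handoff: take the cgrp lock before dropping the chid lock so
 * the channel can't be destroyed underneath us; the caller ends the
 * critical section via nvkm_cgrp_put().
 */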
spin_lock(&chan->cgrp->lock);
*pirqflags = flags;
spin_unlock(&chid->lock);
return chan;
}
}
}
spin_unlock_irqrestore(&chid->lock, flags);
return NULL;
}
struct nvkm_chan *
nvkm_runl_chan_get_chid(struct nvkm_runl *runl, int id, unsigned long *pirqflags)
{
struct nvkm_chid *chid = runl->chid;
struct nvkm_chan *chan;
unsigned long flags;
spin_lock_irqsave(&chid->lock, flags);
if (!WARN_ON(id >= chid->nr)) {
chan = chid->data[id];
if (likely(chan)) {
spin_lock(&chan->cgrp->lock);
*pirqflags = flags;
spin_unlock(&chid->lock);
return chan;
}
}
spin_unlock_irqrestore(&chid->lock, flags);
return NULL;
}
struct nvkm_cgrp *
nvkm_runl_cgrp_get_cgid(struct nvkm_runl *runl, int id, unsigned long *pirqflags)
{
struct nvkm_chid *cgid = runl->cgid;
struct nvkm_cgrp *cgrp;
unsigned long flags;
spin_lock_irqsave(&cgid->lock, flags);
if (!WARN_ON(id >= cgid->nr)) {
cgrp = cgid->data[id];
if (likely(cgrp)) {
spin_lock(&cgrp->lock);
*pirqflags = flags;
spin_unlock(&cgid->lock);
return cgrp;
}
}
spin_unlock_irqrestore(&cgid->lock, flags);
return NULL;
}
int
nvkm_runl_preempt_wait(struct nvkm_runl *runl)
{
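/* Poll for preempt completion, processing any pending recovery between
 * samples so a dead channel can't wedge the wait.
 */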
return nvkm_msec(runl->fifo->engine.subdev.device, runl->fifo->timeout.chan_msec,
if (!runl->func->preempt_pending(runl))
break;
nvkm_runl_rc(runl);
usleep_range(1, 2);
) < 0 ? -ETIMEDOUT : 0;
}
bool
nvkm_runl_update_pending(struct nvkm_runl *runl)
{
if (!runl->func->pending(runl))
return false;
nvkm_runl_rc(runl);
return true;
}
void
nvkm_runl_update_locked(struct nvkm_runl *runl, bool wait)
{
if (atomic_xchg(&runl->changed, 0) && runl->func->update) {
runl->func->update(runl);
if (wait)
runl->func->wait(runl);
}
}
void
nvkm_runl_allow(struct nvkm_runl *runl)
{
struct nvkm_fifo *fifo = runl->fifo;
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
if (!--runl->blocked) {
RUNL_TRACE(runl, "running");
runl->func->allow(runl, ~0);
}
spin_unlock_irqrestore(&fifo->lock, flags);
}
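/* block()/allow() nest via runl->blocked, so multiple recovery paths can
 * stack. A minimal illustrative sketch (not from the original source):
 *
 *	nvkm_runl_block(runl);	 0->1: runlist processing stopped
 *	nvkm_runl_block(runl);	 1->2: nested caller, no register access
 *	nvkm_runl_allow(runl);	 2->1: still blocked
 *	nvkm_runl_allow(runl);	 1->0: runlist processing resumes
 */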
void
nvkm_runl_block(struct nvkm_runl *runl)
{
struct nvkm_fifo *fifo = runl->fifo;
unsigned long flags;
spin_lock_irqsave(&fifo->lock, flags);
if (!runl->blocked++) {
RUNL_TRACE(runl, "stopped");
runl->func->block(runl, ~0);
}
spin_unlock_irqrestore(&fifo->lock, flags);
}
void
nvkm_runl_fini(struct nvkm_runl *runl)
{
if (runl->func->fini)
runl->func->fini(runl);
flush_work(&runl->work);
}
void
nvkm_runl_del(struct nvkm_runl *runl)
{
struct nvkm_engn *engn, *engt;
nvkm_memory_unref(&runl->mem);
list_for_each_entry_safe(engn, engt, &runl->engns, head) {
list_del(&engn->head);
kfree(engn);
}
nvkm_chid_unref(&runl->chid);
nvkm_chid_unref(&runl->cgid);
list_del(&runl->head);
mutex_destroy(&runl->mutex);
kfree(runl);
}
struct nvkm_engn *
nvkm_runl_add(struct nvkm_runl *runl, int engi, const struct nvkm_engn_func *func,
enum nvkm_subdev_type type, int inst)
{
struct nvkm_fifo *fifo = runl->fifo;
struct nvkm_device *device = fifo->engine.subdev.device;
struct nvkm_engine *engine;
struct nvkm_engn *engn;
engine = nvkm_device_engine(device, type, inst);
if (!engine) {
RUNL_DEBUG(runl, "engn %d.%d[%s] not found", engi, inst, nvkm_subdev_type[type]);
return NULL;
}
if (!(engn = kzalloc(sizeof(*engn), GFP_KERNEL)))
return NULL;
engn->func = func;
engn->runl = runl;
engn->id = engi;
engn->engine = engine;
engn->fault = -1;
list_add_tail(&engn->head, &runl->engns);
/* Look up the MMU engine ID used for fault handling. */
if (device->top)
engn->fault = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
if (engn->fault < 0 && fifo->func->mmu_fault) {
const struct nvkm_enum *map = fifo->func->mmu_fault->engine;
while (map->name) {
if (map->data2 == engine->subdev.type && map->inst == engine->subdev.inst) {
engn->fault = map->value;
break;
}
map++;
}
}
return engn;
}
struct nvkm_runl *
nvkm_runl_get(struct nvkm_fifo *fifo, int runi, u32 addr)
{
struct nvkm_runl *runl;
nvkm_runl_foreach(runl, fifo) {
if ((runi >= 0 && runl->id == runi) || (runi < 0 && runl->addr == addr))
return runl;
}
return NULL;
}
struct nvkm_runl *
nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
struct nvkm_runl *runl;
int ret;
if (!(runl = kzalloc(sizeof(*runl), GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
runl->func = fifo->func->runl;
runl->fifo = fifo;
runl->id = runi;
runl->addr = addr;
INIT_LIST_HEAD(&runl->engns);
INIT_LIST_HEAD(&runl->cgrps);
atomic_set(&runl->changed, 0);
mutex_init(&runl->mutex);
INIT_WORK(&runl->work, nvkm_runl_work);
atomic_set(&runl->rc_triggered, 0);
atomic_set(&runl->rc_pending, 0);
list_add_tail(&runl->head, &fifo->runls);
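/* If the FIFO already exposes a global channel-ID space, share it;
 * otherwise give this runlist its own cgrp/channel ID allocators.
 */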
if (!fifo->chid) {
if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->cgid)) ||
(ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->chid))) {
RUNL_ERROR(runl, "cgid/chid: %d", ret);
nvkm_runl_del(runl);
return ERR_PTR(ret);
}
} else {
runl->cgid = nvkm_chid_ref(fifo->cgid);
runl->chid = nvkm_chid_ref(fifo->chid);
}
return runl;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_fifo_func
ga102_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
.engn = &ga100_engn,
.engn_ce = &ga100_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true },
.chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan },
};
int
ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "runl.h"
#include <core/gpuobj.h>
#include <subdev/fault.h>
#include <nvif/class.h>
static void
gp100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
nvkm_wo32(memory, offset + 0, chan->id | chan->runq << 14);
nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
}
static const struct nvkm_runl_func
gp100_runl = {
.runqs = 2,
.size = 8,
.update = nv50_runl_update,
.insert_cgrp = gk110_runl_insert_cgrp,
.insert_chan = gp100_runl_insert_chan,
.commit = gk104_runl_commit,
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
.block = gk104_runl_block,
.allow = gk104_runl_allow,
.fault_clear = gk104_runl_fault_clear,
.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
gp100_fifo_mmu_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
{ 0x06, "HOST0" },
{ 0x07, "HOST1" },
{ 0x08, "HOST2" },
{ 0x09, "HOST3" },
{ 0x0a, "HOST4" },
{ 0x0b, "HOST5" },
{ 0x0c, "HOST6" },
{ 0x0d, "HOST7" },
{ 0x0e, "HOST8" },
{ 0x0f, "HOST9" },
{ 0x10, "HOST10" },
{ 0x13, "PERF" },
{ 0x17, "PMU" },
{ 0x18, "PTP" },
{ 0x1f, "PHYSICAL" },
{}
};
static const struct nvkm_fifo_func_mmu_fault
gp100_fifo_mmu_fault = {
.recover = gf100_fifo_mmu_fault_recover,
.access = gf100_fifo_mmu_fault_access,
.engine = gp100_fifo_mmu_fault_engine,
.reason = gk104_fifo_mmu_fault_reason,
.hubclient = gk104_fifo_mmu_fault_hubclient,
.gpcclient = gk104_fifo_mmu_fault_gpcclient,
};
static void
gp100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
{
struct nvkm_device *device = fifo->engine.subdev.device;
u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
struct nvkm_fault_data info;
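/* Decode the fault type word. The bit layout below mirrors the masks
 * used on earlier chips and is assumed rather than documented here:
 * [28:24] gpc, [20] hub, [18:16] access, [14:8] client, [4:0] reason.
 */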
info.inst = (u64)inst << 12;
info.addr = ((u64)vahi << 32) | valo;
info.time = 0;
info.engine = unit;
info.valid = 1;
info.gpc = (type & 0x1f000000) >> 24;
info.hub = (type & 0x00100000) >> 20;
info.access = (type & 0x00070000) >> 16;
info.client = (type & 0x00007f00) >> 8;
info.reason = (type & 0x0000001f);
nvkm_fifo_fault(fifo, &info);
}
static const struct nvkm_fifo_func
gp100_fifo = {
.chid_nr = gm200_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gm200_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gp100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gp100_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gp100_runl,
.runq = &gk208_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
.chan = {{ 0, 0, PASCAL_CHANNEL_GPFIFO_A }, &gm107_chan },
};
int
gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gp100_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_fifo_func
gk20a_fifo = {
.chid_nr = nv50_fifo_chid_nr,
.chid_ctor = gk110_fifo_chid_ctor,
.runq_nr = gf100_fifo_runq_nr,
.runl_ctor = gk104_fifo_runl_ctor,
.init = gk104_fifo_init,
.init_pbdmas = gk104_fifo_init_pbdmas,
.intr = gk104_fifo_intr,
.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
.mmu_fault = &gk104_fifo_mmu_fault,
.nonstall = &gf100_fifo_nonstall,
.runl = &gk110_runl,
.runq = &gk208_runq,
.engn = &gk104_engn,
.engn_ce = &gk104_engn_ce,
.cgrp = {{ }, &gk110_cgrp },
.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk110_chan },
};
int
gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fifo **pfifo)
{
return nvkm_fifo_new_(&gk20a_fifo, device, type, inst, pfifo);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "ctrl.h"
#include <core/client.h>
#include <subdev/clk.h>
#include <nvif/class.h>
#include <nvif/if0001.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
static int
nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_info_v0 v0;
} *args = data;
struct nvkm_clk *clk = ctrl->device->clk;
int ret = -ENOSYS;
nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
args->v0.version);
} else
return ret;
if (clk) {
args->v0.count = clk->state_nr;
args->v0.ustate_ac = clk->ustate_ac;
args->v0.ustate_dc = clk->ustate_dc;
args->v0.pwrsrc = clk->pwrsrc;
args->v0.pstate = clk->pstate;
} else {
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
return 0;
}
static int
nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_attr_v0 v0;
} *args = data;
struct nvkm_clk *clk = ctrl->device->clk;
const struct nvkm_domain *domain;
struct nvkm_pstate *pstate;
struct nvkm_cstate *cstate;
int i = 0, j = -1;
u32 lo, hi;
int ret = -ENOSYS;
nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(&ctrl->object,
"control pstate attr vers %d state %d index %d\n",
args->v0.version, args->v0.state, args->v0.index);
if (!clk)
return -ENODEV;
if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
return -EINVAL;
if (args->v0.state >= clk->state_nr)
return -EINVAL;
} else
return ret;
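/* Find the args->v0.index'th exposed clock domain; only domains with a
 * display name (mname) count towards the index.
 */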
domain = clk->domains;
while (domain->name != nv_clk_src_max) {
if (domain->mname && ++j == args->v0.index)
break;
domain++;
}
if (domain->name == nv_clk_src_max)
return -EINVAL;
if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) {
list_for_each_entry(pstate, &clk->states, head) {
if (i++ == args->v0.state)
break;
}
lo = pstate->base.domain[domain->name];
hi = lo;
list_for_each_entry(cstate, &pstate->list, head) {
lo = min(lo, cstate->domain[domain->name]);
hi = max(hi, cstate->domain[domain->name]);
}
args->v0.state = pstate->pstate;
} else {
lo = max(nvkm_clk_read(clk, domain->name), 0);
hi = lo;
}
snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname);
snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz");
args->v0.min = lo / domain->mdiv;
args->v0.max = hi / domain->mdiv;
args->v0.index = 0;
while ((++domain)->name != nv_clk_src_max) {
if (domain->mname) {
args->v0.index = ++j;
break;
}
}
return 0;
}
static int
nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
{
union {
struct nvif_control_pstate_user_v0 v0;
} *args = data;
struct nvkm_clk *clk = ctrl->device->clk;
int ret = -ENOSYS;
nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(&ctrl->object,
"control pstate user vers %d ustate %d pwrsrc %d\n",
args->v0.version, args->v0.ustate, args->v0.pwrsrc);
if (!clk)
return -ENODEV;
} else
return ret;
if (args->v0.pwrsrc >= 0) {
ret |= nvkm_clk_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
} else {
ret |= nvkm_clk_ustate(clk, args->v0.ustate, 0);
ret |= nvkm_clk_ustate(clk, args->v0.ustate, 1);
}
return ret;
}
static int
nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
struct nvkm_control *ctrl = nvkm_control(object);
switch (mthd) {
case NVIF_CONTROL_PSTATE_INFO:
return nvkm_control_mthd_pstate_info(ctrl, data, size);
case NVIF_CONTROL_PSTATE_ATTR:
return nvkm_control_mthd_pstate_attr(ctrl, data, size);
case NVIF_CONTROL_PSTATE_USER:
return nvkm_control_mthd_pstate_user(ctrl, data, size);
default:
break;
}
return -EINVAL;
}
static const struct nvkm_object_func
nvkm_control = {
.mthd = nvkm_control_mthd,
};
static int
nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
struct nvkm_control *ctrl;
if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
return -ENOMEM;
*pobject = &ctrl->object;
ctrl->device = device;
nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
return 0;
}
const struct nvkm_device_oclass
nvkm_control_oclass = {
.base.oclass = NVIF_CLASS_CONTROL,
.base.minver = -1,
.base.maxver = -1,
.ctor = nvkm_control_new,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
#include "priv.h"
#include "ctrl.h"
#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <nvif/class.h>
#include <nvif/cl0080.h>
#include <nvif/unpack.h>
struct nvkm_udevice {
struct nvkm_object object;
struct nvkm_device *device;
};
static int
nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data)
{
struct nvkm_subdev *subdev;
enum nvkm_subdev_type type;
switch (mthd & NV_DEVICE_INFO_UNIT) {
case NV_DEVICE_HOST(0): type = NVKM_ENGINE_FIFO; break;
default:
return -EINVAL;
}
subdev = nvkm_device_subdev(device, type, 0);
if (subdev)
return nvkm_subdev_info(subdev, mthd, data);
return -ENODEV;
}
static void
nvkm_udevice_info_v1(struct nvkm_device *device,
struct nv_device_info_v1_data *args)
{
if (args->mthd & NV_DEVICE_INFO_UNIT) {
if (nvkm_udevice_info_subdev(device, args->mthd, &args->data))
args->mthd = NV_DEVICE_INFO_INVALID;
return;
}
args->mthd = NV_DEVICE_INFO_INVALID;
}
static int
nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
{
struct nvkm_object *object = &udev->object;
struct nvkm_device *device = udev->device;
struct nvkm_fb *fb = device->fb;
struct nvkm_instmem *imem = device->imem;
union {
struct nv_device_info_v0 v0;
struct nv_device_info_v1 v1;
} *args = data;
int ret = -ENOSYS, i;
nvif_ioctl(object, "device info size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v1, 1, 1, true))) {
nvif_ioctl(object, "device info vers %d count %d\n",
args->v1.version, args->v1.count);
if (args->v1.count * sizeof(args->v1.data[0]) == size) {
for (i = 0; i < args->v1.count; i++)
nvkm_udevice_info_v1(device, &args->v1.data[i]);
return 0;
}
return -EINVAL;
} else
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object, "device info vers %d\n", args->v0.version);
} else
return ret;
switch (device->chipset) {
case 0x01a:
case 0x01f:
case 0x04c:
case 0x04e:
case 0x063:
case 0x067:
case 0x068:
case 0x0aa:
case 0x0ac:
case 0x0af:
args->v0.platform = NV_DEVICE_INFO_V0_IGP;
break;
default:
switch (device->type) {
case NVKM_DEVICE_PCI:
args->v0.platform = NV_DEVICE_INFO_V0_PCI;
break;
case NVKM_DEVICE_AGP:
args->v0.platform = NV_DEVICE_INFO_V0_AGP;
break;
case NVKM_DEVICE_PCIE:
args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
break;
case NVKM_DEVICE_TEGRA:
args->v0.platform = NV_DEVICE_INFO_V0_SOC;
break;
default:
WARN_ON(1);
break;
}
break;
}
switch (device->card_type) {
case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
case NV_10:
case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;
}
args->v0.chipset = device->chipset;
args->v0.revision = device->chiprev;
if (fb && fb->ram)
args->v0.ram_size = args->v0.ram_user = fb->ram->size;
else
args->v0.ram_size = args->v0.ram_user = 0;
if (imem && args->v0.ram_size > 0)
args->v0.ram_user = args->v0.ram_user - imem->reserved;
snprintf(args->v0.chip, sizeof(args->v0.chip), "%s", device->chip->name);
snprintf(args->v0.name, sizeof(args->v0.name), "%s", device->name);
return 0;
}
static int
nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
{
struct nvkm_object *object = &udev->object;
struct nvkm_device *device = udev->device;
union {
struct nv_device_time_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "device time size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object, "device time vers %d\n", args->v0.version);
args->v0.time = nvkm_timer_read(device->timer);
}
return ret;
}
static int
nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
nvif_ioctl(object, "device mthd %08x\n", mthd);
switch (mthd) {
case NV_DEVICE_V0_INFO:
return nvkm_udevice_info(udev, data, size);
case NV_DEVICE_V0_TIME:
return nvkm_udevice_time(udev, data, size);
default:
break;
}
return -EINVAL;
}
static int
nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
*data = nvkm_rd08(udev->device, addr);
return 0;
}
static int
nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
*data = nvkm_rd16(udev->device, addr);
return 0;
}
static int
nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
*data = nvkm_rd32(udev->device, addr);
return 0;
}
static int
nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
nvkm_wr08(udev->device, addr, data);
return 0;
}
static int
nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
nvkm_wr16(udev->device, addr, data);
return 0;
}
static int
nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
nvkm_wr32(udev->device, addr, data);
return 0;
}
static int
nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
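/* Map resource 0 (BAR0, the register aperture) into the client; only
 * the privileged object class wires up this method.
 */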
*type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, 0);
*size = device->func->resource_size(device, 0);
return 0;
}
static int
nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
int ret = 0;
mutex_lock(&device->mutex);
if (!--device->refcount) {
ret = nvkm_device_fini(device, suspend);
if (ret && suspend) {
device->refcount++;
goto done;
}
}
done:
mutex_unlock(&device->mutex);
return ret;
}
static int
nvkm_udevice_init(struct nvkm_object *object)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
int ret = 0;
mutex_lock(&device->mutex);
if (!device->refcount++) {
ret = nvkm_device_init(device);
if (ret) {
device->refcount--;
goto done;
}
}
done:
mutex_unlock(&device->mutex);
return ret;
}
static int
nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
const struct nvkm_device_oclass *sclass = oclass->priv;
return sclass->ctor(udev->device, oclass, data, size, pobject);
}
static int
nvkm_udevice_child_get(struct nvkm_object *object, int index,
struct nvkm_oclass *oclass)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
struct nvkm_engine *engine;
u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_FIFO) |
(1ULL << NVKM_ENGINE_DISP) |
(1ULL << NVKM_ENGINE_PM);
const struct nvkm_device_oclass *sclass = NULL;
int i;
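/* Walk the fixed engine mask first, letting each engine consume a
 * slice of 'index' for its own classes; any index left over is then
 * matched against device-owned classes (control, mmu, fault, vfn).
 */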
for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
if (!(engine = nvkm_device_engine(device, i, 0)) ||
!(engine->func->base.sclass))
continue;
oclass->engine = engine;
index -= engine->func->base.sclass(oclass, index, &sclass);
}
if (!sclass) {
if (index-- == 0)
sclass = &nvkm_control_oclass;
else if (device->mmu && index-- == 0)
sclass = &device->mmu->user;
else if (device->fault && index-- == 0)
sclass = &device->fault->user;
else if (device->vfn && index-- == 0)
sclass = &device->vfn->user;
else
return -EINVAL;
oclass->base = sclass->base;
oclass->engine = NULL;
}
oclass->ctor = nvkm_udevice_child_new;
oclass->priv = sclass;
return 0;
}
static const struct nvkm_object_func
nvkm_udevice_super = {
.init = nvkm_udevice_init,
.fini = nvkm_udevice_fini,
.mthd = nvkm_udevice_mthd,
.map = nvkm_udevice_map,
.rd08 = nvkm_udevice_rd08,
.rd16 = nvkm_udevice_rd16,
.rd32 = nvkm_udevice_rd32,
.wr08 = nvkm_udevice_wr08,
.wr16 = nvkm_udevice_wr16,
.wr32 = nvkm_udevice_wr32,
.sclass = nvkm_udevice_child_get,
};
static const struct nvkm_object_func
nvkm_udevice = {
.init = nvkm_udevice_init,
.fini = nvkm_udevice_fini,
.mthd = nvkm_udevice_mthd,
.sclass = nvkm_udevice_child_get,
};
static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
union {
struct nv_device_v0 v0;
} *args = data;
struct nvkm_client *client = oclass->client;
struct nvkm_object *parent = &client->object;
const struct nvkm_object_func *func;
struct nvkm_udevice *udev;
int ret = -ENOSYS;
nvif_ioctl(parent, "create device size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create device v%d device %016llx\n",
args->v0.version, args->v0.device);
} else
return ret;
/* give privileged clients register access */
if (args->v0.priv)
func = &nvkm_udevice_super;
else
func = &nvkm_udevice;
if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(func, oclass, &udev->object);
*pobject = &udev->object;
/* find the device that matches what the client requested */
if (args->v0.device != ~0)
udev->device = nvkm_device_find(args->v0.device);
else
udev->device = nvkm_device_find(client->device);
if (!udev->device)
return -ENODEV;
return 0;
}
const struct nvkm_sclass
nvkm_udevice_sclass = {
.oclass = NV_DEVICE,
.minver = 0,
.maxver = 0,
.ctor = nvkm_udevice_new,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/user.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <core/pci.h>
#include "priv.h"
struct nvkm_device_pci_device {
u16 device;
const char *name;
const struct nvkm_device_pci_vendor *vendor;
};
struct nvkm_device_pci_vendor {
u16 vendor;
u16 device;
const char *name;
const struct nvkm_device_quirk quirk;
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0189[] = {
/* Apple iMac G4 NV18 */
{ 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_01f0[] = {
/* MSI nForce2 IGP */
{ 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0322[] = {
/* Zotac FX5200 */
{ 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
{ 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_05e7[] = {
{ 0x10de, 0x0595, "Tesla T10 Processor" },
{ 0x10de, 0x068f, "Tesla T10 Processor" },
{ 0x10de, 0x0697, "Tesla M1060" },
{ 0x10de, 0x0714, "Tesla M1060" },
{ 0x10de, 0x0743, "Tesla M1060" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0609[] = {
{ 0x106b, 0x00a7, "GeForce 8800 GS" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_062e[] = {
{ 0x106b, 0x0605, "GeForce GT 130" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0649[] = {
{ 0x1043, 0x202d, "GeForce GT 220M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0652[] = {
{ 0x152d, 0x0850, "GeForce GT 240M LE" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0654[] = {
{ 0x1043, 0x14a2, "GeForce GT 320M" },
{ 0x1043, 0x14d2, "GeForce GT 320M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0655[] = {
{ 0x106b, 0x0633, "GeForce GT 120" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0656[] = {
{ 0x106b, 0x0693, "GeForce GT 120" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06d1[] = {
{ 0x10de, 0x0771, "Tesla C2050" },
{ 0x10de, 0x0772, "Tesla C2070" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06d2[] = {
{ 0x10de, 0x088f, "Tesla X2070" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06de[] = {
{ 0x10de, 0x0773, "Tesla S2050" },
{ 0x10de, 0x082f, "Tesla M2050" },
{ 0x10de, 0x0840, "Tesla X2070" },
{ 0x10de, 0x0842, "Tesla M2050" },
{ 0x10de, 0x0846, "Tesla M2050" },
{ 0x10de, 0x0866, "Tesla M2050" },
{ 0x10de, 0x0907, "Tesla M2050" },
{ 0x10de, 0x091e, "Tesla M2050" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06e8[] = {
{ 0x103c, 0x360b, "GeForce 9200M GE" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06f9[] = {
{ 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_06ff[] = {
{ 0x10de, 0x0711, "HICx8 + Graphics" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0866[] = {
{ 0x106b, 0x00b1, "GeForce 9400M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0872[] = {
{ 0x1043, 0x1c42, "GeForce G205M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0873[] = {
{ 0x1043, 0x1c52, "GeForce G205M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a6e[] = {
{ 0x17aa, 0x3607, "Second Generation ION" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a70[] = {
{ 0x17aa, 0x3605, "Second Generation ION" },
{ 0x17aa, 0x3617, "Second Generation ION" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a73[] = {
{ 0x17aa, 0x3607, "Second Generation ION" },
{ 0x17aa, 0x3610, "Second Generation ION" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a74[] = {
{ 0x17aa, 0x903a, "GeForce G210" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a75[] = {
{ 0x17aa, 0x3605, "Second Generation ION" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0a7a[] = {
{ 0x1462, 0xaa51, "GeForce 405" },
{ 0x1462, 0xaa58, "GeForce 405" },
{ 0x1462, 0xac71, "GeForce 405" },
{ 0x1462, 0xac82, "GeForce 405" },
{ 0x1642, 0x3980, "GeForce 405" },
{ 0x17aa, 0x3950, "GeForce 405M" },
{ 0x17aa, 0x397d, "GeForce 405M" },
{ 0x1b0a, 0x90b4, "GeForce 405" },
{ 0x1bfd, 0x0003, "GeForce 405" },
{ 0x1bfd, 0x8006, "GeForce 405" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0dd8[] = {
{ 0x10de, 0x0914, "Quadro 2000D" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0de9[] = {
{ 0x1025, 0x0692, "GeForce GT 620M" },
{ 0x1025, 0x0725, "GeForce GT 620M" },
{ 0x1025, 0x0728, "GeForce GT 620M" },
{ 0x1025, 0x072b, "GeForce GT 620M" },
{ 0x1025, 0x072e, "GeForce GT 620M" },
{ 0x1025, 0x0753, "GeForce GT 620M" },
{ 0x1025, 0x0754, "GeForce GT 620M" },
{ 0x17aa, 0x3977, "GeForce GT 640M LE" },
{ 0x1b0a, 0x2210, "GeForce GT 635M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0dea[] = {
{ 0x17aa, 0x365a, "GeForce 615" },
{ 0x17aa, 0x365b, "GeForce 615" },
{ 0x17aa, 0x365e, "GeForce 615" },
{ 0x17aa, 0x3660, "GeForce 615" },
{ 0x17aa, 0x366c, "GeForce 615" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0df4[] = {
{ 0x152d, 0x0952, "GeForce GT 630M" },
{ 0x152d, 0x0953, "GeForce GT 630M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fd2[] = {
{ 0x1028, 0x0595, "GeForce GT 640M LE" },
{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_0fe3[] = {
{ 0x103c, 0x2b16, "GeForce GT 745A" },
{ 0x17aa, 0x3675, "GeForce GT 745A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_104b[] = {
{ 0x1043, 0x844c, "GeForce GT 625" },
{ 0x1043, 0x846b, "GeForce GT 625" },
{ 0x1462, 0xb590, "GeForce GT 625" },
{ 0x174b, 0x0625, "GeForce GT 625" },
{ 0x174b, 0xa625, "GeForce GT 625" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1058[] = {
{ 0x103c, 0x2af1, "GeForce 610" },
{ 0x17aa, 0x3682, "GeForce 800A" },
{ 0x17aa, 0x3692, "GeForce 705A" },
{ 0x17aa, 0x3695, "GeForce 800A" },
{ 0x17aa, 0x36a8, "GeForce 800A" },
{ 0x17aa, 0x36ac, "GeForce 800A" },
{ 0x17aa, 0x36ad, "GeForce 800A" },
{ 0x705a, 0x3682, "GeForce 800A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_105b[] = {
{ 0x103c, 0x2afb, "GeForce 705A" },
{ 0x17aa, 0x36a1, "GeForce 800A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1091[] = {
{ 0x10de, 0x088e, "Tesla X2090" },
{ 0x10de, 0x0891, "Tesla X2090" },
{ 0x10de, 0x0974, "Tesla X2090" },
{ 0x10de, 0x098d, "Tesla X2090" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1096[] = {
{ 0x10de, 0x0911, "Tesla C2050" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1140[] = {
{ 0x1019, 0x999f, "GeForce GT 720M" },
{ 0x1025, 0x0600, "GeForce GT 620M" },
{ 0x1025, 0x0606, "GeForce GT 620M" },
{ 0x1025, 0x064a, "GeForce GT 620M" },
{ 0x1025, 0x064c, "GeForce GT 620M" },
{ 0x1025, 0x067a, "GeForce GT 620M" },
{ 0x1025, 0x0680, "GeForce GT 620M" },
{ 0x1025, 0x0686, "GeForce 710M" },
{ 0x1025, 0x0689, "GeForce 710M" },
{ 0x1025, 0x068b, "GeForce 710M" },
{ 0x1025, 0x068d, "GeForce 710M" },
{ 0x1025, 0x068e, "GeForce 710M" },
{ 0x1025, 0x0691, "GeForce 710M" },
{ 0x1025, 0x0692, "GeForce GT 620M" },
{ 0x1025, 0x0694, "GeForce GT 620M" },
{ 0x1025, 0x0702, "GeForce GT 620M" },
{ 0x1025, 0x0719, "GeForce GT 620M" },
{ 0x1025, 0x0725, "GeForce GT 620M" },
{ 0x1025, 0x0728, "GeForce GT 620M" },
{ 0x1025, 0x072b, "GeForce GT 620M" },
{ 0x1025, 0x072e, "GeForce GT 620M" },
{ 0x1025, 0x0732, "GeForce GT 620M" },
{ 0x1025, 0x0763, "GeForce GT 720M" },
{ 0x1025, 0x0773, "GeForce 710M" },
{ 0x1025, 0x0774, "GeForce 710M" },
{ 0x1025, 0x0776, "GeForce GT 720M" },
{ 0x1025, 0x077a, "GeForce 710M" },
{ 0x1025, 0x077b, "GeForce 710M" },
{ 0x1025, 0x077c, "GeForce 710M" },
{ 0x1025, 0x077d, "GeForce 710M" },
{ 0x1025, 0x077e, "GeForce 710M" },
{ 0x1025, 0x077f, "GeForce 710M" },
{ 0x1025, 0x0781, "GeForce GT 720M" },
{ 0x1025, 0x0798, "GeForce GT 720M" },
{ 0x1025, 0x0799, "GeForce GT 720M" },
{ 0x1025, 0x079b, "GeForce GT 720M" },
{ 0x1025, 0x079c, "GeForce GT 720M" },
{ 0x1025, 0x0807, "GeForce GT 720M" },
{ 0x1025, 0x0821, "GeForce 820M" },
{ 0x1025, 0x0823, "GeForce GT 720M" },
{ 0x1025, 0x0830, "GeForce GT 720M" },
{ 0x1025, 0x0833, "GeForce GT 720M" },
{ 0x1025, 0x0837, "GeForce GT 720M" },
{ 0x1025, 0x083e, "GeForce 820M" },
{ 0x1025, 0x0841, "GeForce 710M" },
{ 0x1025, 0x0853, "GeForce 820M" },
{ 0x1025, 0x0854, "GeForce 820M" },
{ 0x1025, 0x0855, "GeForce 820M" },
{ 0x1025, 0x0856, "GeForce 820M" },
{ 0x1025, 0x0857, "GeForce 820M" },
{ 0x1025, 0x0858, "GeForce 820M" },
{ 0x1025, 0x0863, "GeForce 820M" },
{ 0x1025, 0x0868, "GeForce 820M" },
{ 0x1025, 0x0869, "GeForce 810M" },
{ 0x1025, 0x0873, "GeForce 820M" },
{ 0x1025, 0x0878, "GeForce 820M" },
{ 0x1025, 0x087b, "GeForce 820M" },
{ 0x1025, 0x087f, "GeForce 820M" },
{ 0x1025, 0x0881, "GeForce 820M" },
{ 0x1025, 0x0885, "GeForce 820M" },
{ 0x1025, 0x088a, "GeForce 820M" },
{ 0x1025, 0x089b, "GeForce 820M" },
{ 0x1025, 0x0921, "GeForce 820M" },
{ 0x1025, 0x092e, "GeForce 810M" },
{ 0x1025, 0x092f, "GeForce 820M" },
{ 0x1025, 0x0932, "GeForce 820M" },
{ 0x1025, 0x093a, "GeForce 820M" },
{ 0x1025, 0x093c, "GeForce 820M" },
{ 0x1025, 0x093f, "GeForce 820M" },
{ 0x1025, 0x0941, "GeForce 820M" },
{ 0x1025, 0x0945, "GeForce 820M" },
{ 0x1025, 0x0954, "GeForce 820M" },
{ 0x1025, 0x0965, "GeForce 820M" },
{ 0x1028, 0x054d, "GeForce GT 630M" },
{ 0x1028, 0x054e, "GeForce GT 630M" },
{ 0x1028, 0x0554, "GeForce GT 620M" },
{ 0x1028, 0x0557, "GeForce GT 620M" },
{ 0x1028, 0x0562, "GeForce GT625M" },
{ 0x1028, 0x0565, "GeForce GT 630M" },
{ 0x1028, 0x0568, "GeForce GT 630M" },
{ 0x1028, 0x0590, "GeForce GT 630M" },
{ 0x1028, 0x0592, "GeForce GT625M" },
{ 0x1028, 0x0594, "GeForce GT625M" },
{ 0x1028, 0x0595, "GeForce GT625M" },
{ 0x1028, 0x05a2, "GeForce GT625M" },
{ 0x1028, 0x05b1, "GeForce GT625M" },
{ 0x1028, 0x05b3, "GeForce GT625M" },
{ 0x1028, 0x05da, "GeForce GT 630M" },
{ 0x1028, 0x05de, "GeForce GT 720M" },
{ 0x1028, 0x05e0, "GeForce GT 720M" },
{ 0x1028, 0x05e8, "GeForce GT 630M" },
{ 0x1028, 0x05f4, "GeForce GT 720M" },
{ 0x1028, 0x060f, "GeForce GT 720M" },
{ 0x1028, 0x062f, "GeForce GT 720M" },
{ 0x1028, 0x064e, "GeForce 820M" },
{ 0x1028, 0x0652, "GeForce 820M" },
{ 0x1028, 0x0653, "GeForce 820M" },
{ 0x1028, 0x0655, "GeForce 820M" },
{ 0x1028, 0x065e, "GeForce 820M" },
{ 0x1028, 0x0662, "GeForce 820M" },
{ 0x1028, 0x068d, "GeForce 820M" },
{ 0x1028, 0x06ad, "GeForce 820M" },
{ 0x1028, 0x06ae, "GeForce 820M" },
{ 0x1028, 0x06af, "GeForce 820M" },
{ 0x1028, 0x06b0, "GeForce 820M" },
{ 0x1028, 0x06c0, "GeForce 820M" },
{ 0x1028, 0x06c1, "GeForce 820M" },
{ 0x103c, 0x18ef, "GeForce GT 630M" },
{ 0x103c, 0x18f9, "GeForce GT 630M" },
{ 0x103c, 0x18fb, "GeForce GT 630M" },
{ 0x103c, 0x18fd, "GeForce GT 630M" },
{ 0x103c, 0x18ff, "GeForce GT 630M" },
{ 0x103c, 0x218a, "GeForce 820M" },
{ 0x103c, 0x21bb, "GeForce 820M" },
{ 0x103c, 0x21bc, "GeForce 820M" },
{ 0x103c, 0x220e, "GeForce 820M" },
{ 0x103c, 0x2210, "GeForce 820M" },
{ 0x103c, 0x2212, "GeForce 820M" },
{ 0x103c, 0x2214, "GeForce 820M" },
{ 0x103c, 0x2218, "GeForce 820M" },
{ 0x103c, 0x225b, "GeForce 820M" },
{ 0x103c, 0x225d, "GeForce 820M" },
{ 0x103c, 0x226d, "GeForce 820M" },
{ 0x103c, 0x226f, "GeForce 820M" },
{ 0x103c, 0x22d2, "GeForce 820M" },
{ 0x103c, 0x22d9, "GeForce 820M" },
{ 0x103c, 0x2335, "GeForce 820M" },
{ 0x103c, 0x2337, "GeForce 820M" },
{ 0x103c, 0x2aef, "GeForce GT 720A" },
{ 0x103c, 0x2af9, "GeForce 710A" },
{ 0x1043, 0x10dd, "NVS 5200M" },
{ 0x1043, 0x10ed, "NVS 5200M" },
{ 0x1043, 0x11fd, "GeForce GT 720M" },
{ 0x1043, 0x124d, "GeForce GT 720M" },
{ 0x1043, 0x126d, "GeForce GT 720M" },
{ 0x1043, 0x131d, "GeForce GT 720M" },
{ 0x1043, 0x13fd, "GeForce GT 720M" },
{ 0x1043, 0x14c7, "GeForce GT 720M" },
{ 0x1043, 0x1507, "GeForce GT 620M" },
{ 0x1043, 0x15ad, "GeForce 820M" },
{ 0x1043, 0x15ed, "GeForce 820M" },
{ 0x1043, 0x160d, "GeForce 820M" },
{ 0x1043, 0x163d, "GeForce 820M" },
{ 0x1043, 0x165d, "GeForce 820M" },
{ 0x1043, 0x166d, "GeForce 820M" },
{ 0x1043, 0x16cd, "GeForce 820M" },
{ 0x1043, 0x16dd, "GeForce 820M" },
{ 0x1043, 0x170d, "GeForce 820M" },
{ 0x1043, 0x176d, "GeForce 820M" },
{ 0x1043, 0x178d, "GeForce 820M" },
{ 0x1043, 0x179d, "GeForce 820M" },
{ 0x1043, 0x2132, "GeForce GT 620M" },
{ 0x1043, 0x2136, "NVS 5200M" },
{ 0x1043, 0x21ba, "GeForce GT 720M" },
{ 0x1043, 0x21fa, "GeForce GT 720M" },
{ 0x1043, 0x220a, "GeForce GT 720M" },
{ 0x1043, 0x221a, "GeForce GT 720M" },
{ 0x1043, 0x223a, "GeForce GT 710M" },
{ 0x1043, 0x224a, "GeForce GT 710M" },
{ 0x1043, 0x227a, "GeForce 820M" },
{ 0x1043, 0x228a, "GeForce 820M" },
{ 0x1043, 0x22fa, "GeForce 820M" },
{ 0x1043, 0x232a, "GeForce 820M" },
{ 0x1043, 0x233a, "GeForce 820M" },
{ 0x1043, 0x235a, "GeForce 820M" },
{ 0x1043, 0x236a, "GeForce 820M" },
{ 0x1043, 0x238a, "GeForce 820M" },
{ 0x1043, 0x8595, "GeForce GT 720M" },
{ 0x1043, 0x85ea, "GeForce GT 720M" },
{ 0x1043, 0x85eb, "GeForce 820M" },
{ 0x1043, 0x85ec, "GeForce 820M" },
{ 0x1043, 0x85ee, "GeForce GT 720M" },
{ 0x1043, 0x85f3, "GeForce 820M" },
{ 0x1043, 0x860e, "GeForce 820M" },
{ 0x1043, 0x861a, "GeForce 820M" },
{ 0x1043, 0x861b, "GeForce 820M" },
{ 0x1043, 0x8628, "GeForce 820M" },
{ 0x1043, 0x8643, "GeForce 820M" },
{ 0x1043, 0x864c, "GeForce 820M" },
{ 0x1043, 0x8652, "GeForce 820M" },
{ 0x1043, 0x8660, "GeForce 820M" },
{ 0x1043, 0x8661, "GeForce 820M" },
{ 0x105b, 0x0dac, "GeForce GT 720M" },
{ 0x105b, 0x0dad, "GeForce GT 720M" },
{ 0x105b, 0x0ef3, "GeForce GT 720M" },
{ 0x10cf, 0x17f5, "GeForce GT 720M" },
{ 0x1179, 0xfa01, "GeForce 710M" },
{ 0x1179, 0xfa02, "GeForce 710M" },
{ 0x1179, 0xfa03, "GeForce 710M" },
{ 0x1179, 0xfa05, "GeForce 710M" },
{ 0x1179, 0xfa11, "GeForce 710M" },
{ 0x1179, 0xfa13, "GeForce 710M" },
{ 0x1179, 0xfa18, "GeForce 710M" },
{ 0x1179, 0xfa19, "GeForce 710M" },
{ 0x1179, 0xfa21, "GeForce 710M" },
{ 0x1179, 0xfa23, "GeForce 710M" },
{ 0x1179, 0xfa2a, "GeForce 710M" },
{ 0x1179, 0xfa32, "GeForce 710M" },
{ 0x1179, 0xfa33, "GeForce 710M" },
{ 0x1179, 0xfa36, "GeForce 710M" },
{ 0x1179, 0xfa38, "GeForce 710M" },
{ 0x1179, 0xfa42, "GeForce 710M" },
{ 0x1179, 0xfa43, "GeForce 710M" },
{ 0x1179, 0xfa45, "GeForce 710M" },
{ 0x1179, 0xfa47, "GeForce 710M" },
{ 0x1179, 0xfa49, "GeForce 710M" },
{ 0x1179, 0xfa58, "GeForce 710M" },
{ 0x1179, 0xfa59, "GeForce 710M" },
{ 0x1179, 0xfa88, "GeForce 710M" },
{ 0x1179, 0xfa89, "GeForce 710M" },
{ 0x144d, 0xb092, "GeForce GT 620M" },
{ 0x144d, 0xc0d5, "GeForce GT 630M" },
{ 0x144d, 0xc0d7, "GeForce GT 620M" },
{ 0x144d, 0xc0e2, "NVS 5200M" },
{ 0x144d, 0xc0e3, "NVS 5200M" },
{ 0x144d, 0xc0e4, "NVS 5200M" },
{ 0x144d, 0xc10d, "GeForce 820M" },
{ 0x144d, 0xc652, "GeForce GT 620M" },
{ 0x144d, 0xc709, "GeForce 710M" },
{ 0x144d, 0xc711, "GeForce 710M" },
{ 0x144d, 0xc736, "GeForce 710M" },
{ 0x144d, 0xc737, "GeForce 710M" },
{ 0x144d, 0xc745, "GeForce 820M" },
{ 0x144d, 0xc750, "GeForce 820M" },
{ 0x1462, 0x10b8, "GeForce GT 710M" },
{ 0x1462, 0x10e9, "GeForce GT 720M" },
{ 0x1462, 0x1116, "GeForce 820M" },
{ 0x1462, 0xaa33, "GeForce 720M" },
{ 0x1462, 0xaaa2, "GeForce GT 720M" },
{ 0x1462, 0xaaa3, "GeForce 820M" },
{ 0x1462, 0xacb2, "GeForce GT 720M" },
{ 0x1462, 0xacc1, "GeForce GT 720M" },
{ 0x1462, 0xae61, "GeForce 720M" },
{ 0x1462, 0xae65, "GeForce GT 720M" },
{ 0x1462, 0xae6a, "GeForce 820M" },
{ 0x1462, 0xae71, "GeForce GT 720M" },
{ 0x14c0, 0x0083, "GeForce 820M" },
{ 0x152d, 0x0926, "GeForce 620M" },
{ 0x152d, 0x0982, "GeForce GT 630M" },
{ 0x152d, 0x0983, "GeForce GT 630M" },
{ 0x152d, 0x1005, "GeForce GT820M" },
{ 0x152d, 0x1012, "GeForce 710M" },
{ 0x152d, 0x1019, "GeForce 820M" },
{ 0x152d, 0x1030, "GeForce GT 630M" },
{ 0x152d, 0x1055, "GeForce 710M" },
{ 0x152d, 0x1067, "GeForce GT 720M" },
{ 0x152d, 0x1092, "GeForce 820M" },
{ 0x17aa, 0x2200, "NVS 5200M" },
{ 0x17aa, 0x2213, "GeForce GT 720M" },
{ 0x17aa, 0x2220, "GeForce GT 720M" },
{ 0x17aa, 0x309c, "GeForce GT 720A" },
{ 0x17aa, 0x30b4, "GeForce 820A" },
{ 0x17aa, 0x30b7, "GeForce 720A" },
{ 0x17aa, 0x30e4, "GeForce 820A" },
{ 0x17aa, 0x361b, "GeForce 820A" },
{ 0x17aa, 0x361c, "GeForce 820A" },
{ 0x17aa, 0x361d, "GeForce 820A" },
{ 0x17aa, 0x3656, "GeForce GT620M" },
{ 0x17aa, 0x365a, "GeForce 705M" },
{ 0x17aa, 0x365e, "GeForce 800M" },
{ 0x17aa, 0x3661, "GeForce 820A" },
{ 0x17aa, 0x366c, "GeForce 800M" },
{ 0x17aa, 0x3685, "GeForce 800M" },
{ 0x17aa, 0x3686, "GeForce 800M" },
{ 0x17aa, 0x3687, "GeForce 705A" },
{ 0x17aa, 0x3696, "GeForce 820A" },
{ 0x17aa, 0x369b, "GeForce 820A" },
{ 0x17aa, 0x369c, "GeForce 820A" },
{ 0x17aa, 0x369d, "GeForce 820A" },
{ 0x17aa, 0x369e, "GeForce 820A" },
{ 0x17aa, 0x36a6, "GeForce 820A" },
{ 0x17aa, 0x36a7, "GeForce 820A" },
{ 0x17aa, 0x36a9, "GeForce 820A" },
{ 0x17aa, 0x36af, "GeForce 820A" },
{ 0x17aa, 0x36b0, "GeForce 820A" },
{ 0x17aa, 0x36b6, "GeForce 820A" },
{ 0x17aa, 0x3800, "GeForce GT 720M" },
{ 0x17aa, 0x3801, "GeForce GT 720M" },
{ 0x17aa, 0x3802, "GeForce GT 720M" },
{ 0x17aa, 0x3803, "GeForce GT 720M" },
{ 0x17aa, 0x3804, "GeForce GT 720M" },
{ 0x17aa, 0x3806, "GeForce GT 720M" },
{ 0x17aa, 0x3808, "GeForce GT 720M" },
{ 0x17aa, 0x380d, "GeForce 820M" },
{ 0x17aa, 0x380e, "GeForce 820M" },
{ 0x17aa, 0x380f, "GeForce 820M" },
{ 0x17aa, 0x3811, "GeForce 820M" },
{ 0x17aa, 0x3812, "GeForce 820M" },
{ 0x17aa, 0x3813, "GeForce 820M" },
{ 0x17aa, 0x3816, "GeForce 820M" },
{ 0x17aa, 0x3817, "GeForce 820M" },
{ 0x17aa, 0x3818, "GeForce 820M" },
{ 0x17aa, 0x381a, "GeForce 820M" },
{ 0x17aa, 0x381c, "GeForce 820M" },
{ 0x17aa, 0x381d, "GeForce 820M" },
{ 0x17aa, 0x3901, "GeForce 610M" },
{ 0x17aa, 0x3902, "GeForce 710M" },
{ 0x17aa, 0x3903, "GeForce 710M" },
{ 0x17aa, 0x3904, "GeForce GT 625M" },
{ 0x17aa, 0x3905, "GeForce GT 720M" },
{ 0x17aa, 0x3907, "GeForce 820M" },
{ 0x17aa, 0x3910, "GeForce GT 720M" },
{ 0x17aa, 0x3912, "GeForce GT 720M" },
{ 0x17aa, 0x3913, "GeForce 820M" },
{ 0x17aa, 0x3915, "GeForce 820M" },
{ 0x17aa, 0x3983, "GeForce 610M" },
{ 0x17aa, 0x5001, "GeForce 610M" },
{ 0x17aa, 0x5003, "GeForce GT 720M" },
{ 0x17aa, 0x5005, "GeForce 705M" },
{ 0x17aa, 0x500d, "GeForce GT 620M" },
{ 0x17aa, 0x5014, "GeForce 710M" },
{ 0x17aa, 0x5017, "GeForce 710M" },
{ 0x17aa, 0x5019, "GeForce 710M" },
{ 0x17aa, 0x501a, "GeForce 710M" },
{ 0x17aa, 0x501f, "GeForce GT 720M" },
{ 0x17aa, 0x5025, "GeForce 710M" },
{ 0x17aa, 0x5027, "GeForce 710M" },
{ 0x17aa, 0x502a, "GeForce 710M" },
{ 0x17aa, 0x502b, "GeForce GT 720M" },
{ 0x17aa, 0x502d, "GeForce 710M" },
{ 0x17aa, 0x502e, "GeForce GT 720M" },
{ 0x17aa, 0x502f, "GeForce GT 720M" },
{ 0x17aa, 0x5030, "GeForce 705M" },
{ 0x17aa, 0x5031, "GeForce 705M" },
{ 0x17aa, 0x5032, "GeForce 820M" },
{ 0x17aa, 0x5033, "GeForce 820M" },
{ 0x17aa, 0x503e, "GeForce 710M" },
{ 0x17aa, 0x503f, "GeForce 820M" },
{ 0x17aa, 0x5040, "GeForce 820M" },
{ 0x1854, 0x0177, "GeForce 710M" },
{ 0x1854, 0x0180, "GeForce 710M" },
{ 0x1854, 0x0190, "GeForce GT 720M" },
{ 0x1854, 0x0192, "GeForce GT 720M" },
{ 0x1854, 0x0224, "GeForce 820M" },
{ 0x1b0a, 0x20dd, "GeForce GT 620M" },
{ 0x1b0a, 0x20df, "GeForce GT 620M" },
{ 0x1b0a, 0x210e, "GeForce 820M" },
{ 0x1b0a, 0x2202, "GeForce GT 720M" },
{ 0x1b0a, 0x90d7, "GeForce 820M" },
{ 0x1b0a, 0x90dd, "GeForce 820M" },
{ 0x1b50, 0x5530, "GeForce 820M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1185[] = {
{ 0x10de, 0x106f, "GeForce GTX 760" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1189[] = {
{ 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1199[] = {
{ 0x1458, 0xd001, "GeForce GTX 760" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_11e3[] = {
{ 0x17aa, 0x3683, "GeForce GTX 760A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1247[] = {
{ 0x1043, 0x212a, "GeForce GT 635M" },
{ 0x1043, 0x212b, "GeForce GT 635M" },
{ 0x1043, 0x212c, "GeForce GT 635M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_124d[] = {
{ 0x1462, 0x10cc, "GeForce GT 635M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1290[] = {
{ 0x103c, 0x2afa, "GeForce 730A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1292[] = {
{ 0x17aa, 0x3675, "GeForce GT 740A" },
{ 0x17aa, 0x367c, "GeForce GT 740A" },
{ 0x17aa, 0x3684, "GeForce GT 740A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1295[] = {
{ 0x103c, 0x2b0d, "GeForce 710A" },
{ 0x103c, 0x2b0f, "GeForce 710A" },
{ 0x103c, 0x2b20, "GeForce 810A" },
{ 0x103c, 0x2b21, "GeForce 810A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1299[] = {
{ 0x17aa, 0x369b, "GeForce 920A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1340[] = {
{ 0x103c, 0x2b2b, "GeForce 830A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1341[] = {
{ 0x17aa, 0x3697, "GeForce 840A" },
{ 0x17aa, 0x3699, "GeForce 840A" },
{ 0x17aa, 0x369c, "GeForce 840A" },
{ 0x17aa, 0x36af, "GeForce 840A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1346[] = {
{ 0x17aa, 0x30ba, "GeForce 930A" },
{ 0x17aa, 0x362c, "GeForce 930A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1347[] = {
{ 0x17aa, 0x36b9, "GeForce 940A" },
{ 0x17aa, 0x36ba, "GeForce 940A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_137a[] = {
{ 0x17aa, 0x2225, "Quadro K620M" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_137d[] = {
{ 0x17aa, 0x3699, "GeForce 940A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1391[] = {
{ 0x17aa, 0x3697, "GeForce GTX 850A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_1392[] = {
{ 0x1028, 0x066a, "GeForce GPU" },
{ 0x1043, 0x861e, "GeForce GTX 750 Ti" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_139a[] = {
{ 0x17aa, 0x36b9, "GeForce GTX 950A" },
{}
};
static const struct nvkm_device_pci_vendor
nvkm_device_pci_10de_139b[] = {
{ 0x1028, 0x06a3, "GeForce GTX 860M" },
{ 0x19da, 0xc248, "GeForce GTX 750 Ti" },
{}
};
static const struct nvkm_device_pci_device
nvkm_device_pci_10de[] = {
{ 0x0020, "RIVA TNT" },
{ 0x0028, "RIVA TNT2/TNT2 Pro" },
{ 0x0029, "RIVA TNT2 Ultra" },
{ 0x002c, "Vanta/Vanta LT" },
{ 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
{ 0x0040, "GeForce 6800 Ultra" },
{ 0x0041, "GeForce 6800" },
{ 0x0042, "GeForce 6800 LE" },
{ 0x0043, "GeForce 6800 XE" },
{ 0x0044, "GeForce 6800 XT" },
{ 0x0045, "GeForce 6800 GT" },
{ 0x0046, "GeForce 6800 GT" },
{ 0x0047, "GeForce 6800 GS" },
{ 0x0048, "GeForce 6800 XT" },
{ 0x004e, "Quadro FX 4000" },
{ 0x0090, "GeForce 7800 GTX" },
{ 0x0091, "GeForce 7800 GTX" },
{ 0x0092, "GeForce 7800 GT" },
{ 0x0093, "GeForce 7800 GS" },
{ 0x0095, "GeForce 7800 SLI" },
{ 0x0098, "GeForce Go 7800" },
{ 0x0099, "GeForce Go 7800 GTX" },
{ 0x009d, "Quadro FX 4500" },
{ 0x00a0, "Aladdin TNT2" },
{ 0x00c0, "GeForce 6800 GS" },
{ 0x00c1, "GeForce 6800" },
{ 0x00c2, "GeForce 6800 LE" },
{ 0x00c3, "GeForce 6800 XT" },
{ 0x00c8, "GeForce Go 6800" },
{ 0x00c9, "GeForce Go 6800 Ultra" },
{ 0x00cc, "Quadro FX Go1400" },
{ 0x00cd, "Quadro FX 3450/4000 SDI" },
{ 0x00ce, "Quadro FX 1400" },
{ 0x00f1, "GeForce 6600 GT" },
{ 0x00f2, "GeForce 6600" },
{ 0x00f3, "GeForce 6200" },
{ 0x00f4, "GeForce 6600 LE" },
{ 0x00f5, "GeForce 7800 GS" },
{ 0x00f6, "GeForce 6800 GS" },
{ 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
{ 0x00f9, "GeForce 6800 Ultra" },
{ 0x00fa, "GeForce PCX 5750" },
{ 0x00fb, "GeForce PCX 5900" },
{ 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
{ 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
{ 0x00fe, "Quadro FX 1300" },
{ 0x0100, "GeForce 256" },
{ 0x0101, "GeForce DDR" },
{ 0x0103, "Quadro" },
{ 0x0110, "GeForce2 MX/MX 400" },
{ 0x0111, "GeForce2 MX 100/200" },
{ 0x0112, "GeForce2 Go" },
{ 0x0113, "Quadro2 MXR/EX/Go" },
{ 0x0140, "GeForce 6600 GT" },
{ 0x0141, "GeForce 6600" },
{ 0x0142, "GeForce 6600 LE" },
{ 0x0143, "GeForce 6600 VE" },
{ 0x0144, "GeForce Go 6600" },
{ 0x0145, "GeForce 6610 XL" },
{ 0x0146, "GeForce Go 6600 TE/6200 TE" },
{ 0x0147, "GeForce 6700 XL" },
{ 0x0148, "GeForce Go 6600" },
{ 0x0149, "GeForce Go 6600 GT" },
{ 0x014a, "Quadro NVS 440" },
{ 0x014c, "Quadro FX 540M" },
{ 0x014d, "Quadro FX 550" },
{ 0x014e, "Quadro FX 540" },
{ 0x014f, "GeForce 6200" },
{ 0x0150, "GeForce2 GTS/GeForce2 Pro" },
{ 0x0151, "GeForce2 Ti" },
{ 0x0152, "GeForce2 Ultra" },
{ 0x0153, "Quadro2 Pro" },
{ 0x0160, "GeForce 6500" },
{ 0x0161, "GeForce 6200 TurboCache(TM)" },
{ 0x0162, "GeForce 6200SE TurboCache(TM)" },
{ 0x0163, "GeForce 6200 LE" },
{ 0x0164, "GeForce Go 6200" },
{ 0x0165, "Quadro NVS 285" },
{ 0x0166, "GeForce Go 6400" },
{ 0x0167, "GeForce Go 6200" },
{ 0x0168, "GeForce Go 6400" },
{ 0x0169, "GeForce 6250" },
{ 0x016a, "GeForce 7100 GS" },
{ 0x0170, "GeForce4 MX 460" },
{ 0x0171, "GeForce4 MX 440" },
{ 0x0172, "GeForce4 MX 420" },
{ 0x0173, "GeForce4 MX 440-SE" },
{ 0x0174, "GeForce4 440 Go" },
{ 0x0175, "GeForce4 420 Go" },
{ 0x0176, "GeForce4 420 Go 32M" },
{ 0x0177, "GeForce4 460 Go" },
{ 0x0178, "Quadro4 550 XGL" },
{ 0x0179, "GeForce4 440 Go 64M" },
{ 0x017a, "Quadro NVS 400" },
{ 0x017c, "Quadro4 500 GoGL" },
{ 0x017d, "GeForce4 410 Go 16M" },
{ 0x0181, "GeForce4 MX 440 with AGP8X" },
{ 0x0182, "GeForce4 MX 440SE with AGP8X" },
{ 0x0183, "GeForce4 MX 420 with AGP8X" },
{ 0x0185, "GeForce4 MX 4000" },
{ 0x0188, "Quadro4 580 XGL" },
{ 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
{ 0x018a, "Quadro NVS 280 SD" },
{ 0x018b, "Quadro4 380 XGL" },
{ 0x018c, "Quadro NVS 50 PCI" },
{ 0x0191, "GeForce 8800 GTX" },
{ 0x0193, "GeForce 8800 GTS" },
{ 0x0194, "GeForce 8800 Ultra" },
{ 0x0197, "Tesla C870" },
{ 0x019d, "Quadro FX 5600" },
{ 0x019e, "Quadro FX 4600" },
{ 0x01a0, "GeForce2 Integrated GPU" },
{ 0x01d0, "GeForce 7350 LE" },
{ 0x01d1, "GeForce 7300 LE" },
{ 0x01d2, "GeForce 7550 LE" },
{ 0x01d3, "GeForce 7300 SE/7200 GS" },
{ 0x01d6, "GeForce Go 7200" },
{ 0x01d7, "GeForce Go 7300" },
{ 0x01d8, "GeForce Go 7400" },
{ 0x01da, "Quadro NVS 110M" },
{ 0x01db, "Quadro NVS 120M" },
{ 0x01dc, "Quadro FX 350M" },
{ 0x01dd, "GeForce 7500 LE" },
{ 0x01de, "Quadro FX 350" },
{ 0x01df, "GeForce 7300 GS" },
{ 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
{ 0x0200, "GeForce3" },
{ 0x0201, "GeForce3 Ti 200" },
{ 0x0202, "GeForce3 Ti 500" },
{ 0x0203, "Quadro DCC" },
{ 0x0211, "GeForce 6800" },
{ 0x0212, "GeForce 6800 LE" },
{ 0x0215, "GeForce 6800 GT" },
{ 0x0218, "GeForce 6800 XT" },
{ 0x0221, "GeForce 6200" },
{ 0x0222, "GeForce 6200 A-LE" },
{ 0x0240, "GeForce 6150" },
{ 0x0241, "GeForce 6150 LE" },
{ 0x0242, "GeForce 6100" },
{ 0x0244, "GeForce Go 6150" },
{ 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
{ 0x0247, "GeForce Go 6100" },
{ 0x0250, "GeForce4 Ti 4600" },
{ 0x0251, "GeForce4 Ti 4400" },
{ 0x0253, "GeForce4 Ti 4200" },
{ 0x0258, "Quadro4 900 XGL" },
{ 0x0259, "Quadro4 750 XGL" },
{ 0x025b, "Quadro4 700 XGL" },
{ 0x0280, "GeForce4 Ti 4800" },
{ 0x0281, "GeForce4 Ti 4200 with AGP8X" },
{ 0x0282, "GeForce4 Ti 4800 SE" },
{ 0x0286, "GeForce4 4200 Go" },
{ 0x0288, "Quadro4 980 XGL" },
{ 0x0289, "Quadro4 780 XGL" },
{ 0x028c, "Quadro4 700 GoGL" },
{ 0x0290, "GeForce 7900 GTX" },
{ 0x0291, "GeForce 7900 GT/GTO" },
{ 0x0292, "GeForce 7900 GS" },
{ 0x0293, "GeForce 7950 GX2" },
{ 0x0294, "GeForce 7950 GX2" },
{ 0x0295, "GeForce 7950 GT" },
{ 0x0297, "GeForce Go 7950 GTX" },
{ 0x0298, "GeForce Go 7900 GS" },
{ 0x0299, "Quadro NVS 510M" },
{ 0x029a, "Quadro FX 2500M" },
{ 0x029b, "Quadro FX 1500M" },
{ 0x029c, "Quadro FX 5500" },
{ 0x029d, "Quadro FX 3500" },
{ 0x029e, "Quadro FX 1500" },
{ 0x029f, "Quadro FX 4500 X2" },
{ 0x02e0, "GeForce 7600 GT" },
{ 0x02e1, "GeForce 7600 GS" },
{ 0x02e2, "GeForce 7300 GT" },
{ 0x02e3, "GeForce 7900 GS" },
{ 0x02e4, "GeForce 7950 GT" },
{ 0x0301, "GeForce FX 5800 Ultra" },
{ 0x0302, "GeForce FX 5800" },
{ 0x0308, "Quadro FX 2000" },
{ 0x0309, "Quadro FX 1000" },
{ 0x0311, "GeForce FX 5600 Ultra" },
{ 0x0312, "GeForce FX 5600" },
{ 0x0314, "GeForce FX 5600XT" },
{ 0x031a, "GeForce FX Go5600" },
{ 0x031b, "GeForce FX Go5650" },
{ 0x031c, "Quadro FX Go700" },
{ 0x0320, "GeForce FX 5200" },
{ 0x0321, "GeForce FX 5200 Ultra" },
{ 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
{ 0x0323, "GeForce FX 5200LE" },
{ 0x0324, "GeForce FX Go5200" },
{ 0x0325, "GeForce FX Go5250" },
{ 0x0326, "GeForce FX 5500" },
{ 0x0327, "GeForce FX 5100" },
{ 0x0328, "GeForce FX Go5200 32M/64M" },
{ 0x032a, "Quadro NVS 55/280 PCI" },
{ 0x032b, "Quadro FX 500/FX 600" },
{ 0x032c, "GeForce FX Go53xx" },
{ 0x032d, "GeForce FX Go5100" },
{ 0x0330, "GeForce FX 5900 Ultra" },
{ 0x0331, "GeForce FX 5900" },
{ 0x0332, "GeForce FX 5900XT" },
{ 0x0333, "GeForce FX 5950 Ultra" },
{ 0x0334, "GeForce FX 5900ZT" },
{ 0x0338, "Quadro FX 3000" },
{ 0x033f, "Quadro FX 700" },
{ 0x0341, "GeForce FX 5700 Ultra" },
{ 0x0342, "GeForce FX 5700" },
{ 0x0343, "GeForce FX 5700LE" },
{ 0x0344, "GeForce FX 5700VE" },
{ 0x0347, "GeForce FX Go5700" },
{ 0x0348, "GeForce FX Go5700" },
{ 0x034c, "Quadro FX Go1000" },
{ 0x034e, "Quadro FX 1100" },
{ 0x038b, "GeForce 7650 GS" },
{ 0x0390, "GeForce 7650 GS" },
{ 0x0391, "GeForce 7600 GT" },
{ 0x0392, "GeForce 7600 GS" },
{ 0x0393, "GeForce 7300 GT" },
{ 0x0394, "GeForce 7600 LE" },
{ 0x0395, "GeForce 7300 GT" },
{ 0x0397, "GeForce Go 7700" },
{ 0x0398, "GeForce Go 7600" },
{ 0x0399, "GeForce Go 7600 GT" },
{ 0x039c, "Quadro FX 560M" },
{ 0x039e, "Quadro FX 560" },
{ 0x03d0, "GeForce 6150SE nForce 430" },
{ 0x03d1, "GeForce 6100 nForce 405" },
{ 0x03d2, "GeForce 6100 nForce 400" },
{ 0x03d5, "GeForce 6100 nForce 420" },
{ 0x03d6, "GeForce 7025 / nForce 630a" },
{ 0x0400, "GeForce 8600 GTS" },
{ 0x0401, "GeForce 8600 GT" },
{ 0x0402, "GeForce 8600 GT" },
{ 0x0403, "GeForce 8600 GS" },
{ 0x0404, "GeForce 8400 GS" },
{ 0x0405, "GeForce 9500M GS" },
{ 0x0406, "GeForce 8300 GS" },
{ 0x0407, "GeForce 8600M GT" },
{ 0x0408, "GeForce 9650M GS" },
{ 0x0409, "GeForce 8700M GT" },
{ 0x040a, "Quadro FX 370" },
{ 0x040b, "Quadro NVS 320M" },
{ 0x040c, "Quadro FX 570M" },
{ 0x040d, "Quadro FX 1600M" },
{ 0x040e, "Quadro FX 570" },
{ 0x040f, "Quadro FX 1700" },
{ 0x0410, "GeForce GT 330" },
{ 0x0420, "GeForce 8400 SE" },
{ 0x0421, "GeForce 8500 GT" },
{ 0x0422, "GeForce 8400 GS" },
{ 0x0423, "GeForce 8300 GS" },
{ 0x0424, "GeForce 8400 GS" },
{ 0x0425, "GeForce 8600M GS" },
{ 0x0426, "GeForce 8400M GT" },
{ 0x0427, "GeForce 8400M GS" },
{ 0x0428, "GeForce 8400M G" },
{ 0x0429, "Quadro NVS 140M" },
{ 0x042a, "Quadro NVS 130M" },
{ 0x042b, "Quadro NVS 135M" },
{ 0x042c, "GeForce 9400 GT" },
{ 0x042d, "Quadro FX 360M" },
{ 0x042e, "GeForce 9300M G" },
{ 0x042f, "Quadro NVS 290" },
{ 0x0531, "GeForce 7150M / nForce 630M" },
{ 0x0533, "GeForce 7000M / nForce 610M" },
{ 0x053a, "GeForce 7050 PV / nForce 630a" },
{ 0x053b, "GeForce 7050 PV / nForce 630a" },
{ 0x053e, "GeForce 7025 / nForce 630a" },
{ 0x05e0, "GeForce GTX 295" },
{ 0x05e1, "GeForce GTX 280" },
{ 0x05e2, "GeForce GTX 260" },
{ 0x05e3, "GeForce GTX 285" },
{ 0x05e6, "GeForce GTX 275" },
{ 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
{ 0x05ea, "GeForce GTX 260" },
{ 0x05eb, "GeForce GTX 295" },
{ 0x05ed, "Quadroplex 2200 D2" },
{ 0x05f8, "Quadroplex 2200 S4" },
{ 0x05f9, "Quadro CX" },
{ 0x05fd, "Quadro FX 5800" },
{ 0x05fe, "Quadro FX 4800" },
{ 0x05ff, "Quadro FX 3800" },
{ 0x0600, "GeForce 8800 GTS 512" },
{ 0x0601, "GeForce 9800 GT" },
{ 0x0602, "GeForce 8800 GT" },
{ 0x0603, "GeForce GT 230" },
{ 0x0604, "GeForce 9800 GX2" },
{ 0x0605, "GeForce 9800 GT" },
{ 0x0606, "GeForce 8800 GS" },
{ 0x0607, "GeForce GTS 240" },
{ 0x0608, "GeForce 9800M GTX" },
{ 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
{ 0x060a, "GeForce GTX 280M" },
{ 0x060b, "GeForce 9800M GT" },
{ 0x060c, "GeForce 8800M GTX" },
{ 0x060d, "GeForce 8800 GS" },
{ 0x060f, "GeForce GTX 285M" },
{ 0x0610, "GeForce 9600 GSO" },
{ 0x0611, "GeForce 8800 GT" },
{ 0x0612, "GeForce 9800 GTX/9800 GTX+" },
{ 0x0613, "GeForce 9800 GTX+" },
{ 0x0614, "GeForce 9800 GT" },
{ 0x0615, "GeForce GTS 250" },
{ 0x0617, "GeForce 9800M GTX" },
{ 0x0618, "GeForce GTX 260M" },
{ 0x0619, "Quadro FX 4700 X2" },
{ 0x061a, "Quadro FX 3700" },
{ 0x061b, "Quadro VX 200" },
{ 0x061c, "Quadro FX 3600M" },
{ 0x061d, "Quadro FX 2800M" },
{ 0x061e, "Quadro FX 3700M" },
{ 0x061f, "Quadro FX 3800M" },
{ 0x0621, "GeForce GT 230" },
{ 0x0622, "GeForce 9600 GT" },
{ 0x0623, "GeForce 9600 GS" },
{ 0x0625, "GeForce 9600 GSO 512" },
{ 0x0626, "GeForce GT 130" },
{ 0x0627, "GeForce GT 140" },
{ 0x0628, "GeForce 9800M GTS" },
{ 0x062a, "GeForce 9700M GTS" },
{ 0x062b, "GeForce 9800M GS" },
{ 0x062c, "GeForce 9800M GTS" },
{ 0x062d, "GeForce 9600 GT" },
{ 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
{ 0x0630, "GeForce 9700 S" },
{ 0x0631, "GeForce GTS 160M" },
{ 0x0632, "GeForce GTS 150M" },
{ 0x0635, "GeForce 9600 GSO" },
{ 0x0637, "GeForce 9600 GT" },
{ 0x0638, "Quadro FX 1800" },
{ 0x063a, "Quadro FX 2700M" },
{ 0x0640, "GeForce 9500 GT" },
{ 0x0641, "GeForce 9400 GT" },
{ 0x0643, "GeForce 9500 GT" },
{ 0x0644, "GeForce 9500 GS" },
{ 0x0645, "GeForce 9500 GS" },
{ 0x0646, "GeForce GT 120" },
{ 0x0647, "GeForce 9600M GT" },
{ 0x0648, "GeForce 9600M GS" },
{ 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
{ 0x064a, "GeForce 9700M GT" },
{ 0x064b, "GeForce 9500M G" },
{ 0x064c, "GeForce 9650M GT" },
{ 0x0651, "GeForce G 110M" },
{ 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
{ 0x0653, "GeForce GT 120M" },
{ 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
{ 0x0655, NULL, nvkm_device_pci_10de_0655 },
{ 0x0656, NULL, nvkm_device_pci_10de_0656 },
{ 0x0658, "Quadro FX 380" },
{ 0x0659, "Quadro FX 580" },
{ 0x065a, "Quadro FX 1700M" },
{ 0x065b, "GeForce 9400 GT" },
{ 0x065c, "Quadro FX 770M" },
{ 0x06c0, "GeForce GTX 480" },
{ 0x06c4, "GeForce GTX 465" },
{ 0x06ca, "GeForce GTX 480M" },
{ 0x06cd, "GeForce GTX 470" },
{ 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
{ 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
{ 0x06d8, "Quadro 6000" },
{ 0x06d9, "Quadro 5000" },
{ 0x06da, "Quadro 5000M" },
{ 0x06dc, "Quadro 6000" },
{ 0x06dd, "Quadro 4000" },
{ 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
{ 0x06df, "Tesla M2070-Q" },
{ 0x06e0, "GeForce 9300 GE" },
{ 0x06e1, "GeForce 9300 GS" },
{ 0x06e2, "GeForce 8400" },
{ 0x06e3, "GeForce 8400 SE" },
{ 0x06e4, "GeForce 8400 GS" },
{ 0x06e5, "GeForce 9300M GS" },
{ 0x06e6, "GeForce G100" },
{ 0x06e7, "GeForce 9300 SE" },
{ 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
{ 0x06e9, "GeForce 9300M GS" },
{ 0x06ea, "Quadro NVS 150M" },
{ 0x06eb, "Quadro NVS 160M" },
{ 0x06ec, "GeForce G 105M" },
{ 0x06ef, "GeForce G 103M" },
{ 0x06f1, "GeForce G105M" },
{ 0x06f8, "Quadro NVS 420" },
{ 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
{ 0x06fa, "Quadro NVS 450" },
{ 0x06fb, "Quadro FX 370M" },
{ 0x06fd, "Quadro NVS 295" },
{ 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
{ 0x07e0, "GeForce 7150 / nForce 630i" },
{ 0x07e1, "GeForce 7100 / nForce 630i" },
{ 0x07e2, "GeForce 7050 / nForce 630i" },
{ 0x07e3, "GeForce 7050 / nForce 610i" },
{ 0x07e5, "GeForce 7050 / nForce 620i" },
{ 0x0840, "GeForce 8200M" },
{ 0x0844, "GeForce 9100M G" },
{ 0x0845, "GeForce 8200M G" },
{ 0x0846, "GeForce 9200" },
{ 0x0847, "GeForce 9100" },
{ 0x0848, "GeForce 8300" },
{ 0x0849, "GeForce 8200" },
{ 0x084a, "nForce 730a" },
{ 0x084b, "GeForce 9200" },
{ 0x084c, "nForce 980a/780a SLI" },
{ 0x084d, "nForce 750a SLI" },
{ 0x084f, "GeForce 8100 / nForce 720a" },
{ 0x0860, "GeForce 9400" },
{ 0x0861, "GeForce 9400" },
{ 0x0862, "GeForce 9400M G" },
{ 0x0863, "GeForce 9400M" },
{ 0x0864, "GeForce 9300" },
{ 0x0865, "ION" },
{ 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
{ 0x0867, "GeForce 9400" },
{ 0x0868, "nForce 760i SLI" },
{ 0x0869, "GeForce 9400" },
{ 0x086a, "GeForce 9400" },
{ 0x086c, "GeForce 9300 / nForce 730i" },
{ 0x086d, "GeForce 9200" },
{ 0x086e, "GeForce 9100M G" },
{ 0x086f, "GeForce 8200M G" },
{ 0x0870, "GeForce 9400M" },
{ 0x0871, "GeForce 9200" },
{ 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
{ 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
{ 0x0874, "ION" },
{ 0x0876, "ION" },
{ 0x087a, "GeForce 9400" },
{ 0x087d, "ION" },
{ 0x087e, "ION LE" },
{ 0x087f, "ION LE" },
{ 0x08a0, "GeForce 320M" },
{ 0x08a2, "GeForce 320M" },
{ 0x08a3, "GeForce 320M" },
{ 0x08a4, "GeForce 320M" },
{ 0x08a5, "GeForce 320M" },
{ 0x0a20, "GeForce GT 220" },
{ 0x0a22, "GeForce 315" },
{ 0x0a23, "GeForce 210" },
{ 0x0a26, "GeForce 405" },
{ 0x0a27, "GeForce 405" },
{ 0x0a28, "GeForce GT 230M" },
{ 0x0a29, "GeForce GT 330M" },
{ 0x0a2a, "GeForce GT 230M" },
{ 0x0a2b, "GeForce GT 330M" },
{ 0x0a2c, "NVS 5100M" },
{ 0x0a2d, "GeForce GT 320M" },
{ 0x0a32, "GeForce GT 415" },
{ 0x0a34, "GeForce GT 240M" },
{ 0x0a35, "GeForce GT 325M" },
{ 0x0a38, "Quadro 400" },
{ 0x0a3c, "Quadro FX 880M" },
{ 0x0a60, "GeForce G210" },
{ 0x0a62, "GeForce 205" },
{ 0x0a63, "GeForce 310" },
{ 0x0a64, "Second Generation ION" },
{ 0x0a65, "GeForce 210" },
{ 0x0a66, "GeForce 310" },
{ 0x0a67, "GeForce 315" },
{ 0x0a68, "GeForce G105M" },
{ 0x0a69, "GeForce G105M" },
{ 0x0a6a, "NVS 2100M" },
{ 0x0a6c, "NVS 3100M" },
{ 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
{ 0x0a6f, "Second Generation ION" },
{ 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
{ 0x0a71, "GeForce 305M" },
{ 0x0a72, "GeForce 310M" },
{ 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
{ 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
{ 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
{ 0x0a76, "Second Generation ION" },
{ 0x0a78, "Quadro FX 380 LP" },
{ 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
{ 0x0a7c, "Quadro FX 380M" },
{ 0x0ca0, "GeForce GT 330" },
{ 0x0ca2, "GeForce GT 320" },
{ 0x0ca3, "GeForce GT 240" },
{ 0x0ca4, "GeForce GT 340" },
{ 0x0ca5, "GeForce GT 220" },
{ 0x0ca7, "GeForce GT 330" },
{ 0x0ca8, "GeForce GTS 260M" },
{ 0x0ca9, "GeForce GTS 250M" },
{ 0x0cac, "GeForce GT 220" },
{ 0x0caf, "GeForce GT 335M" },
{ 0x0cb0, "GeForce GTS 350M" },
{ 0x0cb1, "GeForce GTS 360M" },
{ 0x0cbc, "Quadro FX 1800M" },
{ 0x0dc0, "GeForce GT 440" },
{ 0x0dc4, "GeForce GTS 450" },
{ 0x0dc5, "GeForce GTS 450" },
{ 0x0dc6, "GeForce GTS 450" },
{ 0x0dcd, "GeForce GT 555M" },
{ 0x0dce, "GeForce GT 555M" },
{ 0x0dd1, "GeForce GTX 460M" },
{ 0x0dd2, "GeForce GT 445M" },
{ 0x0dd3, "GeForce GT 435M" },
{ 0x0dd6, "GeForce GT 550M" },
{ 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
{ 0x0dda, "Quadro 2000M" },
{ 0x0de0, "GeForce GT 440" },
{ 0x0de1, "GeForce GT 430" },
{ 0x0de2, "GeForce GT 420" },
{ 0x0de3, "GeForce GT 635M" },
{ 0x0de4, "GeForce GT 520" },
{ 0x0de5, "GeForce GT 530" },
{ 0x0de7, "GeForce GT 610" },
{ 0x0de8, "GeForce GT 620M" },
{ 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
{ 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
{ 0x0deb, "GeForce GT 555M" },
{ 0x0dec, "GeForce GT 525M" },
{ 0x0ded, "GeForce GT 520M" },
{ 0x0dee, "GeForce GT 415M" },
{ 0x0def, "NVS 5400M" },
{ 0x0df0, "GeForce GT 425M" },
{ 0x0df1, "GeForce GT 420M" },
{ 0x0df2, "GeForce GT 435M" },
{ 0x0df3, "GeForce GT 420M" },
{ 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
{ 0x0df5, "GeForce GT 525M" },
{ 0x0df6, "GeForce GT 550M" },
{ 0x0df7, "GeForce GT 520M" },
{ 0x0df8, "Quadro 600" },
{ 0x0df9, "Quadro 500M" },
{ 0x0dfa, "Quadro 1000M" },
{ 0x0dfc, "NVS 5200M" },
{ 0x0e22, "GeForce GTX 460" },
{ 0x0e23, "GeForce GTX 460 SE" },
{ 0x0e24, "GeForce GTX 460" },
{ 0x0e30, "GeForce GTX 470M" },
{ 0x0e31, "GeForce GTX 485M" },
{ 0x0e3a, "Quadro 3000M" },
{ 0x0e3b, "Quadro 4000M" },
{ 0x0f00, "GeForce GT 630" },
{ 0x0f01, "GeForce GT 620" },
{ 0x0f02, "GeForce GT 730" },
{ 0x0fc0, "GeForce GT 640" },
{ 0x0fc1, "GeForce GT 640" },
{ 0x0fc2, "GeForce GT 630" },
{ 0x0fc6, "GeForce GTX 650" },
{ 0x0fc8, "GeForce GT 740" },
{ 0x0fc9, "GeForce GT 730" },
{ 0x0fcd, "GeForce GT 755M" },
{ 0x0fce, "GeForce GT 640M LE" },
{ 0x0fd1, "GeForce GT 650M" },
{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
{ 0x0fd3, "GeForce GT 640M LE" },
{ 0x0fd4, "GeForce GTX 660M" },
{ 0x0fd5, "GeForce GT 650M" },
{ 0x0fd8, "GeForce GT 640M" },
{ 0x0fd9, "GeForce GT 645M" },
{ 0x0fdf, "GeForce GT 740M" },
{ 0x0fe0, "GeForce GTX 660M" },
{ 0x0fe1, "GeForce GT 730M" },
{ 0x0fe2, "GeForce GT 745M" },
{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
{ 0x0fe4, "GeForce GT 750M" },
{ 0x0fe9, "GeForce GT 750M" },
{ 0x0fea, "GeForce GT 755M" },
{ 0x0fec, "GeForce 710A" },
{ 0x0fef, "GRID K340" },
{ 0x0ff2, "GRID K1" },
{ 0x0ff3, "Quadro K420" },
{ 0x0ff6, "Quadro K1100M" },
{ 0x0ff8, "Quadro K500M" },
{ 0x0ff9, "Quadro K2000D" },
{ 0x0ffa, "Quadro K600" },
{ 0x0ffb, "Quadro K2000M" },
{ 0x0ffc, "Quadro K1000M" },
{ 0x0ffd, "NVS 510" },
{ 0x0ffe, "Quadro K2000" },
{ 0x0fff, "Quadro 410" },
{ 0x1001, "GeForce GTX TITAN Z" },
{ 0x1004, "GeForce GTX 780" },
{ 0x1005, "GeForce GTX TITAN" },
{ 0x1007, "GeForce GTX 780" },
{ 0x1008, "GeForce GTX 780 Ti" },
{ 0x100a, "GeForce GTX 780 Ti" },
{ 0x100c, "GeForce GTX TITAN Black" },
{ 0x1021, "Tesla K20Xm" },
{ 0x1022, "Tesla K20c" },
{ 0x1023, "Tesla K40m" },
{ 0x1024, "Tesla K40c" },
{ 0x1026, "Tesla K20s" },
{ 0x1027, "Tesla K40st" },
{ 0x1028, "Tesla K20m" },
{ 0x1029, "Tesla K40s" },
{ 0x102a, "Tesla K40t" },
{ 0x102d, "Tesla K80" },
{ 0x103a, "Quadro K6000" },
{ 0x103c, "Quadro K5200" },
{ 0x1040, "GeForce GT 520" },
{ 0x1042, "GeForce 510" },
{ 0x1048, "GeForce 605" },
{ 0x1049, "GeForce GT 620" },
{ 0x104a, "GeForce GT 610" },
{ 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
{ 0x104c, "GeForce GT 705" },
{ 0x1050, "GeForce GT 520M" },
{ 0x1051, "GeForce GT 520MX" },
{ 0x1052, "GeForce GT 520M" },
{ 0x1054, "GeForce 410M" },
{ 0x1055, "GeForce 410M" },
{ 0x1056, "NVS 4200M" },
{ 0x1057, "NVS 4200M" },
{ 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
{ 0x1059, "GeForce 610M" },
{ 0x105a, "GeForce 610M" },
{ 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
{ 0x107c, "NVS 315" },
{ 0x107d, "NVS 310" },
{ 0x1080, "GeForce GTX 580" },
{ 0x1081, "GeForce GTX 570" },
{ 0x1082, "GeForce GTX 560 Ti" },
{ 0x1084, "GeForce GTX 560" },
{ 0x1086, "GeForce GTX 570" },
{ 0x1087, "GeForce GTX 560 Ti" },
{ 0x1088, "GeForce GTX 590" },
{ 0x1089, "GeForce GTX 580" },
{ 0x108b, "GeForce GTX 580" },
{ 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
{ 0x1094, "Tesla M2075" },
{ 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
{ 0x109a, "Quadro 5010M" },
{ 0x109b, "Quadro 7000" },
{ 0x10c0, "GeForce 9300 GS" },
{ 0x10c3, "GeForce 8400GS" },
{ 0x10c5, "GeForce 405" },
{ 0x10d8, "NVS 300" },
{ 0x1140, NULL, nvkm_device_pci_10de_1140 },
{ 0x1180, "GeForce GTX 680" },
{ 0x1183, "GeForce GTX 660 Ti" },
{ 0x1184, "GeForce GTX 770" },
{ 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
{ 0x1187, "GeForce GTX 760" },
{ 0x1188, "GeForce GTX 690" },
{ 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
{ 0x118a, "GRID K520" },
{ 0x118e, "GeForce GTX 760 (192-bit)" },
{ 0x118f, "Tesla K10" },
{ 0x1193, "GeForce GTX 760 Ti OEM" },
{ 0x1194, "Tesla K8" },
{ 0x1195, "GeForce GTX 660" },
{ 0x1198, "GeForce GTX 880M" },
{ 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
{ 0x119a, "GeForce GTX 860M" },
{ 0x119d, "GeForce GTX 775M" },
{ 0x119e, "GeForce GTX 780M" },
{ 0x119f, "GeForce GTX 780M" },
{ 0x11a0, "GeForce GTX 680M" },
{ 0x11a1, "GeForce GTX 670MX" },
{ 0x11a2, "GeForce GTX 675MX" },
{ 0x11a3, "GeForce GTX 680MX" },
{ 0x11a7, "GeForce GTX 675MX" },
{ 0x11b4, "Quadro K4200" },
{ 0x11b6, "Quadro K3100M" },
{ 0x11b7, "Quadro K4100M" },
{ 0x11b8, "Quadro K5100M" },
{ 0x11ba, "Quadro K5000" },
{ 0x11bc, "Quadro K5000M" },
{ 0x11bd, "Quadro K4000M" },
{ 0x11be, "Quadro K3000M" },
{ 0x11bf, "GRID K2" },
{ 0x11c0, "GeForce GTX 660" },
{ 0x11c2, "GeForce GTX 650 Ti BOOST" },
{ 0x11c3, "GeForce GTX 650 Ti" },
{ 0x11c4, "GeForce GTX 645" },
{ 0x11c5, "GeForce GT 740" },
{ 0x11c6, "GeForce GTX 650 Ti" },
{ 0x11c8, "GeForce GTX 650" },
{ 0x11cb, "GeForce GT 740" },
{ 0x11e0, "GeForce GTX 770M" },
{ 0x11e1, "GeForce GTX 765M" },
{ 0x11e2, "GeForce GTX 765M" },
{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
{ 0x11fa, "Quadro K4000" },
{ 0x11fc, "Quadro K2100M" },
{ 0x1200, "GeForce GTX 560 Ti" },
{ 0x1201, "GeForce GTX 560" },
{ 0x1203, "GeForce GTX 460 SE v2" },
{ 0x1205, "GeForce GTX 460 v2" },
{ 0x1206, "GeForce GTX 555" },
{ 0x1207, "GeForce GT 645" },
{ 0x1208, "GeForce GTX 560 SE" },
{ 0x1210, "GeForce GTX 570M" },
{ 0x1211, "GeForce GTX 580M" },
{ 0x1212, "GeForce GTX 675M" },
{ 0x1213, "GeForce GTX 670M" },
{ 0x1241, "GeForce GT 545" },
{ 0x1243, "GeForce GT 545" },
{ 0x1244, "GeForce GTX 550 Ti" },
{ 0x1245, "GeForce GTS 450" },
{ 0x1246, "GeForce GT 550M" },
{ 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
{ 0x1248, "GeForce GT 555M" },
{ 0x1249, "GeForce GTS 450" },
{ 0x124b, "GeForce GT 640" },
{ 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
{ 0x1251, "GeForce GTX 560M" },
{ 0x1280, "GeForce GT 635" },
{ 0x1281, "GeForce GT 710" },
{ 0x1282, "GeForce GT 640" },
{ 0x1284, "GeForce GT 630" },
{ 0x1286, "GeForce GT 720" },
{ 0x1287, "GeForce GT 730" },
{ 0x1288, "GeForce GT 720" },
{ 0x1289, "GeForce GT 710" },
{ 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
{ 0x1291, "GeForce GT 735M" },
{ 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
{ 0x1293, "GeForce GT 730M" },
{ 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
{ 0x1296, "GeForce 825M" },
{ 0x1298, "GeForce GT 720M" },
{ 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
{ 0x129a, "GeForce 910M" },
{ 0x12b9, "Quadro K610M" },
{ 0x12ba, "Quadro K510M" },
{ 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
{ 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
{ 0x1344, "GeForce 845M" },
{ 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
{ 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
{ 0x137a, NULL, nvkm_device_pci_10de_137a },
{ 0x137d, NULL, nvkm_device_pci_10de_137d },
{ 0x1380, "GeForce GTX 750 Ti" },
{ 0x1381, "GeForce GTX 750" },
{ 0x1382, "GeForce GTX 745" },
{ 0x1390, "GeForce 845M" },
{ 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
{ 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
{ 0x1393, "GeForce 840M" },
{ 0x1398, "GeForce 845M" },
{ 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
{ 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
{ 0x139c, "GeForce 940M" },
{ 0x13b3, "Quadro K2200M" },
{ 0x13ba, "Quadro K2200" },
{ 0x13bb, "Quadro K620" },
{ 0x13bc, "Quadro K1200" },
{ 0x13c0, "GeForce GTX 980" },
{ 0x13c2, "GeForce GTX 970" },
{ 0x13d7, "GeForce GTX 980M" },
{ 0x13d8, "GeForce GTX 970M" },
{ 0x13d9, "GeForce GTX 965M" },
{ 0x1401, "GeForce GTX 960" },
{ 0x1617, "GeForce GTX 980M" },
{ 0x1618, "GeForce GTX 970M" },
{ 0x1619, "GeForce GTX 965M" },
{ 0x17c2, "GeForce GTX TITAN X" },
{ 0x17c8, "GeForce GTX 980 Ti" },
{ 0x17f0, "Quadro M6000" },
{}
};
static struct nvkm_device_pci *
nvkm_device_pci(struct nvkm_device *device)
{
return container_of(device, struct nvkm_device_pci, device);
}
static resource_size_t
nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
return pci_resource_start(pdev->pdev, bar);
}
static resource_size_t
nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
return pci_resource_len(pdev->pdev, bar);
}
static int
nvkm_device_pci_irq(struct nvkm_device *device)
{
return nvkm_device_pci(device)->pdev->irq;
}
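/* fini() powers the PCI device down across suspend; preinit() re-enables
 * it and restores bus mastering on resume. */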
static void
nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
if (suspend) {
pci_disable_device(pdev->pdev);
pdev->suspend = true;
}
}
static int
nvkm_device_pci_preinit(struct nvkm_device *device)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
if (pdev->suspend) {
int ret = pci_enable_device(pdev->pdev);
if (ret)
return ret;
pci_set_master(pdev->pdev);
pdev->suspend = false;
}
return 0;
}
static void *
nvkm_device_pci_dtor(struct nvkm_device *device)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
pci_disable_device(pdev->pdev);
return pdev;
}
static const struct nvkm_device_func
nvkm_device_pci_func = {
.pci = nvkm_device_pci,
.dtor = nvkm_device_pci_dtor,
.preinit = nvkm_device_pci_preinit,
.fini = nvkm_device_pci_fini,
.irq = nvkm_device_pci_irq,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
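/* Mappings are treated as CPU-coherent everywhere except 32-bit ARM. */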
.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};
int
nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
const struct nvkm_device_quirk *quirk = NULL;
const struct nvkm_device_pci_device *pcid;
const struct nvkm_device_pci_vendor *pciv;
const char *name = NULL;
struct nvkm_device_pci *pdev;
int ret, bits;
ret = pci_enable_device(pci_dev);
if (ret)
return ret;
switch (pci_dev->vendor) {
case 0x10de: pcid = nvkm_device_pci_10de; break;
default:
pcid = NULL;
break;
}
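/* Walk the vendor's device table; on a device-ID match, scan the
 * subsystem IDs for a board-specific name and/or quirk override. */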
while (pcid && pcid->device) {
if (pcid->device == pci_dev->device) {
pciv = pcid->vendor;
while (pciv && pciv->vendor) {
if (pciv->vendor == pci_dev->subsystem_vendor &&
pciv->device == pci_dev->subsystem_device) {
quirk = &pciv->quirk;
name = pciv->name;
break;
}
pciv++;
}
if (!name)
name = pcid->name;
break;
}
pcid++;
}
if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
pci_disable_device(pci_dev);
return -ENOMEM;
}
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
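/* The 64-bit handle encodes the PCI address: domain in bits 63:32,
 * bus in 31:16, slot in 15:8 and function in 7:0. */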
ret = nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
(u64)pci_domain_nr(pci_dev->bus) << 32 |
pci_dev->bus->number << 16 |
PCI_SLOT(pci_dev->devfn) << 8 |
PCI_FUNC(pci_dev->devfn), name,
cfg, dbg, detect, mmio, subdev_mask,
&pdev->device);
if (ret)
return ret;
/* Set DMA mask based on capabilities reported by the MMU subdev. */
if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
bits = pdev->device.mmu->dma_bits;
else
bits = 32;
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
if (ret && bits != 32) {
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
pdev->device.mmu->dma_bits = 32;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
int ret;
if (tdev->vdd) {
ret = regulator_enable(tdev->vdd);
if (ret)
goto err_power;
}
ret = clk_prepare_enable(tdev->clk);
if (ret)
goto err_clk;
ret = clk_prepare_enable(tdev->clk_ref);
if (ret)
goto err_clk_ref;
ret = clk_prepare_enable(tdev->clk_pwr);
if (ret)
goto err_clk_pwr;
clk_set_rate(tdev->clk_pwr, 204000000);
udelay(10);
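/* Without a power domain to do it for us, sequence the 3D partition
 * out of powergate manually: assert reset, remove the power clamp,
 * then release reset. */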
if (!tdev->pdev->dev.pm_domain) {
reset_control_assert(tdev->rst);
udelay(10);
ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
if (ret)
goto err_clamp;
udelay(10);
reset_control_deassert(tdev->rst);
udelay(10);
}
return 0;
err_clamp:
clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
if (tdev->vdd)
regulator_disable(tdev->vdd);
err_power:
return ret;
}
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
int ret;
clk_disable_unprepare(tdev->clk_pwr);
clk_disable_unprepare(tdev->clk_ref);
clk_disable_unprepare(tdev->clk);
udelay(10);
if (tdev->vdd) {
ret = regulator_disable(tdev->vdd);
if (ret)
return ret;
}
return 0;
}
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
struct device *dev = &tdev->pdev->dev;
unsigned long pgsize_bitmap;
int ret;
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
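/* Tear down any ARM DMA-API IOMMU mapping attached at bus probe time,
 * so the explicit IOMMU domain set up below can take over. */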
if (dev->archdata.mapping) {
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
#endif
if (!tdev->func->iommu_bit)
return;
mutex_init(&tdev->iommu.mutex);
if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
/*
 * An IOMMU is only usable if it supports page sizes smaller
 * than or equal to the system's PAGE_SIZE, preferring the
 * case where both are equal.
 */
pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
tdev->iommu.pgshift = PAGE_SHIFT;
} else {
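/* Otherwise pick the largest supported page size below PAGE_SIZE;
 * fls() returns its 1-based bit position, so subtract one to get
 * the shift. */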
tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
if (tdev->iommu.pgshift == 0) {
dev_warn(dev, "unsupported IOMMU page size\n");
goto free_domain;
}
tdev->iommu.pgshift -= 1;
}
ret = iommu_attach_device(tdev->iommu.domain, dev);
if (ret)
goto free_domain;
ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
(1ULL << tdev->func->iommu_bit) >>
tdev->iommu.pgshift, 1);
if (ret)
goto detach_device;
}
return;
detach_device:
iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
iommu_domain_free(tdev->iommu.domain);
error:
tdev->iommu.domain = NULL;
tdev->iommu.pgshift = 0;
dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
if (tdev->iommu.domain) {
nvkm_mm_fini(&tdev->iommu.mm);
iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
iommu_domain_free(tdev->iommu.domain);
}
#endif
}
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
return container_of(device, struct nvkm_device_tegra, device);
}
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}
static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? res->start : 0;
}
static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? resource_size(res) : 0;
}
static int
nvkm_device_tegra_irq(struct nvkm_device *device)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
return platform_get_irq_byname(tdev->pdev, "stall");
}
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
nvkm_device_tegra_power_down(tdev);
nvkm_device_tegra_remove_iommu(tdev);
return tdev;
}
static const struct nvkm_device_func
nvkm_device_tegra_func = {
.tegra = nvkm_device_tegra,
.dtor = nvkm_device_tegra_dtor,
.irq = nvkm_device_tegra_irq,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
.cpu_coherent = false,
};
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
unsigned long rate;
int ret;
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
tdev->func = func;
tdev->pdev = pdev;
if (func->require_vdd) {
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
ret = PTR_ERR(tdev->vdd);
goto free;
}
}
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
if (IS_ERR(tdev->rst)) {
ret = PTR_ERR(tdev->rst);
goto free;
}
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
if (IS_ERR(tdev->clk)) {
ret = PTR_ERR(tdev->clk);
goto free;
}
rate = clk_get_rate(tdev->clk);
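/* An unconfigured clock reads back as zero; request the maximum rate
 * and let the clock framework clamp it to what the hardware supports. */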
if (rate == 0) {
ret = clk_set_rate(tdev->clk, ULONG_MAX);
if (ret < 0)
goto free;
rate = clk_get_rate(tdev->clk);
dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
}
if (func->require_ref_clk)
tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(tdev->clk_ref)) {
ret = PTR_ERR(tdev->clk_ref);
goto free;
}
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
if (IS_ERR(tdev->clk_pwr)) {
ret = PTR_ERR(tdev->clk_pwr);
goto free;
}
/*
 * The IOMMU bit defines the upper limit of the GPU-addressable space.
 */
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
if (ret)
goto free;
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
goto powerdown;
*pdevice = &tdev->device;
return 0;
powerdown:
nvkm_device_tegra_power_down(tdev);
remove:
nvkm_device_tegra_remove_iommu(tdev);
free:
kfree(tdev);
return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
return -ENOSYS;
}
#endif
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "acpi.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/therm.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
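/* Unlocked list walk; callers must hold nv_devices_mutex. */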
static struct nvkm_device *
nvkm_device_find_locked(u64 handle)
{
struct nvkm_device *device;
list_for_each_entry(device, &nv_devices, head) {
if (device->handle == handle)
return device;
}
return NULL;
}
struct nvkm_device *
nvkm_device_find(u64 handle)
{
struct nvkm_device *device;
mutex_lock(&nv_devices_mutex);
device = nvkm_device_find_locked(handle);
mutex_unlock(&nv_devices_mutex);
return device;
}
int
nvkm_device_list(u64 *name, int size)
{
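/* Copy up to 'size' handles into 'name'; the return value is the total
 * number of devices, which may exceed 'size'. */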
struct nvkm_device *device;
int nr = 0;
mutex_lock(&nv_devices_mutex);
list_for_each_entry(device, &nv_devices, head) {
if (nr++ < size)
name[nr - 1] = device->handle;
}
mutex_unlock(&nv_devices_mutex);
return nr;
}
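/* Per-chipset tables: each entry pairs an instance bitmask with the
 * constructor used for that subdev/engine on the chipset. */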
static const struct nvkm_device_chip
null_chipset = {
.name = "NULL",
.bios = { 0x00000001, nvkm_bios_new },
};
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv04_devinit_new },
.fb = { 0x00000001, nv04_fb_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv04_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv04_fifo_new },
.gr = { 0x00000001, nv04_gr_new },
.sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv5_chipset = {
.name = "NV05",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv05_devinit_new },
.fb = { 0x00000001, nv04_fb_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv04_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv04_fifo_new },
.gr = { 0x00000001, nv04_gr_new },
.sw = { 0x00000001, nv04_sw_new },
};
static const struct nvkm_device_chip
nv10_chipset = {
.name = "NV10",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv04_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.gr = { 0x00000001, nv10_gr_new },
};
static const struct nvkm_device_chip
nv11_chipset = {
.name = "NV11",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv11_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv10_fifo_new },
.gr = { 0x00000001, nv15_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv15_chipset = {
.name = "NV15",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv04_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv10_fifo_new },
.gr = { 0x00000001, nv15_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv17_chipset = {
.name = "NV17",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv17_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv18_chipset = {
.name = "NV18",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv17_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1a_chipset = {
.name = "nForce",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv1a_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv04_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv10_fifo_new },
.gr = { 0x00000001, nv15_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv1f_chipset = {
.name = "nForce2",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv1a_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv17_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv20_chipset = {
.name = "NV20",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv20_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv20_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv25_chipset = {
.name = "NV25",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv25_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv25_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv28_chipset = {
.name = "NV28",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv25_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv25_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv2a_chipset = {
.name = "NV2A",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv25_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv2a_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
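/* NV3x (Rankine) chipsets. */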
static const struct nvkm_device_chip
nv30_chipset = {
.name = "NV30",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv30_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv30_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv31_chipset = {
.name = "NV31",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv30_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv30_gr_new },
.mpeg = { 0x00000001, nv31_mpeg_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv34_chipset = {
.name = "NV34",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv10_devinit_new },
.fb = { 0x00000001, nv10_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv34_gr_new },
.mpeg = { 0x00000001, nv31_mpeg_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv35_chipset = {
.name = "NV35",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv04_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv35_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv35_gr_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv36_chipset = {
.name = "NV36",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv04_clk_new },
.devinit = { 0x00000001, nv20_devinit_new },
.fb = { 0x00000001, nv36_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv04_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv04_pci_new },
.timer = { 0x00000001, nv04_timer_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv17_fifo_new },
.gr = { 0x00000001, nv35_gr_new },
.mpeg = { 0x00000001, nv31_mpeg_new },
.sw = { 0x00000001, nv10_sw_new },
};
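/* NV4x/G7x (Curie) chipsets; the C51/C6x/C73 IGPs follow nv50 below, in chipset-id order. */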
static const struct nvkm_device_chip
nv40_chipset = {
.name = "NV40",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv40_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv40_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv41_chipset = {
.name = "NV41",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv41_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv42_chipset = {
.name = "NV42",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv41_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv43_chipset = {
.name = "NV43",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv41_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv44_chipset = {
.name = "NV44",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv44_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv45_chipset = {
.name = "NV45",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv40_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv46_chipset = {
.name = "G72",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv46_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv46_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv47_chipset = {
.name = "G70",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv47_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv49_chipset = {
.name = "G71",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv49_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4a_chipset = {
.name = "NV44A",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv44_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv04_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4b_chipset = {
.name = "G73",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv49_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv17_mc_new },
.mmu = { 0x00000001, nv41_mmu_new },
.pci = { 0x00000001, nv40_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4c_chipset = {
.name = "C61",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv46_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv4c_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv4e_chipset = {
.name = "C51",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv4e_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv4e_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv4c_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
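/* G8x/G9x/GT2xx (Tesla) chipsets, beginning with G80: the first entries with bar, fuse and mxm subdevs. */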
static const struct nvkm_device_chip
nv50_chipset = {
.name = "G80",
.bar = { 0x00000001, nv50_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv50_bus_new },
.clk = { 0x00000001, nv50_clk_new },
.devinit = { 0x00000001, nv50_devinit_new },
.fb = { 0x00000001, nv50_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, nv50_gpio_new },
.i2c = { 0x00000001, nv50_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, nv50_mc_new },
.mmu = { 0x00000001, nv50_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, nv46_pci_new },
.therm = { 0x00000001, nv50_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv50_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, nv50_fifo_new },
.gr = { 0x00000001, nv50_gr_new },
.mpeg = { 0x00000001, nv50_mpeg_new },
.pm = { 0x00000001, nv50_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nv63_chipset = {
.name = "C73",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv46_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv4c_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv67_chipset = {
.name = "C67",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv46_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv4c_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv68_chipset = {
.name = "C68",
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv31_bus_new },
.clk = { 0x00000001, nv40_clk_new },
.devinit = { 0x00000001, nv1a_devinit_new },
.fb = { 0x00000001, nv46_fb_new },
.gpio = { 0x00000001, nv10_gpio_new },
.i2c = { 0x00000001, nv04_i2c_new },
.imem = { 0x00000001, nv40_instmem_new },
.mc = { 0x00000001, nv44_mc_new },
.mmu = { 0x00000001, nv44_mmu_new },
.pci = { 0x00000001, nv4c_pci_new },
.therm = { 0x00000001, nv40_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, nv04_disp_new },
.dma = { 0x00000001, nv04_dma_new },
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
.pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
static const struct nvkm_device_chip
nv84_chipset = {
.name = "G84",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv50_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, nv50_gpio_new },
.i2c = { 0x00000001, nv50_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g84_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, g84_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv86_chipset = {
.name = "G86",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv50_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, nv50_gpio_new },
.i2c = { 0x00000001, nv50_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g84_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, g84_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv92_chipset = {
.name = "G92",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, nv50_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, nv50_gpio_new },
.i2c = { 0x00000001, nv50_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g92_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, g84_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv94_chipset = {
.name = "G94",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, g94_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nv96_chipset = {
.name = "G96",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, g94_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
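/*
 * G98 replaces the bsp/vp/cipher video engines with the falcon-based
 * msvld/mspdec/msppp and sec engines; GT200 below retains the older set.
 */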
static const struct nvkm_device_chip
nv98_chipset = {
.name = "G98",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g98_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g98_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, g94_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
.pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva0_chipset = {
.name = "GT200",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, g84_clk_new },
.devinit = { 0x00000001, g84_devinit_new },
.fb = { 0x00000001, g84_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, nv50_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g84_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.bsp = { 0x00000001, g84_bsp_new },
.cipher = { 0x00000001, g84_cipher_new },
.disp = { 0x00000001, gt200_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, gt200_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.pm = { 0x00000001, gt200_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
static const struct nvkm_device_chip
nva3_chipset = {
.name = "GT215",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, gt215_clk_new },
.devinit = { 0x00000001, gt215_devinit_new },
.fb = { 0x00000001, gt215_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, gt215_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.pmu = { 0x00000001, gt215_pmu_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.ce = { 0x00000001, gt215_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, gt215_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
.pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva5_chipset = {
.name = "GT216",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, gt215_clk_new },
.devinit = { 0x00000001, gt215_devinit_new },
.fb = { 0x00000001, gt215_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, gt215_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.pmu = { 0x00000001, gt215_pmu_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.ce = { 0x00000001, gt215_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, gt215_gr_new },
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
.pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nva8_chipset = {
.name = "GT218",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, gt215_clk_new },
.devinit = { 0x00000001, gt215_devinit_new },
.fb = { 0x00000001, gt215_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, gt215_mc_new },
.mmu = { 0x00000001, g84_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.pmu = { 0x00000001, gt215_pmu_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.ce = { 0x00000001, gt215_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, gt215_gr_new },
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
.pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaa_chipset = {
.name = "MCP77/MCP78",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, mcp77_clk_new },
.devinit = { 0x00000001, g98_devinit_new },
.fb = { 0x00000001, mcp77_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g98_mc_new },
.mmu = { 0x00000001, mcp77_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, mcp77_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, gt200_gr_new },
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
.pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvac_chipset = {
.name = "MCP79/MCP7A",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, mcp77_clk_new },
.devinit = { 0x00000001, g98_devinit_new },
.fb = { 0x00000001, mcp77_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, g98_mc_new },
.mmu = { 0x00000001, mcp77_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.therm = { 0x00000001, g84_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.disp = { 0x00000001, mcp77_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, mcp79_gr_new },
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
.pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
static const struct nvkm_device_chip
nvaf_chipset = {
.name = "MCP89",
.bar = { 0x00000001, g84_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, g94_bus_new },
.clk = { 0x00000001, gt215_clk_new },
.devinit = { 0x00000001, mcp89_devinit_new },
.fb = { 0x00000001, mcp89_fb_new },
.fuse = { 0x00000001, nv50_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, gt215_mc_new },
.mmu = { 0x00000001, mcp77_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, g94_pci_new },
.pmu = { 0x00000001, gt215_pmu_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, nv40_volt_new },
.ce = { 0x00000001, gt215_ce_new },
.disp = { 0x00000001, mcp89_disp_new },
.dma = { 0x00000001, nv50_dma_new },
.fifo = { 0x00000001, g98_fifo_new },
.gr = { 0x00000001, mcp89_gr_new },
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, mcp89_msvld_new },
.pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
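/*
 * GF1xx (Fermi) chipsets.  Multi-instance engines first appear here:
 * GF100's .ce mask of 0x00000003 requests two copy engines (CE0-CE1).
 */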
static const struct nvkm_device_chip
nvc0_chipset = {
.name = "GF100",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf100_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000003, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf100_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc1_chipset = {
.name = "GF108",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf108_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf106_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000001, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf108_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf108_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc3_chipset = {
.name = "GF106",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf106_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000001, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf104_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc4_chipset = {
.name = "GF104",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf100_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000003, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf104_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvc8_chipset = {
.name = "GF110",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf100_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000003, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf110_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvce_chipset = {
.name = "GF114",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf100_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000003, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf104_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvcf_chipset = {
.name = "GF116",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, g94_gpio_new },
.i2c = { 0x00000001, g94_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf106_pci_new },
.pmu = { 0x00000001, gf100_pmu_new },
.privring = { 0x00000001, gf100_privring_new },
.therm = { 0x00000001, gt215_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000001, gf100_ce_new },
.disp = { 0x00000001, gt215_disp_new },
.dma = { 0x00000001, gf100_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf104_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd7_chipset = {
.name = "GF117",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gf119_gpio_new },
.i2c = { 0x00000001, gf117_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf106_pci_new },
.privring = { 0x00000001, gf117_privring_new },
.therm = { 0x00000001, gf119_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf117_volt_new },
.ce = { 0x00000001, gf100_ce_new },
.disp = { 0x00000001, gf119_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf117_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvd9_chipset = {
.name = "GF119",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gf100_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gf100_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gf119_gpio_new },
.i2c = { 0x00000001, gf119_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gf100_ltc_new },
.mc = { 0x00000001, gf100_mc_new },
.mmu = { 0x00000001, gf100_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gf106_pci_new },
.pmu = { 0x00000001, gf119_pmu_new },
.privring = { 0x00000001, gf117_privring_new },
.therm = { 0x00000001, gf119_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.volt = { 0x00000001, gf100_volt_new },
.ce = { 0x00000001, gf100_ce_new },
.disp = { 0x00000001, gf119_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gf100_fifo_new },
.gr = { 0x00000001, gf119_gr_new },
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
.pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
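/* GK1xx (Kepler) chipsets: the first entries with a top subdev, and with up to three copy engines (0x00000007). */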
static const struct nvkm_device_chip
nve4_chipset = {
.name = "GK104",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk104_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk104_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk104_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk104_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk104_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk104_fifo_new },
.gr = { 0x00000001, gk104_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve6_chipset = {
.name = "GK106",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk104_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk104_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk104_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk104_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk104_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk104_fifo_new },
.gr = { 0x00000001, gk104_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nve7_chipset = {
.name = "GK107",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk104_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk104_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk104_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk104_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk104_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk104_fifo_new },
.gr = { 0x00000001, gk104_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
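/*
 * GK20A is the Tegra K1 IGP: SoC-attached, so there are no bios, pci,
 * disp or therm entries, and Tegra-specific bar/clk/fb/instmem/mmu/volt
 * implementations are used instead.
 */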
static const struct nvkm_device_chip
nvea_chipset = {
.name = "GK20A",
.bar = { 0x00000001, gk20a_bar_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk20a_clk_new },
.fb = { 0x00000001, gk20a_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.imem = { 0x00000001, gk20a_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gk20a_mmu_new },
.pmu = { 0x00000001, gk20a_pmu_new },
.privring = { 0x00000001, gk20a_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk20a_volt_new },
.ce = { 0x00000004, gk104_ce_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk20a_fifo_new },
.gr = { 0x00000001, gk20a_gr_new },
.pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf0_chipset = {
.name = "GK110",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk110_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk104_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk110_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk110_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk110_fifo_new },
.gr = { 0x00000001, gk110_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nvf1_chipset = {
.name = "GK110B",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk110_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk104_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk110_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk110_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk110_fifo_new },
.gr = { 0x00000001, gk110b_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv106_chipset = {
.name = "GK208B",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk110_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk208_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk110_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk208_fifo_new },
.gr = { 0x00000001, gk208_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv108_chipset = {
.name = "GK208",
.bar = { 0x00000001, gf100_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gf100_devinit_new },
.fb = { 0x00000001, gk110_fb_new },
.fuse = { 0x00000001, gf100_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gk104_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gk208_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gk104_therm_new },
.timer = { 0x00000001, nv41_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gk104_ce_new },
.disp = { 0x00000001, gk110_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk208_fifo_new },
.gr = { 0x00000001, gk208_gr_new },
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
.sw = { 0x00000001, gf100_sw_new },
};
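/* GM1xx (first-generation Maxwell) chipsets. */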
static const struct nvkm_device_chip
nv117_chipset = {
.name = "GM107",
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gm107_devinit_new },
.fb = { 0x00000001, gm107_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gm107_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gm107_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gm107_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000005, gm107_ce_new },
.disp = { 0x00000001, gm107_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm107_fifo_new },
.gr = { 0x00000001, gm107_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv118_chipset = {
.name = "GM108",
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gk104_clk_new },
.devinit = { 0x00000001, gm107_devinit_new },
.fb = { 0x00000001, gm107_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gk110_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gm107_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gk104_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gm107_pmu_new },
.privring = { 0x00000001, gk104_privring_new },
.therm = { 0x00000001, gm107_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000005, gm107_ce_new },
.disp = { 0x00000001, gm107_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm107_fifo_new },
.gr = { 0x00000001, gm107_gr_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
.acr = { 0x00000001, gm200_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fb = { 0x00000001, gm200_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gm200_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gm200_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gm200_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gm200_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gm200_ce_new },
.disp = { 0x00000001, gm200_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm200_fifo_new },
.gr = { 0x00000001, gm200_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000003, gm107_nvenc_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
.acr = { 0x00000001, gm200_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fb = { 0x00000001, gm200_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gm200_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gm200_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gm200_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gm200_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gm200_ce_new },
.disp = { 0x00000001, gm200_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm200_fifo_new },
.gr = { 0x00000001, gm200_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000003, gm107_nvenc_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
.acr = { 0x00000001, gm200_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fb = { 0x00000001, gm200_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.iccsense = { 0x00000001, gf100_iccsense_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gm200_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gm200_mmu_new },
.mxm = { 0x00000001, nv50_mxm_new },
.pci = { 0x00000001, gk104_pci_new },
.pmu = { 0x00000001, gm200_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gm200_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gk104_volt_new },
.ce = { 0x00000007, gm200_ce_new },
.disp = { 0x00000001, gm200_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm200_fifo_new },
.gr = { 0x00000001, gm200_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
.acr = { 0x00000001, gm20b_acr_new },
.bar = { 0x00000001, gm20b_bar_new },
.bus = { 0x00000001, gf100_bus_new },
.clk = { 0x00000001, gm20b_clk_new },
.fb = { 0x00000001, gm20b_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.imem = { 0x00000001, gk20a_instmem_new },
.ltc = { 0x00000001, gm200_ltc_new },
.mc = { 0x00000001, gk20a_mc_new },
.mmu = { 0x00000001, gm20b_mmu_new },
.pmu = { 0x00000001, gm20b_pmu_new },
.privring = { 0x00000001, gk20a_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.volt = { 0x00000001, gm20b_volt_new },
.ce = { 0x00000004, gm200_ce_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gm200_fifo_new },
.gr = { 0x00000001, gm20b_gr_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
.acr = { 0x00000001, gm200_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp100_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp100_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gm200_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000003f, gp100_ce_new },
.dma = { 0x00000001, gf119_dma_new },
.disp = { 0x00000001, gp100_disp_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp100_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000007, gm107_nvenc_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
.acr = { 0x00000001, gp102_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000000f, gp102_ce_new },
.disp = { 0x00000001, gp102_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp102_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000003, gm107_nvenc_new },
.sec2 = { 0x00000001, gp102_sec2_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
.acr = { 0x00000001, gp102_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000000f, gp102_ce_new },
.disp = { 0x00000001, gp102_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp104_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000003, gm107_nvenc_new },
.sec2 = { 0x00000001, gp102_sec2_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
.acr = { 0x00000001, gp102_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000000f, gp102_ce_new },
.disp = { 0x00000001, gp102_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp104_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, gp102_sec2_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
.acr = { 0x00000001, gp102_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000000f, gp102_ce_new },
.disp = { 0x00000001, gp102_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp107_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000003, gm107_nvenc_new },
.sec2 = { 0x00000001, gp102_sec2_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
.acr = { 0x00000001, gp108_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gm200_devinit_new },
.fault = { 0x00000001, gp100_fault_new },
.fb = { 0x00000001, gp102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gp100_mmu_new },
.therm = { 0x00000001, gp100_therm_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x0000000f, gp102_ce_new },
.disp = { 0x00000001, gp102_disp_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp108_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.sec2 = { 0x00000001, gp108_sec2_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
.acr = { 0x00000001, gp10b_acr_new },
.bar = { 0x00000001, gm20b_bar_new },
.bus = { 0x00000001, gf100_bus_new },
.fault = { 0x00000001, gp10b_fault_new },
.fb = { 0x00000001, gp10b_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.imem = { 0x00000001, gk20a_instmem_new },
.ltc = { 0x00000001, gp10b_ltc_new },
.mc = { 0x00000001, gp10b_mc_new },
.mmu = { 0x00000001, gp10b_mmu_new },
.pmu = { 0x00000001, gp10b_pmu_new },
.privring = { 0x00000001, gp10b_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.ce = { 0x00000001, gp100_ce_new },
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gp100_fifo_new },
.gr = { 0x00000001, gp10b_gr_new },
.sw = { 0x00000001, gf100_sw_new },
};
static const struct nvkm_device_chip
nv140_chipset = {
.name = "GV100",
.acr = { 0x00000001, gv100_acr_new },
.bar = { 0x00000001, gm107_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, gv100_devinit_new },
.fault = { 0x00000001, gv100_fault_new },
.fb = { 0x00000001, gv100_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, gv100_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, gv100_vfn_new },
.ce = { 0x000001ff, gv100_ce_new },
.disp = { 0x00000001, gv100_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, gv100_fifo_new },
.gr = { 0x00000001, gv100_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000007, gm107_nvenc_new },
.sec2 = { 0x00000001, gp108_sec2_new },
};
static const struct nvkm_device_chip
nv162_chipset = {
.name = "TU102",
.acr = { 0x00000001, tu102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, tu102_vfn_new },
.ce = { 0x0000001f, tu102_ce_new },
.disp = { 0x00000001, tu102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv164_chipset = {
.name = "TU104",
.acr = { 0x00000001, tu102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, tu102_vfn_new },
.ce = { 0x0000001f, tu102_ce_new },
.disp = { 0x00000001, tu102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
.nvdec = { 0x00000003, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv166_chipset = {
.name = "TU106",
.acr = { 0x00000001, tu102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, tu102_vfn_new },
.ce = { 0x0000001f, tu102_ce_new },
.disp = { 0x00000001, tu102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
.nvdec = { 0x00000007, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
.acr = { 0x00000001, tu102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, tu102_vfn_new },
.ce = { 0x0000001f, tu102_ce_new },
.disp = { 0x00000001, tu102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
.acr = { 0x00000001, tu102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, gp102_ltc_new },
.mc = { 0x00000001, gp100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.pmu = { 0x00000001, gp102_pmu_new },
.privring = { 0x00000001, gm200_privring_new },
.therm = { 0x00000001, gp100_therm_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, gk104_top_new },
.vfn = { 0x00000001, tu102_vfn_new },
.ce = { 0x0000001f, tu102_ce_new },
.disp = { 0x00000001, tu102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, tu102_fifo_new },
.gr = { 0x00000001, tu102_gr_new },
.nvdec = { 0x00000001, gm107_nvdec_new },
.nvenc = { 0x00000001, gm107_nvenc_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
nv170_chipset = {
.name = "GA100",
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga100_fb_new },
.gpio = { 0x00000001, gk104_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
};
static const struct nvkm_device_chip
nv172_chipset = {
.name = "GA102",
.acr = { 0x00000001, ga102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.gsp = { 0x00000001, ga102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, ga102_ltc_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000001, ga102_nvdec_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv173_chipset = {
.name = "GA103",
.acr = { 0x00000001, ga102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.gsp = { 0x00000001, ga102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, ga102_ltc_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000001, ga102_nvdec_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.acr = { 0x00000001, ga102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.gsp = { 0x00000001, ga102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, ga102_ltc_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000001, ga102_nvdec_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv176_chipset = {
.name = "GA106",
.acr = { 0x00000001, ga102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.gsp = { 0x00000001, ga102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, ga102_ltc_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000001, ga102_nvdec_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv177_chipset = {
.name = "GA107",
.acr = { 0x00000001, ga102_acr_new },
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.gsp = { 0x00000001, ga102_gsp_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.ltc = { 0x00000001, ga102_ltc_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000001, ga102_nvdec_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
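/* Each entry in the chipset tables above pairs an instance bitmask with a
* constructor: .ce = { 0x00000007, ... } enables CE0-CE2, for example.  The
* masks are consumed by the NVKM_LAYOUT_ONCE/NVKM_LAYOUT_INST expansion in
* nvkm_device_ctor() below.
*/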
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
struct nvkm_subdev *subdev;
list_for_each_entry(subdev, &device->subdev, head) {
if (subdev->type == type && subdev->inst == inst)
return subdev;
}
return NULL;
}
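/* An engine is simply a subdev whose function table is nvkm_engine, so the
* engine lookup reuses the subdev list and checks the ops pointer.
*/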
struct nvkm_engine *
nvkm_device_engine(struct nvkm_device *device, int type, int inst)
{
struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst);
if (subdev && subdev->func == &nvkm_engine)
return container_of(subdev, struct nvkm_engine, subdev);
return NULL;
}
int
nvkm_device_fini(struct nvkm_device *device, bool suspend)
{
const char *action = suspend ? "suspend" : "fini";
struct nvkm_subdev *subdev;
int ret;
s64 time;
nvdev_trace(device, "%s running...\n", action);
time = ktime_to_us(ktime_get());
nvkm_acpi_fini(device);
list_for_each_entry_reverse(subdev, &device->subdev, head) {
ret = nvkm_subdev_fini(subdev, suspend);
if (ret && suspend)
goto fail;
}
nvkm_therm_clkgate_fini(device->therm, suspend);
if (device->func->fini)
device->func->fini(device, suspend);
nvkm_intr_unarm(device);
time = ktime_to_us(ktime_get()) - time;
nvdev_trace(device, "%s completed in %lldus...\n", action, time);
return 0;
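/* A failed suspend must not leave the device half-down: walk forward from
* the subdev that failed and re-init it, along with everything the reverse
* loop above already brought down.
*/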
fail:
list_for_each_entry_from(subdev, &device->subdev, head) {
int rret = nvkm_subdev_init(subdev);
if (rret)
nvkm_fatal(subdev, "failed restart, %d\n", rret);
}
nvdev_trace(device, "%s failed with %d\n", action, ret);
return ret;
}
static int
nvkm_device_preinit(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
int ret;
s64 time;
nvdev_trace(device, "preinit running...\n");
time = ktime_to_us(ktime_get());
nvkm_intr_unarm(device);
if (device->func->preinit) {
ret = device->func->preinit(device);
if (ret)
goto fail;
}
list_for_each_entry(subdev, &device->subdev, head) {
ret = nvkm_subdev_preinit(subdev);
if (ret)
goto fail;
}
ret = nvkm_devinit_post(device->devinit);
if (ret)
goto fail;
ret = nvkm_top_parse(device);
if (ret)
goto fail;
ret = nvkm_fb_mem_unlock(device->fb);
if (ret)
goto fail;
time = ktime_to_us(ktime_get()) - time;
nvdev_trace(device, "preinit completed in %lldus\n", time);
return 0;
fail:
nvdev_error(device, "preinit failed with %d\n", ret);
return ret;
}
int
nvkm_device_init(struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
int ret;
s64 time;
ret = nvkm_device_preinit(device);
if (ret)
return ret;
nvkm_device_fini(device, false);
nvdev_trace(device, "init running...\n");
time = ktime_to_us(ktime_get());
nvkm_intr_rearm(device);
if (device->func->init) {
ret = device->func->init(device);
if (ret)
goto fail;
}
list_for_each_entry(subdev, &device->subdev, head) {
ret = nvkm_subdev_init(subdev);
if (ret)
goto fail_subdev;
}
nvkm_acpi_init(device);
nvkm_therm_clkgate_enable(device->therm);
time = ktime_to_us(ktime_get()) - time;
nvdev_trace(device, "init completed in %lldus\n", time);
return 0;
fail_subdev:
list_for_each_entry_from(subdev, &device->subdev, head)
nvkm_subdev_fini(subdev, false);
fail:
nvkm_device_fini(device, false);
nvdev_error(device, "init failed with %d\n", ret);
return ret;
}
void
nvkm_device_del(struct nvkm_device **pdevice)
{
struct nvkm_device *device = *pdevice;
struct nvkm_subdev *subdev, *subtmp;
if (device) {
mutex_lock(&nv_devices_mutex);
nvkm_intr_dtor(device);
list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
nvkm_subdev_del(&subdev);
if (device->pri)
iounmap(device->pri);
list_del(&device->head);
if (device->func->dtor)
*pdevice = device->func->dtor(device);
mutex_unlock(&nv_devices_mutex);
kfree(*pdevice);
*pdevice = NULL;
}
}
/* Returns true if the GPU is in the CPU's native byte order. */
static inline bool
nvkm_device_endianness(struct nvkm_device *device)
{
#ifdef __BIG_ENDIAN
const bool big_endian = true;
#else
const bool big_endian = false;
#endif
/* Read NV_PMC_BOOT_1, and assume non-functional endian switch if it
* doesn't contain the expected values.
*/
u32 pmc_boot_1 = nvkm_rd32(device, 0x000004);
if (pmc_boot_1 && pmc_boot_1 != 0x01000001)
return !big_endian; /* Assume GPU is LE in this case. */
/* 0 means LE and 0x01000001 means BE GPU. Condition is true when
* the GPU and CPU endianness differ.
*/
if (big_endian == !pmc_boot_1) {
nvkm_wr32(device, 0x000004, 0x01000001);
nvkm_rd32(device, 0x000000);
if (nvkm_rd32(device, 0x000004) != (big_endian ? 0x01000001 : 0x00000000))
return !big_endian; /* Assume GPU is LE on any unexpected read-back. */
}
/* CPU/GPU endianness should (hopefully) match. */
return true;
}
int
nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
struct device *dev, enum nvkm_device_type type, u64 handle,
const char *name, const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
u32 boot0, boot1, strap;
int ret = -EEXIST, j;
unsigned chipset;
mutex_lock(&nv_devices_mutex);
if (nvkm_device_find_locked(handle))
goto done;
device->func = func;
device->quirk = quirk;
device->dev = dev;
device->type = type;
device->handle = handle;
device->cfgopt = cfg;
device->dbgopt = dbg;
device->name = name;
list_add_tail(&device->head, &nv_devices);
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
if (detect || mmio) {
device->pri = ioremap(mmio_base, mmio_size);
if (device->pri == NULL) {
nvdev_error(device, "unable to map PRI\n");
ret = -ENOMEM;
goto done;
}
}
/* identify the chipset, and determine classes of subdev/engines */
if (detect) {
/* switch mmio to cpu's native endianness */
if (!nvkm_device_endianness(device)) {
nvdev_error(device,
"Couldn't switch GPU to CPUs endianness\n");
ret = -ENOSYS;
goto done;
}
boot0 = nvkm_rd32(device, 0x000000);
/* chipset can be overridden for devel/testing purposes */
chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
if (chipset) {
u32 override_boot0;
if (chipset >= 0x10) {
override_boot0 = ((chipset & 0x1ff) << 20);
override_boot0 |= 0x000000a1;
} else {
if (chipset != 0x04)
override_boot0 = 0x20104000;
else
override_boot0 = 0x20004000;
}
nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
boot0, override_boot0);
boot0 = override_boot0;
}
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
device->chiprev = (boot0 & 0x000000ff);
switch (device->chipset & 0x1f0) {
case 0x010: {
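/* Bitmap 0x461 marks chipsets 0x10, 0x15, 0x16 and 0x1a
* as NV_10; the remaining NV1x parts report NV_11.
*/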
if (0x461 & (1 << (device->chipset & 0xf)))
device->card_type = NV_10;
else
device->card_type = NV_11;
device->chiprev = 0x00;
break;
}
case 0x020: device->card_type = NV_20; break;
case 0x030: device->card_type = NV_30; break;
case 0x040:
case 0x060: device->card_type = NV_40; break;
case 0x050:
case 0x080:
case 0x090:
case 0x0a0: device->card_type = NV_50; break;
case 0x0c0:
case 0x0d0: device->card_type = NV_C0; break;
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
case 0x110:
case 0x120: device->card_type = GM100; break;
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
default:
break;
}
} else
if ((boot0 & 0xff00fff0) == 0x20004000) {
if (boot0 & 0x00f00000)
device->chipset = 0x05;
else
device->chipset = 0x04;
device->card_type = NV_04;
}
switch (device->chipset) {
case 0x004: device->chip = &nv4_chipset; break;
case 0x005: device->chip = &nv5_chipset; break;
case 0x010: device->chip = &nv10_chipset; break;
case 0x011: device->chip = &nv11_chipset; break;
case 0x015: device->chip = &nv15_chipset; break;
case 0x017: device->chip = &nv17_chipset; break;
case 0x018: device->chip = &nv18_chipset; break;
case 0x01a: device->chip = &nv1a_chipset; break;
case 0x01f: device->chip = &nv1f_chipset; break;
case 0x020: device->chip = &nv20_chipset; break;
case 0x025: device->chip = &nv25_chipset; break;
case 0x028: device->chip = &nv28_chipset; break;
case 0x02a: device->chip = &nv2a_chipset; break;
case 0x030: device->chip = &nv30_chipset; break;
case 0x031: device->chip = &nv31_chipset; break;
case 0x034: device->chip = &nv34_chipset; break;
case 0x035: device->chip = &nv35_chipset; break;
case 0x036: device->chip = &nv36_chipset; break;
case 0x040: device->chip = &nv40_chipset; break;
case 0x041: device->chip = &nv41_chipset; break;
case 0x042: device->chip = &nv42_chipset; break;
case 0x043: device->chip = &nv43_chipset; break;
case 0x044: device->chip = &nv44_chipset; break;
case 0x045: device->chip = &nv45_chipset; break;
case 0x046: device->chip = &nv46_chipset; break;
case 0x047: device->chip = &nv47_chipset; break;
case 0x049: device->chip = &nv49_chipset; break;
case 0x04a: device->chip = &nv4a_chipset; break;
case 0x04b: device->chip = &nv4b_chipset; break;
case 0x04c: device->chip = &nv4c_chipset; break;
case 0x04e: device->chip = &nv4e_chipset; break;
case 0x050: device->chip = &nv50_chipset; break;
case 0x063: device->chip = &nv63_chipset; break;
case 0x067: device->chip = &nv67_chipset; break;
case 0x068: device->chip = &nv68_chipset; break;
case 0x084: device->chip = &nv84_chipset; break;
case 0x086: device->chip = &nv86_chipset; break;
case 0x092: device->chip = &nv92_chipset; break;
case 0x094: device->chip = &nv94_chipset; break;
case 0x096: device->chip = &nv96_chipset; break;
case 0x098: device->chip = &nv98_chipset; break;
case 0x0a0: device->chip = &nva0_chipset; break;
case 0x0a3: device->chip = &nva3_chipset; break;
case 0x0a5: device->chip = &nva5_chipset; break;
case 0x0a8: device->chip = &nva8_chipset; break;
case 0x0aa: device->chip = &nvaa_chipset; break;
case 0x0ac: device->chip = &nvac_chipset; break;
case 0x0af: device->chip = &nvaf_chipset; break;
case 0x0c0: device->chip = &nvc0_chipset; break;
case 0x0c1: device->chip = &nvc1_chipset; break;
case 0x0c3: device->chip = &nvc3_chipset; break;
case 0x0c4: device->chip = &nvc4_chipset; break;
case 0x0c8: device->chip = &nvc8_chipset; break;
case 0x0ce: device->chip = &nvce_chipset; break;
case 0x0cf: device->chip = &nvcf_chipset; break;
case 0x0d7: device->chip = &nvd7_chipset; break;
case 0x0d9: device->chip = &nvd9_chipset; break;
case 0x0e4: device->chip = &nve4_chipset; break;
case 0x0e6: device->chip = &nve6_chipset; break;
case 0x0e7: device->chip = &nve7_chipset; break;
case 0x0ea: device->chip = &nvea_chipset; break;
case 0x0f0: device->chip = &nvf0_chipset; break;
case 0x0f1: device->chip = &nvf1_chipset; break;
case 0x106: device->chip = &nv106_chipset; break;
case 0x108: device->chip = &nv108_chipset; break;
case 0x117: device->chip = &nv117_chipset; break;
case 0x118: device->chip = &nv118_chipset; break;
case 0x120: device->chip = &nv120_chipset; break;
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
case 0x130: device->chip = &nv130_chipset; break;
case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
case 0x137: device->chip = &nv137_chipset; break;
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
case 0x170: device->chip = &nv170_chipset; break;
default:
break;
}
}
if (!device->chip) {
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
}
break;
}
nvdev_info(device, "NVIDIA %s (%08x)\n",
device->chip->name, boot0);
/* vGPU detection */
boot1 = nvkm_rd32(device, 0x0000004);
if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
nvdev_info(device, "vGPUs are not supported\n");
ret = -ENODEV;
goto done;
}
/* read strapping information */
strap = nvkm_rd32(device, 0x101000);
/* determine frequency of timing crystal */
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
(device->chipset >= 0x20 && device->chipset < 0x25))
strap &= 0x00000040;
else
strap &= 0x00400040;
switch (strap) {
case 0x00000000: device->crystal = 13500; break;
case 0x00000040: device->crystal = 14318; break;
case 0x00400000: device->crystal = 27000; break;
case 0x00400040: device->crystal = 25000; break;
}
} else {
device->chip = &null_chipset;
}
if (!device->name)
device->name = device->chip->name;
mutex_init(&device->mutex);
nvkm_intr_ctor(device);
#define NVKM_LAYOUT_ONCE(type,data,ptr) \
if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
WARN_ON(device->chip->ptr.inst != 0x00000001); \
ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
subdev = nvkm_device_subdev(device, (type), 0); \
if (ret) { \
nvkm_subdev_del(&subdev); \
device->ptr = NULL; \
if (ret != -ENODEV) { \
nvdev_error(device, "%s ctor failed: %d\n", \
nvkm_subdev_type[(type)], ret); \
goto done; \
} \
} else { \
subdev->pself = (void **)&device->ptr; \
} \
}
#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \
subdev = nvkm_device_subdev(device, (type), (j)); \
if (ret) { \
nvkm_subdev_del(&subdev); \
device->ptr[j] = NULL; \
if (ret != -ENODEV) { \
nvdev_error(device, "%s%d ctor failed: %d\n", \
nvkm_subdev_type[(type)], (j), ret); \
goto done; \
} \
} else { \
subdev->pself = (void **)&device->ptr[j]; \
} \
} \
}
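/* <core/layout.h> lists every subdev/engine type in initialisation order;
* including it here expands one of the macros above per entry, constructing
* whatever the chipset's instance mask enables.
*/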
#include <core/layout.h>
#undef NVKM_LAYOUT_INST
#undef NVKM_LAYOUT_ONCE
ret = nvkm_intr_install(device);
done:
if (device->pri && (!mmio || ret)) {
iounmap(device->pri);
device->pri = NULL;
}
mutex_unlock(&nv_devices_mutex);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/base.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "acpi.h"
#include <core/device.h>
#include <subdev/clk.h>
#ifdef CONFIG_ACPI
static int
nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
{
struct nvkm_device *device = container_of(nb, typeof(*device), acpi.nb);
struct acpi_bus_event *info = data;
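/* Power-source changes can alter clock limits, so poke the clock
* subdev whenever the AC adapter state flips.
*/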
if (!strcmp(info->device_class, "ac_adapter"))
nvkm_clk_pwrsrc(device);
return NOTIFY_DONE;
}
#endif
void
nvkm_acpi_fini(struct nvkm_device *device)
{
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&device->acpi.nb);
#endif
}
void
nvkm_acpi_init(struct nvkm_device *device)
{
#ifdef CONFIG_ACPI
device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
register_acpi_notifier(&device->acpi.nb);
#endif
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
struct gf119_dmaobj {
struct nvkm_dmaobj base;
u32 flags0;
};
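/* Binding writes a 24-byte ctxdma object: flags in word 0, then the start
* and limit addresses in 256-byte units (hence the >> 8 shifts below).
*/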
static int
gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
gf119_dmaobj_func = {
.bind = gf119_dmaobj_bind,
};
int
gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
union {
struct gf119_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct gf119_dmaobj *dmaobj;
u32 kind, page;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
ret = -ENOSYS;
args = data;
nvif_ioctl(parent, "create gf119 dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent,
"create gf100 dma vers %d page %d kind %02x\n",
args->v0.version, args->v0.page, args->v0.kind);
kind = args->v0.kind;
page = args->v0.page;
} else
if (size == 0) {
if (dmaobj->base.target != NV_MEM_TARGET_VM) {
kind = GF119_DMA_V0_KIND_PITCH;
page = GF119_DMA_V0_PAGE_SP;
} else {
kind = GF119_DMA_V0_KIND_VM;
page = GF119_DMA_V0_PAGE_LP;
}
} else
return ret;
if (page > 1)
return -EINVAL;
dmaobj->flags0 = (kind << 20) | (page << 6);
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00000009;
break;
case NV_MEM_TARGET_VM:
case NV_MEM_TARGET_PCI:
case NV_MEM_TARGET_PCI_NOSNOOP:
/* XXX: we don't currently know how to construct a real one
* of these.  We only use them to represent pushbufs
* on these chipsets, and the classes that use them
* deal with the target themselves.
*/
break;
default:
return -EINVAL;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/vmm.h>
#include <nvif/class.h>
struct nv04_dmaobj {
struct nvkm_dmaobj base;
bool clone;
u32 flags0;
u32 flags2;
};
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
u64 offset = dmaobj->base.start & 0xfffff000;
u64 adjust = dmaobj->base.start & 0x00000fff;
u32 length = dmaobj->base.limit - dmaobj->base.start;
int ret;
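/* On NV04-class MMUs a "VM" target is really the PCI GART, so resolve
* the backing page by reading the page table directly: one 4-byte PTE
* per 4KiB page (hence offset >> 10), past an 8-byte object header.
*/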
if (dmaobj->clone) {
struct nvkm_memory *pgt =
device->mmu->vmm->pd->pt[0]->memory;
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt);
offset = nvkm_ro32(pgt, 8 + (offset >> 10));
offset &= 0xfffff000;
nvkm_done(pgt);
}
ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
nvkm_wo32(*pgpuobj, 0x04, length);
nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
.bind = nv04_dmaobj_bind,
};
int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
struct nvkm_device *device = dma->engine.subdev.device;
struct nv04_dmaobj *dmaobj;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
if (dmaobj->base.target == NV_MEM_TARGET_VM) {
if (device->mmu->func == &nv04_mmu)
dmaobj->clone = true;
dmaobj->base.target = NV_MEM_TARGET_PCI;
dmaobj->base.access = NV_MEM_ACCESS_RW;
}
dmaobj->flags0 = oclass->base.oclass;
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00003000;
break;
case NV_MEM_TARGET_PCI:
dmaobj->flags0 |= 0x00023000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmaobj->flags0 |= 0x00033000;
break;
default:
return -EINVAL;
}
switch (dmaobj->base.access) {
case NV_MEM_ACCESS_RO:
dmaobj->flags0 |= 0x00004000;
break;
case NV_MEM_ACCESS_WO:
dmaobj->flags0 |= 0x00008000;
fallthrough;
case NV_MEM_ACCESS_RW:
dmaobj->flags2 |= 0x00000002;
break;
default:
return -EINVAL;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "user.h"
static const struct nvkm_dma_func
nv04_dma = {
.class_new = nv04_dmaobj_new,
};
int
nv04_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_dma **pdma)
{
return nvkm_dma_new_(&nv04_dma, device, type, inst, pdma);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "user.h"
static const struct nvkm_dma_func
gf100_dma = {
.class_new = gf100_dmaobj_new,
};
int
gf100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_dma **pdma)
{
return nvkm_dma_new_(&gf100_dma, device, type, inst, pdma);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "user.h"
static const struct nvkm_dma_func
nv50_dma = {
.class_new = nv50_dmaobj_new,
};
int
nv50_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_dma **pdma)
{
return nvkm_dma_new_(&nv50_dma, device, type, inst, pdma);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
struct nv50_dmaobj {
struct nvkm_dmaobj base;
u32 flags0;
u32 flags5;
};
static int
nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
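/* Six-word NV50 DMA object: word 0x00 carries the class/comp/kind/priv
 * flags, 0x04/0x08 the low bits of limit and start, and 0x0c packs the
 * high bits of both (limit in the top byte). Layout read off the writes
 * below rather than from any official documentation. */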
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
upper_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
nv50_dmaobj_func = {
.bind = nv50_dmaobj_bind,
};
int
nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
union {
struct nv50_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct nv50_dmaobj *dmaobj;
u32 user, part, comp, kind;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
ret = -ENOSYS;
args = data;
nvif_ioctl(parent, "create nv50 dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
"comp %d kind %02x\n", args->v0.version,
args->v0.priv, args->v0.part, args->v0.comp,
args->v0.kind);
user = args->v0.priv;
part = args->v0.part;
comp = args->v0.comp;
kind = args->v0.kind;
} else
if (size == 0) {
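/* No v0 argument struct supplied: fall back to defaults chosen by memory
 * target, pitch-linear settings for real memory and VM-managed settings
 * for virtual-memory objects. */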
if (dmaobj->base.target != NV_MEM_TARGET_VM) {
user = NV50_DMA_V0_PRIV_US;
part = NV50_DMA_V0_PART_256;
comp = NV50_DMA_V0_COMP_NONE;
kind = NV50_DMA_V0_KIND_PITCH;
} else {
user = NV50_DMA_V0_PRIV_VM;
part = NV50_DMA_V0_PART_VM;
comp = NV50_DMA_V0_COMP_VM;
kind = NV50_DMA_V0_KIND_VM;
}
} else
return ret;
if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
return -EINVAL;
dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
oclass->base.oclass;
dmaobj->flags5 = (part << 16);
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VM:
dmaobj->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
dmaobj->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmaobj->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
switch (dmaobj->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
dmaobj->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
dmaobj->flags0 |= 0x00080000;
break;
default:
return -EINVAL;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
static const struct nvkm_object_func nvkm_dmaobj_func;
struct nvkm_dmaobj *
nvkm_dmaobj_search(struct nvkm_client *client, u64 handle)
{
struct nvkm_object *object;
object = nvkm_object_search(client, handle, &nvkm_dmaobj_func);
if (IS_ERR(object))
return (void *)object;
return nvkm_dmaobj(object);
}
static int
nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
}
static void *
nvkm_dmaobj_dtor(struct nvkm_object *base)
{
return nvkm_dmaobj(base);
}
static const struct nvkm_object_func
nvkm_dmaobj_func = {
.dtor = nvkm_dmaobj_dtor,
.bind = nvkm_dmaobj_bind,
};
int
nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
struct nvkm_dmaobj *dmaobj)
{
union {
struct nv_dma_v0 v0;
} *args = *pdata;
struct nvkm_object *parent = oclass->parent;
void *data = *pdata;
u32 size = *psize;
int ret = -ENOSYS;
nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
dmaobj->func = func;
dmaobj->dma = dma;
nvif_ioctl(parent, "create dma size %d\n", *psize);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(parent, "create dma vers %d target %d access %d "
"start %016llx limit %016llx\n",
args->v0.version, args->v0.target, args->v0.access,
args->v0.start, args->v0.limit);
dmaobj->target = args->v0.target;
dmaobj->access = args->v0.access;
dmaobj->start = args->v0.start;
dmaobj->limit = args->v0.limit;
} else
return ret;
*pdata = data;
*psize = size;
if (dmaobj->start > dmaobj->limit)
return -EINVAL;
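/* Translate the nvif DMA target/access enums into nvkm's internal memory
 * types; userspace PCI and AGP both map onto PCI_NOSNOOP. */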
switch (dmaobj->target) {
case NV_DMA_V0_TARGET_VM:
dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_V0_TARGET_VRAM:
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_V0_TARGET_PCI:
dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_V0_TARGET_PCI_US:
case NV_DMA_V0_TARGET_AGP:
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
}
switch (dmaobj->access) {
case NV_DMA_V0_ACCESS_VM:
dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_V0_ACCESS_RD:
dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_V0_ACCESS_WR:
dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_V0_ACCESS_RDWR:
dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "user.h"
static const struct nvkm_dma_func
gf119_dma = {
.class_new = gf119_dmaobj_new,
};
int
gf119_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_dma **pdma)
{
return nvkm_dma_new_(&gf119_dma, device, type, inst, pdma);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/client.h>
#include <engine/fifo.h>
#include <nvif/class.h>
static int
nvkm_dma_oclass_new(struct nvkm_device *device,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_dma *dma = nvkm_dma(oclass->engine);
struct nvkm_dmaobj *dmaobj = NULL;
int ret;
ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
if (dmaobj)
*pobject = &dmaobj->object;
return ret;
}
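/* Note: the object pointer is published even when class_new fails, which
 * presumably lets the core tear down a partially-constructed dmaobj. The
 * same constructor then backs both the device-level and fifo-level class
 * lists declared below. */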
static const struct nvkm_device_oclass
nvkm_dma_oclass_base = {
.ctor = nvkm_dma_oclass_new,
};
static int
nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
return nvkm_dma_oclass_new(oclass->engine->subdev.device,
oclass, data, size, pobject);
}
static const struct nvkm_sclass
nvkm_dma_sclass[] = {
{ 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
{ 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
{ 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
};
static int
nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
const struct nvkm_device_oclass **class)
{
const int count = ARRAY_SIZE(nvkm_dma_sclass);
if (index < count) {
const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
sclass->base = oclass[0];
sclass->engn = oclass;
*class = &nvkm_dma_oclass_base;
return index;
}
return count;
}
static int
nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
{
const int count = ARRAY_SIZE(nvkm_dma_sclass);
if (index < count) {
oclass->base = nvkm_dma_sclass[index];
return index;
}
return count;
}
static void *
nvkm_dma_dtor(struct nvkm_engine *engine)
{
return nvkm_dma(engine);
}
static const struct nvkm_engine_func
nvkm_dma = {
.dtor = nvkm_dma_dtor,
.base.sclass = nvkm_dma_oclass_base_get,
.fifo.sclass = nvkm_dma_oclass_fifo_get,
};
int
nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_dma **pdma)
{
struct nvkm_dma *dma;
if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
return -ENOMEM;
dma->func = func;
return nvkm_engine_ctor(&nvkm_dma, device, type, inst, true, &dma->engine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
struct gf100_dmaobj {
struct nvkm_dmaobj base;
u32 flags0;
u32 flags5;
};
static int
gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
int ret;
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
upper_32_bits(dmaobj->base.start));
nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
gf100_dmaobj_func = {
.bind = gf100_dmaobj_bind,
};
int
gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
union {
struct gf100_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct gf100_dmaobj *dmaobj;
u32 kind, user, unkn;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
ret = -ENOSYS;
args = data;
nvif_ioctl(parent, "create gf100 dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent,
"create gf100 dma vers %d priv %d kind %02x\n",
args->v0.version, args->v0.priv, args->v0.kind);
kind = args->v0.kind;
user = args->v0.priv;
unkn = 0;
} else
if (size == 0) {
if (dmaobj->base.target != NV_MEM_TARGET_VM) {
kind = GF100_DMA_V0_KIND_PITCH;
user = GF100_DMA_V0_PRIV_US;
unkn = 2;
} else {
kind = GF100_DMA_V0_KIND_VM;
user = GF100_DMA_V0_PRIV_VM;
unkn = 0;
}
} else
return ret;
if (user > 2)
return -EINVAL;
dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
dmaobj->flags5 |= (unkn << 16);
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VM:
dmaobj->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
dmaobj->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
dmaobj->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
dmaobj->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
switch (dmaobj->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
dmaobj->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
dmaobj->flags0 |= 0x00080000;
break;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "user.h"
static const struct nvkm_dma_func
gv100_dma = {
.class_new = gv100_dmaobj_new,
};
int
gv100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_dma **pdma)
{
return nvkm_dma_new_(&gv100_dma, device, type, inst, pdma);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define gv100_dmaobj(p) container_of((p), struct gv100_dmaobj, base)
#include "user.h"
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
struct gv100_dmaobj {
struct nvkm_dmaobj base;
u32 flags0;
};
static int
gv100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct gv100_dmaobj *dmaobj = gv100_dmaobj(base);
struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
u64 start = dmaobj->base.start >> 8;
u64 limit = dmaobj->base.limit >> 8;
int ret;
ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(start));
nvkm_wo32(*pgpuobj, 0x08, upper_32_bits(start));
nvkm_wo32(*pgpuobj, 0x0c, lower_32_bits(limit));
nvkm_wo32(*pgpuobj, 0x10, upper_32_bits(limit));
nvkm_done(*pgpuobj);
}
return ret;
}
static const struct nvkm_dmaobj_func
gv100_dmaobj_func = {
.bind = gv100_dmaobj_bind,
};
int
gv100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
union {
struct gf119_dma_v0 v0;
} *args;
struct nvkm_object *parent = oclass->parent;
struct gv100_dmaobj *dmaobj;
u32 kind, page;
int ret;
if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
return -ENOMEM;
*pdmaobj = &dmaobj->base;
ret = nvkm_dmaobj_ctor(&gv100_dmaobj_func, dma, oclass,
&data, &size, &dmaobj->base);
if (ret)
return ret;
ret = -ENOSYS;
args = data;
nvif_ioctl(parent, "create gv100 dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent,
"create gv100 dma vers %d page %d kind %02x\n",
args->v0.version, args->v0.page, args->v0.kind);
kind = args->v0.kind != 0;
page = args->v0.page != 0;
} else
if (size == 0) {
kind = 0;
page = GF119_DMA_V0_PAGE_SP;
} else
return ret;
if (kind)
dmaobj->flags0 |= 0x00100000;
if (page)
dmaobj->flags0 |= 0x00000040;
dmaobj->flags0 |= 0x00000004; /* rw */
switch (dmaobj->base.target) {
case NV_MEM_TARGET_VRAM : dmaobj->flags0 |= 0x00000001; break;
case NV_MEM_TARGET_PCI : dmaobj->flags0 |= 0x00000002; break;
case NV_MEM_TARGET_PCI_NOSNOOP: dmaobj->flags0 |= 0x00000003; break;
default:
return -EINVAL;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/dma/usergv100.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_engine_func
gm107_ce = {
.intr = gk104_ce_intr,
.sclass = {
{ -1, -1, KEPLER_DMA_COPY_A },
{ -1, -1, MAXWELL_DMA_COPY_A },
{}
}
};
int
gm107_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gm107_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/enum.h>
#include <nvif/class.h>
static const struct nvkm_enum
gk104_ce_launcherr_report[] = {
{ 0x0, "NO_ERR" },
{ 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
{ 0x2, "INVALID_ARGUMENT" },
{ 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
{ 0x4, "SRC_LINE_EXCEEDS_PITCH" },
{ 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
{ 0x6, "DST_LINE_EXCEEDS_PITCH" },
{ 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
{ 0x8, "BAD_SRC_PIXEL_COMP_REF" },
{ 0x9, "INVALID_VALUE" },
{ 0xa, "UNUSED_FIELD" },
{ 0xb, "INVALID_OPERATION" },
{}
};
static void
gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
{
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x104f14 + base);
const struct nvkm_enum *en =
nvkm_enum_find(gk104_ce_launcherr_report, stat & 0x0000000f);
nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
nvkm_wr32(device, 0x104f14 + base, 0x00000000);
}
void
gk104_ce_intr(struct nvkm_engine *ce)
{
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
const u32 base = subdev->inst * 0x1000;
u32 mask = nvkm_rd32(device, 0x104904 + base);
u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;
if (intr & 0x00000001) {
nvkm_warn(subdev, "BLOCKPIPE\n");
nvkm_wr32(device, 0x104908 + base, 0x00000001);
intr &= ~0x00000001;
}
if (intr & 0x00000002) {
nvkm_warn(subdev, "NONBLOCKPIPE\n");
nvkm_wr32(device, 0x104908 + base, 0x00000002);
intr &= ~0x00000002;
}
if (intr & 0x00000004) {
gk104_ce_intr_launcherr(ce, base);
nvkm_wr32(device, 0x104908 + base, 0x00000004);
intr &= ~0x00000004;
}
if (intr) {
nvkm_warn(subdev, "intr %08x\n", intr);
nvkm_wr32(device, 0x104908 + base, intr);
}
}
static const struct nvkm_engine_func
gk104_ce = {
.intr = gk104_ce_intr,
.sclass = {
{ -1, -1, KEPLER_DMA_COPY_A },
{}
}
};
int
gk104_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gk104_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gf100.fuc3.h"
#include <nvif/class.h>
static void
gf100_ce_init(struct nvkm_falcon *ce)
{
nvkm_wr32(ce->engine.subdev.device, ce->addr + 0x084, ce->engine.subdev.inst);
}
static const struct nvkm_falcon_func
gf100_ce0 = {
.code.data = gf100_ce_code,
.code.size = sizeof(gf100_ce_code),
.data.data = gf100_ce_data,
.data.size = sizeof(gf100_ce_data),
.init = gf100_ce_init,
.intr = gt215_ce_intr,
.sclass = {
{ -1, -1, FERMI_DMA },
{}
}
};
static const struct nvkm_falcon_func
gf100_ce1 = {
.code.data = gf100_ce_code,
.code.size = sizeof(gf100_ce_code),
.data.data = gf100_ce_data,
.data.size = sizeof(gf100_ce_data),
.init = gf100_ce_init,
.intr = gt215_ce_intr,
.sclass = {
{ -1, -1, FERMI_DECOMPRESS },
{}
}
};
int
gf100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_falcon_new_(inst ? &gf100_ce1 : &gf100_ce0, device, type, inst, true,
0x104000 + (inst * 0x1000), pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_engine_func
tu102_ce = {
.intr = gp100_ce_intr,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, TURING_DMA_COPY_A },
{}
}
};
int
tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/vfn.h>
#include <nvif/class.h>
static irqreturn_t
ga100_ce_intr(struct nvkm_inth *inth)
{
struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);
/*TODO*/
nvkm_error(subdev, "intr\n");
return IRQ_NONE;
}
int
ga100_ce_nonstall(struct nvkm_engine *engine)
{
struct nvkm_subdev *subdev = &engine->subdev;
struct nvkm_device *device = subdev->device;
return nvkm_rd32(device, 0x104424 + (subdev->inst * 0x80)) & 0x00000fff;
}
int
ga100_ce_fini(struct nvkm_engine *engine, bool suspend)
{
nvkm_inth_block(&engine->subdev.inth);
return 0;
}
int
ga100_ce_init(struct nvkm_engine *engine)
{
nvkm_inth_allow(&engine->subdev.inth);
return 0;
}
int
ga100_ce_oneinit(struct nvkm_engine *engine)
{
struct nvkm_subdev *subdev = &engine->subdev;
struct nvkm_device *device = subdev->device;
u32 vector;
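/* Each CE instance publishes its interrupt vectors in PRI: the stalling
 * vector read here (0x10442c) is routed through the VFN interrupt tree,
 * while the nonstall vector (0x104424) is reported by ga100_ce_nonstall()
 * above. */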
vector = nvkm_rd32(device, 0x10442c + (subdev->inst * 0x80)) & 0x00000fff;
return nvkm_inth_add(&device->vfn->intr, vector, NVKM_INTR_PRIO_NORMAL,
subdev, ga100_ce_intr, &subdev->inth);
}
static const struct nvkm_engine_func
ga100_ce = {
.oneinit = ga100_ce_oneinit,
.init = ga100_ce_init,
.fini = ga100_ce_fini,
.nonstall = ga100_ce_nonstall,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, AMPERE_DMA_COPY_A },
{}
}
};
int
ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_engine_func
gm200_ce = {
.intr = gk104_ce_intr,
.sclass = {
{ -1, -1, MAXWELL_DMA_COPY_A },
{}
}
};
int
gm200_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gm200_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/enum.h>
#include <nvif/class.h>
static const struct nvkm_engine_func
gp102_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_B },
{ -1, -1, PASCAL_DMA_COPY_A },
{}
}
};
int
gp102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gp102_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/gpuobj.h>
#include <core/object.h>
#include <nvif/class.h>
static int
gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_device *device = object->engine->subdev.device;
u32 size;
/* Allocate fault method buffer (magics come from nvgpu). */
size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
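/* The sizing below multiplies a per-method-entry footprint by the number
 * of enabled PCEs (hweight of the PCE map); the constants are carried
 * over from nvgpu as-is, with no independent explanation. */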
size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
size = roundup(size, PAGE_SIZE);
return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
}
const struct nvkm_object_func
gv100_ce_cclass = {
.bind = gv100_ce_cclass_bind,
};
static const struct nvkm_engine_func
gv100_ce = {
.intr = gp100_ce_intr,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, VOLTA_DMA_COPY_A },
{}
}
};
int
gv100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gv100_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gt215.fuc3.h"
#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
#include <nvif/class.h>
static const struct nvkm_enum
gt215_ce_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
{ 0x0002, "INVALID_ENUM" },
{ 0x0003, "INVALID_BITFIELD" },
{}
};
void
gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_chan *chan)
{
struct nvkm_subdev *subdev = &ce->engine.subdev;
struct nvkm_device *device = subdev->device;
const u32 base = subdev->inst * 0x1000;
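/* 0x104040 packs the dispatch error status in its low half and the
 * faulting method address in its high half; method offset and subchannel
 * are decoded from the address bits below. */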
u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
u32 data = nvkm_rd32(device, 0x104044 + base);
const struct nvkm_enum *en =
nvkm_enum_find(gt215_ce_isr_error_name, ssta);
nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
"subc %d mthd %04x data %08x\n", ssta,
en ? en->name : "", chan ? chan->id : -1,
chan ? chan->inst->addr : 0,
chan ? chan->name : "unknown",
subc, mthd, data);
}
static const struct nvkm_falcon_func
gt215_ce = {
.code.data = gt215_ce_code,
.code.size = sizeof(gt215_ce_code),
.data.data = gt215_ce_data,
.data.size = sizeof(gt215_ce_data),
.intr = gt215_ce_intr,
.sclass = {
{ -1, -1, GT212_DMA },
{}
}
};
int
gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_falcon_new_(&gt215_ce, device, type, -1,
(device->chipset != 0xaf), 0x104000, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_engine_func
ga102_ce = {
.oneinit = ga100_ce_oneinit,
.init = ga100_ce_init,
.fini = ga100_ce_fini,
.nonstall = ga100_ce_nonstall,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, AMPERE_DMA_COPY_A },
{ -1, -1, AMPERE_DMA_COPY_B },
{}
}
};
int
ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/enum.h>
#include <nvif/class.h>
static const struct nvkm_enum
gp100_ce_launcherr_report[] = {
{ 0x0, "NO_ERR" },
{ 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
{ 0x2, "INVALID_ALIGNMENT" },
{ 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
{ 0x4, "SRC_LINE_EXCEEDS_PITCH" },
{ 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
{ 0x6, "DST_LINE_EXCEEDS_PITCH" },
{ 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
{ 0x8, "BAD_SRC_PIXEL_COMP_REF" },
{ 0x9, "INVALID_VALUE" },
{ 0xa, "UNUSED_FIELD" },
{ 0xb, "INVALID_OPERATION" },
{ 0xc, "NO_RESOURCES" },
{ 0xd, "INVALID_CONFIG" },
{}
};
static void
gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
{
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x104418 + base);
const struct nvkm_enum *en =
nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
}
void
gp100_ce_intr(struct nvkm_engine *ce)
{
struct nvkm_subdev *subdev = &ce->subdev;
struct nvkm_device *device = subdev->device;
const u32 base = subdev->inst * 0x80;
u32 mask = nvkm_rd32(device, 0x10440c + base);
u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
if (intr & 0x00000001) { //XXX: guess
nvkm_warn(subdev, "BLOCKPIPE\n");
nvkm_wr32(device, 0x104410 + base, 0x00000001);
intr &= ~0x00000001;
}
if (intr & 0x00000002) { //XXX: guess
nvkm_warn(subdev, "NONBLOCKPIPE\n");
nvkm_wr32(device, 0x104410 + base, 0x00000002);
intr &= ~0x00000002;
}
if (intr & 0x00000004) {
gp100_ce_intr_launcherr(ce, base);
nvkm_wr32(device, 0x104410 + base, 0x00000004);
intr &= ~0x00000004;
}
if (intr) {
nvkm_warn(subdev, "intr %08x\n", intr);
nvkm_wr32(device, 0x104410 + base, intr);
}
}
static const struct nvkm_engine_func
gp100_ce = {
.intr = gp100_ce_intr,
.sclass = {
{ -1, -1, PASCAL_DMA_COPY_A },
{}
}
};
int
gp100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
return nvkm_engine_new_(&gp100_ce, device, type, inst, true, pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
int
ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
{
struct nvkm_falcon *falcon = fw->falcon;
struct nvkm_device *device = falcon->owner->device;
u32 reg_fuse_version;
int idx;
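/* Choose which of the bundled signatures to use: each engine class has a
 * per-ucode fuse-version register, and the signature index is the gap
 * between the firmware's fuse version and the one burned into the chip;
 * an unprogrammed fuse selects the last signature. The per-engine
 * register blocks are as probed below. */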
FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
FLCN_DBG(falcon, "fuse_version: %d", fw->fuse_ver);
if (fw->engine_id & 0x00000001) {
reg_fuse_version = nvkm_rd32(device, 0x824140 + (fw->ucode_id - 1) * 4);
} else
if (fw->engine_id & 0x00000004) {
reg_fuse_version = nvkm_rd32(device, 0x824100 + (fw->ucode_id - 1) * 4);
} else
if (fw->engine_id & 0x00000400) {
reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
} else {
WARN_ON(1);
return -ENOSYS;
}
FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
if (reg_fuse_version) {
reg_fuse_version = fls(reg_fuse_version);
FLCN_DBG(falcon, "reg_fuse_version: %d", reg_fuse_version);
if (WARN_ON(fw->fuse_ver < reg_fuse_version))
return -EINVAL;
idx = fw->fuse_ver - reg_fuse_version;
} else {
idx = fw->sig_nr - 1;
}
return idx;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c |
/*
* Copyright 2022 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
void
gm200_flcn_tracepc(struct nvkm_falcon *falcon)
{
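/* Dump the falcon's saved-PC trace buffer: TIDX bits 23:16 hold the
 * entry count, writing an index to 0x148 selects an entry, and the saved
 * PC is then read back from 0x14c. Register roles inferred from this
 * routine itself. */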
u32 sctl = nvkm_falcon_rd32(falcon, 0x240);
u32 tidx = nvkm_falcon_rd32(falcon, 0x148);
int nr = (tidx & 0x00ff0000) >> 16, sp, ip;
FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx);
for (sp = 0; sp < nr; sp++) {
nvkm_falcon_wr32(falcon, 0x148, sp);
ip = nvkm_falcon_rd32(falcon, 0x14c);
FLCN_ERR(falcon, "TRACEPC: %08x", ip);
}
}
static void
gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
{
while (len >= 4) {
*(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
img += 4;
len -= 4;
}
/* Sigh. Tegra PMU FW's init message... */
if (len) {
u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
while (len--) {
*(u8 *)img++ = data & 0xff;
data >>= 8;
}
}
}
static void
gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
{
nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
}
static void
gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
img += 4;
len -= 4;
}
WARN_ON(len);
}
static void
gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
{
nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
}
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
.min = 1,
.max = 0x100,
.wr_init = gm200_flcn_pio_dmem_wr_init,
.wr = gm200_flcn_pio_dmem_wr,
.rd_init = gm200_flcn_pio_dmem_rd_init,
.rd = gm200_flcn_pio_dmem_rd,
};
static void
gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
{
nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
}
static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
img += 4;
len -= 4;
}
}
const struct nvkm_falcon_func_pio
gm200_flcn_imem_pio = {
.min = 0x100,
.max = 0x100,
.wr_init = gm200_flcn_pio_imem_wr_init,
.wr = gm200_flcn_pio_imem_wr,
};
int
gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
{
if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
return -1;
return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
}
void
gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
}
int
gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);
if (nvkm_msec(falcon->owner->device, 10,
if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
break;
) < 0)
return -ETIMEDOUT;
return 0;
}
int
gm200_flcn_enable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
int ret;
if (falcon->func->reset_eng) {
ret = falcon->func->reset_eng(falcon);
if (ret)
return ret;
}
if (falcon->func->select) {
ret = falcon->func->select(falcon);
if (ret)
return ret;
}
if (falcon->func->reset_pmc)
nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
ret = falcon->func->reset_wait_mem_scrubbing(falcon);
if (ret)
return ret;
nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
return 0;
}
int
gm200_flcn_disable(struct nvkm_falcon *falcon)
{
struct nvkm_device *device = falcon->owner->device;
int ret;
if (falcon->func->select) {
ret = falcon->func->select(falcon);
if (ret)
return ret;
}
nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);
if (falcon->func->reset_pmc) {
if (falcon->func->reset_prep) {
ret = falcon->func->reset_prep(falcon);
if (ret)
return ret;
}
nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
}
if (falcon->func->reset_eng) {
ret = falcon->func->reset_eng(falcon);
if (ret)
return ret;
}
return 0;
}
int
gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
struct nvkm_falcon *falcon = fw->falcon;
u32 mbox0, mbox1;
int ret = 0;
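/* Post the mailbox values (0xcafebeef standing in when the caller
 * supplies none), point the boot vector at the firmware entry, set the
 * CPUCTL start bit, then poll for the halted bit; register roles follow
 * the usual falcon CPUCTL/BOOTVEC layout. */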
nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
if (pmbox1)
nvkm_falcon_wr32(falcon, 0x044, *pmbox1);
nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
nvkm_falcon_wr32(falcon, 0x100, 0x00000002);
if (nvkm_msec(falcon->owner->device, 2000,
if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
break;
) < 0)
ret = -ETIMEDOUT;
mbox0 = nvkm_falcon_rd32(falcon, 0x040);
mbox1 = nvkm_falcon_rd32(falcon, 0x044);
if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
ret = ret ?: -EIO;
if (irqsclr)
nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);
return ret;
}
int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
int target, ret;
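/* When an instance block is attached, bind it through the falcon's FBIF
 * and wait on the bind state machine before loading; the polled status
 * values (5, then 0 after enabling context DMA) are taken from observed
 * behaviour, not documented names. */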
if (fw->inst) {
nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
switch (nvkm_memory_target(fw->inst)) {
case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_HOST: target = 2; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
return -EINVAL;
}
falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));
if (nvkm_msec(falcon->owner->device, 10,
if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
break;
) < 0)
return -ETIMEDOUT;
nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);
if (nvkm_msec(falcon->owner->device, 10,
if (falcon->func->bind_stat(falcon, false) == 0)
break;
) < 0)
return -ETIMEDOUT;
} else {
nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
}
if (fw->boot) {
switch (nvkm_memory_target(&fw->fw.mem.memory)) {
case NVKM_MEM_TARGET_VRAM: target = 4; break;
case NVKM_MEM_TARGET_HOST: target = 5; break;
case NVKM_MEM_TARGET_NCOH: target = 6; break;
default:
WARN_ON(1);
return -EINVAL;
}
ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
fw->boot_addr >> 8, false);
if (ret)
return ret;
return fw->func->load_bld(fw);
}
ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
if (ret)
return ret;
ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
if (ret)
return ret;
ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
DMEM, fw->dmem_base, fw->dmem_size, 0, false);
if (ret)
return ret;
return 0;
}
int
gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
{
return nvkm_falcon_reset(fw->falcon);
}
int
gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
{
struct nvkm_falcon *falcon = fw->falcon;
u32 addr = falcon->func->debug;
int ret = 0;
if (addr) {
ret = nvkm_falcon_enable(falcon);
if (ret)
return ret;
if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
*sig_base_src = fw->sig_base_dbg;
return 1;
}
}
return ret;
}
const struct nvkm_falcon_fw_func
gm200_flcn_fw = {
.signature = gm200_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.load = gm200_flcn_fw_load,
.boot = gm200_flcn_fw_boot,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c |
/*
* Copyright 2022 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static void
gp102_flcn_pio_emem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
{
while (len >= 4) {
*(u32 *)img = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
img += 4;
len -= 4;
}
}
static void
gp102_flcn_pio_emem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
{
nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(25) | dmem_base);
}
static void
gp102_flcn_pio_emem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), *(u32 *)img);
img += 4;
len -= 4;
}
}
static void
gp102_flcn_pio_emem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 emem_base)
{
nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(24) | emem_base);
}
const struct nvkm_falcon_func_pio
gp102_flcn_emem_pio = {
.min = 4,
.max = 0x100,
.wr_init = gp102_flcn_pio_emem_wr_init,
.wr = gp102_flcn_pio_emem_wr,
.rd_init = gp102_flcn_pio_emem_rd_init,
.rd = gp102_flcn_pio_emem_rd,
};
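/* Engine-level falcon reset: run the optional chip-specific prep hook,
 * pulse the reset bit in 0x3c0, then wait for IMEM/DMEM scrubbing to
 * complete.
 */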
int
gp102_flcn_reset_eng(struct nvkm_falcon *falcon)
{
int ret;
if (falcon->func->reset_prep) {
ret = falcon->func->reset_prep(falcon);
if (ret)
return ret;
}
nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
udelay(10);
nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
return falcon->func->reset_wait_mem_scrubbing(falcon);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>
static const struct nvkm_falcon_func_dma *
nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
switch (*mem_type) {
case IMEM: return falcon->func->imem_dma;
case DMEM: return falcon->func->dmem_dma;
default:
return NULL;
}
}
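/* DMA a firmware image into falcon IMEM/DMEM in fixed 256-byte chunks,
 * optionally tracing each chunk.  Lengths that are not a multiple of the
 * chunk size are rejected.
 */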
int
nvkm_falcon_dma_wr(struct nvkm_falcon *falcon, const u8 *img, u64 dma_addr, u32 dma_base,
enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec)
{
const struct nvkm_falcon_func_dma *dma = nvkm_falcon_dma(falcon, &mem_type, &mem_base);
const char *type = nvkm_falcon_mem(mem_type);
const int dmalen = 256;
u32 dma_start = 0;
u32 dst, src, cmd;
int ret, i;
if (WARN_ON(!dma->xfer))
return -EINVAL;
if (mem_type == DMEM) {
dma_start = dma_base;
dma_addr += dma_base;
}
FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x (%010llx %08x)",
type, mem_base, len, dma_base, dma_addr - dma_base, dma_start);
if (WARN_ON(!len || (len & (dmalen - 1))))
return -EINVAL;
ret = dma->init(falcon, dma_addr, dmalen, mem_type, sec, &cmd);
if (ret)
return ret;
dst = mem_base;
src = dma_base;
if (len) {
while (len >= dmalen) {
dma->xfer(falcon, dst, src - dma_start, cmd);
if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
for (i = 0; i < dmalen; i += 4, mem_base += 4) {
const int w = 8, x = (i / 4) % w;
if (x == 0)
printk(KERN_INFO "%s %08x <-", type, mem_base);
printk(KERN_CONT " %08x", *(u32 *)(img + src + i));
if (x == (w - 1) || ((i + 4) == dmalen))
printk(KERN_CONT " <- %08x+%08x", dma_base,
src + i - dma_base - (x * 4));
if (i == (7 * 4))
printk(KERN_CONT " *");
}
}
if (nvkm_msec(falcon->owner->device, 2000,
if (dma->done(falcon))
break;
) < 0)
return -ETIMEDOUT;
src += dmalen;
dst += dmalen;
len -= dmalen;
}
WARN_ON(len);
}
return 0;
}
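/* Select the PIO accessors for a memory type.  On falcons with an EMEM,
 * DMEM offsets at or above func->emem_addr are transparently redirected
 * to the EMEM accessors.
 */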
static const struct nvkm_falcon_func_pio *
nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
switch (*mem_type) {
case IMEM:
return falcon->func->imem_pio;
case DMEM:
if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
return falcon->func->dmem_pio;
*mem_base -= falcon->func->emem_addr;
fallthrough;
case EMEM:
return falcon->func->emem_pio;
default:
return NULL;
}
}
int
nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base,
const u8 *img, u32 img_base, int len)
{
const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
const char *type = nvkm_falcon_mem(mem_type);
int xfer_len;
if (WARN_ON(!pio || !pio->rd))
return -EINVAL;
FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base);
if (WARN_ON(!len || (len & (pio->min - 1))))
return -EINVAL;
pio->rd_init(falcon, port, mem_base);
do {
xfer_len = min(len, pio->max);
pio->rd(falcon, port, img, xfer_len);
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
if (((img_base / 4) % 8) == 0)
printk(KERN_INFO "%s %08x ->", type, mem_base);
printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
}
}
img += xfer_len;
len -= xfer_len;
} while (len);
return 0;
}
int
nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
{
const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
const char *type = nvkm_falcon_mem(mem_type);
int xfer_len;
if (WARN_ON(!pio || !pio->wr))
return -EINVAL;
FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
if (WARN_ON(!len || (len & (pio->min - 1))))
return -EINVAL;
pio->wr_init(falcon, port, sec, mem_base);
do {
xfer_len = min(len, pio->max);
pio->wr(falcon, port, img, xfer_len, tag++);
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
if (((img_base / 4) % 8) == 0)
printk(KERN_INFO "%s %08x <-", type, mem_base);
printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
if ((img_base / 4) == 7 && mem_type == IMEM)
printk(KERN_CONT " %04x", tag - 1);
}
}
img += xfer_len;
len -= xfer_len;
} while (len);
return 0;
}
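/* Legacy IMEM/DMEM load entry points: thin wrappers around the
 * chip-specific hooks.  Secure IMEM uploads are refused on falcons
 * without a secret level, and DMEM access is serialised by dmem_mutex.
 */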
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u16 tag, u8 port, bool secure)
{
if (secure && !falcon->secret) {
nvkm_warn(falcon->user,
"writing with secure tag on a non-secure falcon!\n");
return;
}
falcon->func->load_imem(falcon, data, start, size, tag, port,
secure);
}
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u8 port)
{
mutex_lock(&falcon->dmem_mutex);
falcon->func->load_dmem(falcon, data, start, size, port);
mutex_unlock(&falcon->dmem_mutex);
}
void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
falcon->func->start(falcon);
}
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
int ret;
ret = falcon->func->disable(falcon);
if (WARN_ON(ret))
return ret;
return nvkm_falcon_enable(falcon);
}
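/* One-time probe of the falcon's configuration: 0x12c exposes the core
 * version, secret (HS) level and port counts, while 0x108 carries the
 * IMEM/DMEM size limits.
 */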
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
const struct nvkm_falcon_func *func = falcon->func;
const struct nvkm_subdev *subdev = falcon->owner;
u32 reg;
if (!falcon->addr) {
falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
if (WARN_ON(!falcon->addr))
return -ENODEV;
}
reg = nvkm_falcon_rd32(falcon, 0x12c);
falcon->version = reg & 0xf;
falcon->secret = (reg >> 4) & 0x3;
falcon->code.ports = (reg >> 8) & 0xf;
falcon->data.ports = (reg >> 12) & 0xf;
reg = nvkm_falcon_rd32(falcon, 0x108);
falcon->code.limit = (reg & 0x1ff) << 8;
falcon->data.limit = (reg & 0x3fe00) >> 1;
if (func->debug) {
u32 val = nvkm_falcon_rd32(falcon, func->debug);
falcon->debug = (val >> 20) & 0x1;
}
return 0;
}
void
nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
if (unlikely(!falcon))
return;
mutex_lock(&falcon->mutex);
if (falcon->user == user) {
nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
falcon->user = NULL;
}
mutex_unlock(&falcon->mutex);
}
int
nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
int ret = 0;
mutex_lock(&falcon->mutex);
if (falcon->user) {
nvkm_error(user, "%s falcon already acquired by %s!\n",
falcon->name, falcon->user->name);
mutex_unlock(&falcon->mutex);
return -EBUSY;
}
nvkm_debug(user, "acquired %s falcon\n", falcon->name);
if (!falcon->oneinit)
ret = nvkm_falcon_oneinit(falcon);
falcon->user = user;
mutex_unlock(&falcon->mutex);
return ret;
}
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}
int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
struct nvkm_subdev *subdev, const char *name, u32 addr,
struct nvkm_falcon *falcon)
{
falcon->func = func;
falcon->owner = subdev;
falcon->name = name;
falcon->addr = addr;
falcon->addr2 = func->addr2;
mutex_init(&falcon->mutex);
mutex_init(&falcon->dmem_mutex);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/base.c |
/*
* Copyright 2022 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/mmu.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
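/* Patch the chosen (production or debug) signature set into the firmware
 * image before boot.  func->signature() selects which set to use and
 * returns its index within the signature array.
 */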
int
nvkm_falcon_fw_patch(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
u32 sig_base_src = fw->sig_base_prd;
u32 src, dst, len, i;
int idx = 0;
FLCNFW_DBG(fw, "patching sigs:%d size:%d", fw->sig_nr, fw->sig_size);
if (fw->func->signature) {
idx = fw->func->signature(fw, &sig_base_src);
if (idx < 0)
return idx;
}
src = idx * fw->sig_size;
dst = fw->sig_base_img;
len = fw->sig_size / 4;
FLCNFW_DBG(fw, "patch idx:%d src:%08x dst:%08x", idx, sig_base_src + src, dst);
for (i = 0; i < len; i++) {
u32 sig = *(u32 *)(fw->sigs + src);
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
if (i % 8 == 0)
printk(KERN_INFO "sig -> %08x:", dst);
printk(KERN_CONT " %08x", sig);
}
*(u32 *)(fw->fw.img + dst) = sig;
src += 4;
dst += 4;
}
return 0;
}
static void
nvkm_falcon_fw_dtor_sigs(struct nvkm_falcon_fw *fw)
{
kfree(fw->sigs);
fw->sigs = NULL;
}
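/* Full boot sequence: acquire the falcon, patch signatures (first boot
 * only), reset, run the optional setup hook, load the image and execute
 * it.  The falcon is released on failure, or when the caller asks for
 * release.
 */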
int
nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
bool release, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
struct nvkm_falcon *falcon = fw->falcon;
int ret;
ret = nvkm_falcon_get(falcon, user);
if (ret)
return ret;
if (fw->sigs) {
ret = nvkm_falcon_fw_patch(fw);
if (ret)
goto done;
nvkm_falcon_fw_dtor_sigs(fw);
}
FLCNFW_DBG(fw, "resetting");
fw->func->reset(fw);
FLCNFW_DBG(fw, "loading");
if (fw->func->setup) {
ret = fw->func->setup(fw);
if (ret)
goto done;
}
ret = fw->func->load(fw);
if (ret)
goto done;
FLCNFW_DBG(fw, "booting");
ret = fw->func->boot(fw, pmbox0, pmbox1, mbox0_ok, irqsclr);
if (ret)
FLCNFW_ERR(fw, "boot failed: %d", ret);
else
FLCNFW_DBG(fw, "booted");
done:
if (ret || release)
nvkm_falcon_put(falcon, user);
return ret;
}
int
nvkm_falcon_fw_oneinit(struct nvkm_falcon_fw *fw, struct nvkm_falcon *falcon,
struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
int ret;
fw->falcon = falcon;
fw->vmm = nvkm_vmm_ref(vmm);
fw->inst = nvkm_memory_ref(inst);
if (fw->boot) {
FLCN_DBG(falcon, "mapping %s fw", fw->fw.name);
ret = nvkm_vmm_get(fw->vmm, 12, nvkm_memory_size(&fw->fw.mem.memory), &fw->vma);
if (ret) {
FLCN_ERR(falcon, "get %d", ret);
return ret;
}
ret = nvkm_memory_map(&fw->fw.mem.memory, 0, fw->vmm, fw->vma, NULL, 0);
if (ret) {
FLCN_ERR(falcon, "map %d", ret);
return ret;
}
}
return 0;
}
void
nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
{
nvkm_vmm_put(fw->vmm, &fw->vma);
nvkm_vmm_unref(&fw->vmm);
nvkm_memory_unref(&fw->inst);
nvkm_falcon_fw_dtor_sigs(fw);
nvkm_firmware_dtor(&fw->fw);
}
static const struct nvkm_firmware_func
nvkm_falcon_fw_dma = {
.type = NVKM_FIRMWARE_IMG_DMA,
};
static const struct nvkm_firmware_func
nvkm_falcon_fw = {
.type = NVKM_FIRMWARE_IMG_RAM,
};
int
nvkm_falcon_fw_sign(struct nvkm_falcon_fw *fw, u32 sig_base_img, u32 sig_size, const u8 *sigs,
int sig_nr_prd, u32 sig_base_prd, int sig_nr_dbg, u32 sig_base_dbg)
{
fw->sig_base_prd = sig_base_prd;
fw->sig_base_dbg = sig_base_dbg;
fw->sig_base_img = sig_base_img;
fw->sig_size = sig_size;
fw->sig_nr = sig_nr_prd + sig_nr_dbg;
fw->sigs = kmalloc_array(fw->sig_nr, fw->sig_size, GFP_KERNEL);
if (!fw->sigs)
return -ENOMEM;
memcpy(fw->sigs, sigs + sig_base_prd, sig_nr_prd * fw->sig_size);
if (sig_nr_dbg)
memcpy(fw->sigs + sig_size, sigs + sig_base_dbg, sig_nr_dbg * fw->sig_size);
return 0;
}
int
nvkm_falcon_fw_ctor(const struct nvkm_falcon_fw_func *func, const char *name,
struct nvkm_device *device, bool dma, const void *src, u32 len,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
const struct nvkm_firmware_func *type = dma ? &nvkm_falcon_fw_dma : &nvkm_falcon_fw;
int ret;
fw->func = func;
ret = nvkm_firmware_ctor(type, name, device, src, len, &fw->fw);
if (ret)
return ret;
return falcon ? nvkm_falcon_fw_oneinit(fw, falcon, NULL, NULL) : 0;
}
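/* Parse a heavy-secure firmware image in the pre-v2 layout: locate the
 * signature patch location, describe the IMEM/DMEM segments from the
 * load header, and optionally extract a separate bootloader image.
 */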
int
nvkm_falcon_fw_ctor_hs(const struct nvkm_falcon_fw_func *func, const char *name,
struct nvkm_subdev *subdev, const char *bl, const char *img, int ver,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
const struct firmware *blob;
const struct nvfw_bin_hdr *hdr;
const struct nvfw_hs_header *hshdr;
const struct nvfw_hs_load_header *lhdr;
const struct nvfw_bl_desc *desc;
u32 loc, sig;
int ret;
ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob);
if (ret)
return ret;
hdr = nvfw_bin_hdr(subdev, blob->data);
hshdr = nvfw_hs_header(subdev, blob->data + hdr->header_offset);
ret = nvkm_falcon_fw_ctor(func, name, subdev->device, bl != NULL,
blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
if (ret)
goto done;
/* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's
* standard format, and don't have the indirection seen in the 0x10de
* case.
*/
switch (hdr->bin_magic) {
case 0x000010de:
loc = *(u32 *)(blob->data + hshdr->patch_loc);
sig = *(u32 *)(blob->data + hshdr->patch_sig);
break;
case 0x3b1d14f0:
loc = hshdr->patch_loc;
sig = hshdr->patch_sig;
break;
default:
WARN_ON(1);
ret = -EINVAL;
goto done;
}
ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size, blob->data,
1, hshdr->sig_prod_offset + sig,
1, hshdr->sig_dbg_offset + sig);
if (ret)
goto done;
lhdr = nvfw_hs_load_header(subdev, blob->data + hshdr->hdr_offset);
fw->nmem_base_img = 0;
fw->nmem_base = lhdr->non_sec_code_off;
fw->nmem_size = lhdr->non_sec_code_size;
fw->imem_base_img = lhdr->apps[0];
fw->imem_base = ALIGN(lhdr->apps[0], 0x100);
fw->imem_size = lhdr->apps[lhdr->num_apps + 0];
fw->dmem_base_img = lhdr->data_dma_base;
fw->dmem_base = 0;
fw->dmem_size = lhdr->data_size;
fw->dmem_sign = loc - lhdr->data_dma_base;
if (bl) {
nvkm_firmware_put(blob);
ret = nvkm_firmware_load_name(subdev, bl, "", ver, &blob);
if (ret)
return ret;
hdr = nvfw_bin_hdr(subdev, blob->data);
desc = nvfw_bl_desc(subdev, blob->data + hdr->header_offset);
fw->boot_addr = desc->start_tag << 8;
fw->boot_size = desc->code_size;
fw->boot = kmemdup(blob->data + hdr->data_offset + desc->code_off,
fw->boot_size, GFP_KERNEL);
if (!fw->boot)
ret = -ENOMEM;
} else {
fw->boot_addr = fw->nmem_base;
}
done:
if (ret)
nvkm_falcon_fw_dtor(fw);
nvkm_firmware_put(blob);
return ret;
}
int
nvkm_falcon_fw_ctor_hs_v2(const struct nvkm_falcon_fw_func *func, const char *name,
struct nvkm_subdev *subdev, const char *img, int ver,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
const struct nvfw_bin_hdr *hdr;
const struct nvfw_hs_header_v2 *hshdr;
const struct nvfw_hs_load_header_v2 *lhdr;
const struct firmware *blob;
u32 loc, sig, cnt, *meta;
int ret;
ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob);
if (ret)
return ret;
hdr = nvfw_bin_hdr(subdev, blob->data);
hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
meta = (u32 *)(blob->data + hshdr->meta_data_offset);
loc = *(u32 *)(blob->data + hshdr->patch_loc);
sig = *(u32 *)(blob->data + hshdr->patch_sig);
cnt = *(u32 *)(blob->data + hshdr->num_sig);
ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
if (ret)
goto done;
ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
cnt, hshdr->sig_prod_offset + sig, 0, 0);
if (ret)
goto done;
lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
fw->imem_base_img = lhdr->app[0].offset;
fw->imem_base = 0;
fw->imem_size = lhdr->app[0].size;
fw->dmem_base_img = lhdr->os_data_offset;
fw->dmem_base = 0;
fw->dmem_size = lhdr->os_data_size;
fw->dmem_sign = loc - lhdr->os_data_offset;
fw->boot_addr = lhdr->app[0].offset;
fw->fuse_ver = meta[0];
fw->engine_id = meta[1];
fw->ucode_id = meta[2];
done:
if (ret)
nvkm_falcon_fw_dtor(fw);
nvkm_firmware_put(blob);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/fw.c |
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "qmgr.h"
static bool
nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
{
u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
u32 free;
size = ALIGN(size, QUEUE_ALIGNMENT);
if (head >= tail) {
free = cmdq->offset + cmdq->size - head;
free -= HDR_SIZE;
if (size > free) {
*rewind = true;
head = cmdq->offset;
}
}
if (head < tail)
free = tail - head - 1;
return size <= free;
}
static void
nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
{
struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false);
cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
}
static void
nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
{
struct nvfw_falcon_cmd cmd;
cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
cmd.size = sizeof(cmd);
nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
cmdq->position = cmdq->offset;
}
static int
nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
{
struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
bool rewind = false;
mutex_lock(&cmdq->mutex);
if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
FLCNQ_DBG(cmdq, "queue full");
mutex_unlock(&cmdq->mutex);
return -EAGAIN;
}
cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
if (rewind)
nvkm_falcon_cmdq_rewind(cmdq);
return 0;
}
static void
nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
{
nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
mutex_unlock(&cmdq->mutex);
}
static int
nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
{
static unsigned timeout = 2000;
unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
int ret = -EAGAIN;
while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
if (ret) {
FLCNQ_ERR(cmdq, "timeout waiting for queue space");
return ret;
}
nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
nvkm_falcon_cmdq_close(cmdq);
return ret;
}
/* specifies that we want to know the command status in the answer message */
#define CMD_FLAGS_STATUS BIT(0)
/* specifies that we want an interrupt when the answer message is queued */
#define CMD_FLAGS_INTR BIT(1)
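/* Submit a command: allocate a sequence ID so the reply can be matched,
 * write the command to the queue, then (for synchronous submissions) wait
 * for the message-queue handler to complete the sequence.
 */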
int
nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
nvkm_falcon_qmgr_callback cb, void *priv,
unsigned long timeout)
{
struct nvkm_falcon_qmgr_seq *seq;
int ret;
if (!wait_for_completion_timeout(&cmdq->ready,
msecs_to_jiffies(1000))) {
FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
return -ETIMEDOUT;
}
seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
if (IS_ERR(seq))
return PTR_ERR(seq);
cmd->seq_id = seq->id;
cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
seq->state = SEQ_STATE_USED;
seq->async = !timeout;
seq->callback = cb;
seq->priv = priv;
ret = nvkm_falcon_cmdq_write(cmdq, cmd);
if (ret) {
seq->state = SEQ_STATE_PENDING;
nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
return ret;
}
if (!seq->async) {
if (!wait_for_completion_timeout(&seq->done, timeout)) {
FLCNQ_ERR(cmdq, "timeout waiting for reply");
return -ETIMEDOUT;
}
ret = seq->result;
nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
}
return ret;
}
void
nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
{
reinit_completion(&cmdq->ready);
}
void
nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
u32 index, u32 offset, u32 size)
{
const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
cmdq->offset = offset;
cmdq->size = size;
complete_all(&cmdq->ready);
FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
index, cmdq->offset, cmdq->size);
}
void
nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
{
struct nvkm_falcon_cmdq *cmdq = *pcmdq;
if (cmdq) {
kfree(*pcmdq);
*pcmdq = NULL;
}
}
int
nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
struct nvkm_falcon_cmdq **pcmdq)
{
struct nvkm_falcon_cmdq *cmdq;
if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
return -ENOMEM;
cmdq->qmgr = qmgr;
cmdq->name = name;
mutex_init(&cmdq->mutex);
init_completion(&cmdq->ready);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c |
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "qmgr.h"
static void
nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
{
spin_lock(&msgq->lock);
msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
}
static void
nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
{
struct nvkm_falcon *falcon = msgq->qmgr->falcon;
if (commit)
nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
spin_unlock(&msgq->lock);
}
bool
nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
{
u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
return head == tail;
}
static int
nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
{
struct nvkm_falcon *falcon = msgq->qmgr->falcon;
u32 head, tail, available;
head = nvkm_falcon_rd32(falcon, msgq->head_reg);
/* has the buffer looped? */
if (head < msgq->position)
msgq->position = msgq->offset;
tail = msgq->position;
available = head - tail;
if (size > available) {
FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
size, available);
return -EINVAL;
}
nvkm_falcon_pio_rd(falcon, 0, DMEM, tail, data, 0, size);
msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
return 0;
}
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
int ret = 0;
nvkm_falcon_msgq_open(msgq);
if (nvkm_falcon_msgq_empty(msgq))
goto close;
ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
if (ret) {
FLCNQ_ERR(msgq, "failed to read message header");
goto close;
}
if (hdr->size > MSG_BUF_SIZE) {
FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
ret = -ENOSPC;
goto close;
}
if (hdr->size > HDR_SIZE) {
u32 read_size = hdr->size - HDR_SIZE;
ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
if (ret) {
FLCNQ_ERR(msgq, "failed to read message data");
goto close;
}
}
ret = 1;
close:
nvkm_falcon_msgq_close(msgq, (ret >= 0));
return ret;
}
static int
nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
struct nvkm_falcon_qmgr_seq *seq;
seq = &msgq->qmgr->seq.id[hdr->seq_id];
if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
return -EINVAL;
}
if (seq->state == SEQ_STATE_USED) {
if (seq->callback)
seq->result = seq->callback(seq->priv, hdr);
}
if (seq->async) {
nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
return 0;
}
complete_all(&seq->done);
return 0;
}
void
nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
{
/*
* We are invoked from a worker thread, so normally we have plenty of
* stack space to work with.
*/
u8 msg_buffer[MSG_BUF_SIZE];
struct nvfw_falcon_msg *hdr = (void *)msg_buffer;
while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
nvkm_falcon_msgq_exec(msgq, hdr);
}
int
nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
void *data, u32 size)
{
struct nvkm_falcon *falcon = msgq->qmgr->falcon;
struct nvfw_falcon_msg *hdr = data;
int ret;
msgq->head_reg = falcon->func->msgq.head;
msgq->tail_reg = falcon->func->msgq.tail;
msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);
nvkm_falcon_msgq_open(msgq);
ret = nvkm_falcon_msgq_pop(msgq, data, size);
if (ret == 0 && hdr->size != size) {
FLCN_ERR(falcon, "unexpected init message size %d vs %d",
hdr->size, size);
ret = -EINVAL;
}
nvkm_falcon_msgq_close(msgq, ret == 0);
return ret;
}
void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
u32 index, u32 offset, u32 size)
{
const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;
msgq->head_reg = func->msgq.head + index * func->msgq.stride;
msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
msgq->offset = offset;
FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
index, msgq->offset, size);
}
void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
{
struct nvkm_falcon_msgq *msgq = *pmsgq;
if (msgq) {
kfree(*pmsgq);
*pmsgq = NULL;
}
}
int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
struct nvkm_falcon_msgq **pmsgq)
{
struct nvkm_falcon_msgq *msgq;
if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
return -ENOMEM;
msgq->qmgr = qmgr;
msgq->name = name;
spin_lock_init(&msgq->lock);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c |
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "qmgr.h"
struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
{
const struct nvkm_subdev *subdev = qmgr->falcon->owner;
struct nvkm_falcon_qmgr_seq *seq;
u32 index;
mutex_lock(&qmgr->seq.mutex);
index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
nvkm_error(subdev, "no free sequence available\n");
mutex_unlock(&qmgr->seq.mutex);
return ERR_PTR(-EAGAIN);
}
set_bit(index, qmgr->seq.tbl);
mutex_unlock(&qmgr->seq.mutex);
seq = &qmgr->seq.id[index];
seq->state = SEQ_STATE_PENDING;
return seq;
}
void
nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
struct nvkm_falcon_qmgr_seq *seq)
{
/* no need to acquire seq.mutex since clear_bit is atomic */
seq->state = SEQ_STATE_FREE;
seq->callback = NULL;
reinit_completion(&seq->done);
clear_bit(seq->id, qmgr->seq.tbl);
}
void
nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
{
struct nvkm_falcon_qmgr *qmgr = *pqmgr;
if (qmgr) {
kfree(*pqmgr);
*pqmgr = NULL;
}
}
int
nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
struct nvkm_falcon_qmgr **pqmgr)
{
struct nvkm_falcon_qmgr *qmgr;
int i;
if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
return -ENOMEM;
qmgr->falcon = falcon;
mutex_init(&qmgr->seq.mutex);
for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
qmgr->seq.id[i].id = i;
init_completion(&qmgr->seq.id[i].done);
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c |
/*
* Copyright 2022 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/mc.h>
#include <subdev/timer.h>
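/* GA102 moved falcon code/data upload from PIO to a DMA interface; the
 * registers touched below (0x110-0x128) appear to form its control block.
 */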
static bool
ga102_flcn_dma_done(struct nvkm_falcon *falcon)
{
return !!(nvkm_falcon_rd32(falcon, 0x118) & 0x00000002);
}
static void
ga102_flcn_dma_xfer(struct nvkm_falcon *falcon, u32 mem_base, u32 dma_base, u32 cmd)
{
nvkm_falcon_wr32(falcon, 0x114, mem_base);
nvkm_falcon_wr32(falcon, 0x11c, dma_base);
nvkm_falcon_wr32(falcon, 0x118, cmd);
}
static int
ga102_flcn_dma_init(struct nvkm_falcon *falcon, u64 dma_addr, int xfer_len,
enum nvkm_falcon_mem mem_type, bool sec, u32 *cmd)
{
*cmd = (ilog2(xfer_len) - 2) << 8;
if (mem_type == IMEM)
*cmd |= 0x00000010;
if (sec)
*cmd |= 0x00000004;
nvkm_falcon_wr32(falcon, 0x110, dma_addr >> 8);
nvkm_falcon_wr32(falcon, 0x128, 0x00000000);
return 0;
}
const struct nvkm_falcon_func_dma
ga102_flcn_dma = {
.init = ga102_flcn_dma_init,
.xfer = ga102_flcn_dma_xfer,
.done = ga102_flcn_dma_done,
};
int
ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);
if (nvkm_msec(falcon->owner->device, 20,
if (!(nvkm_falcon_rd32(falcon, 0x0f4) & 0x00001000))
break;
) < 0)
return -ETIMEDOUT;
return 0;
}
int
ga102_flcn_reset_prep(struct nvkm_falcon *falcon)
{
nvkm_falcon_rd32(falcon, 0x0f4);
nvkm_usec(falcon->owner->device, 150,
if (nvkm_falcon_rd32(falcon, 0x0f4) & 0x80000000)
break;
_warn = false;
);
return 0;
}
int
ga102_flcn_select(struct nvkm_falcon *falcon)
{
if ((nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000010) != 0x00000000) {
nvkm_falcon_wr32(falcon, falcon->addr2 + 0x668, 0x00000000);
if (nvkm_msec(falcon->owner->device, 10,
if (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000001)
break;
) < 0)
return -ETIMEDOUT;
}
return 0;
}
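/* GA102 secure boot hands a few extra parameters to the falcon (DMEM
 * signature offset, engine ID, ucode ID) via addr2-relative registers
 * before reusing the GM200 start/halt sequence.
 */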
int
ga102_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *mbox0, u32 *mbox1, u32 mbox0_ok, u32 irqsclr)
{
struct nvkm_falcon *falcon = fw->falcon;
nvkm_falcon_wr32(falcon, falcon->addr2 + 0x210, fw->dmem_sign);
nvkm_falcon_wr32(falcon, falcon->addr2 + 0x19c, fw->engine_id);
nvkm_falcon_wr32(falcon, falcon->addr2 + 0x198, fw->ucode_id);
nvkm_falcon_wr32(falcon, falcon->addr2 + 0x180, 0x00000001);
return gm200_flcn_fw_boot(fw, mbox0, mbox1, mbox0_ok, irqsclr);
}
int
ga102_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
int ret = 0;
nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
nvkm_falcon_mask(falcon, 0x600, 0x00010007, (0 << 16) | (1 << 2) | 1);
ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->imem_base_img,
IMEM, fw->imem_base, fw->imem_size, true);
if (ret)
return ret;
ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->dmem_base_img,
DMEM, fw->dmem_base, fw->dmem_size, false);
if (ret)
return ret;
return 0;
}
const struct nvkm_falcon_fw_func
ga102_flcn_fw = {
.signature = ga100_flcn_fw_signature,
.reset = gm200_flcn_fw_reset,
.load = ga102_flcn_fw_load,
.boot = ga102_flcn_fw_boot,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c |