python_code (stringlengths 0-1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5-99)
---|---|---|
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
#include "ram.h"
static const struct nvkm_fb_func
gm107_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gm107_ram_new,
.default_bigpage = 17,
};
int
gm107_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gm107_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Lyude Paul
*/
#include "gk104.h"
#include "gf100.h"
#include "ram.h"
/*
*******************************************************************************
* PGRAPH registers for clockgating
*******************************************************************************
*/
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_unk_0[] = {
{ 0x100d10, 1, 0x0000c244 },
{ 0x100d30, 1, 0x0000c242 },
{ 0x100d3c, 1, 0x00000242 },
{ 0x100d48, 1, 0x00000242 },
{ 0x100d1c, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_vm_0[] = {
{ 0x100c98, 1, 0x00000242 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_main_0[] = {
{ 0x10f000, 1, 0x00000042 },
{ 0x17e030, 1, 0x00000044 },
{ 0x17e040, 1, 0x00000044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_bcast_0[] = {
{ 0x17ea60, 4, 0x00000044 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk104_fb_clkgate_pack[] = {
{ gk104_fb_clkgate_blcg_init_unk_0 },
{ gk104_fb_clkgate_blcg_init_vm_0 },
{ gk104_fb_clkgate_blcg_init_main_0 },
{ gk104_fb_clkgate_blcg_init_bcast_0 },
{}
};
static const struct nvkm_fb_func
gk104_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk104_fb_clkgate_pack,
};
int
gk104_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gk104_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c |
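The clockgating tables above are plain { addr, count, value } triples grouped into a pack. As a rough, self-contained illustration of how such a pack can be walked (the struct layout and the "count consecutive registers per entry" rule are simplifying assumptions here, not taken from nvkm_therm itself):
/* Standalone sketch only: simplified stand-ins for the nvkm structs above. */
#include <stdio.h>

struct clkgate_init { unsigned addr; unsigned count; unsigned data; };
struct clkgate_pack { const struct clkgate_init *init; };

static void apply_pack(const struct clkgate_pack *pack)
{
	for (; pack->init; pack++) {
		for (const struct clkgate_init *e = pack->init; e->count; e++) {
			/* assumed: each entry programs 'count' consecutive registers */
			for (unsigned i = 0; i < e->count; i++)
				printf("wr32 %06x <- %08x\n", e->addr + i * 4, e->data);
		}
	}
}

int main(void)
{
	static const struct clkgate_init unk_0[] = {
		{ 0x100d10, 1, 0x0000c244 },
		{ 0x17ea60, 4, 0x00000044 },
		{}
	};
	static const struct clkgate_pack pack[] = { { unk_0 }, {} };
	apply_pack(pack);
	return 0;
}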
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
#include "ram.h"
struct mcp77_ram {
struct nvkm_ram base;
u64 poller_base;
};
static int
mcp77_ram_init(struct nvkm_ram *base)
{
struct mcp77_ram *ram = mcp77_ram(base);
struct nvkm_device *device = ram->base.fb->subdev.device;
u32 dniso = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
u32 flush = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
/* Enable NISO poller for various clients and set their associated
* read address, only for MCP77/78 and MCP79/7A. (fd#27501)
*/
nvkm_wr32(device, 0x100c18, dniso);
nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
nvkm_wr32(device, 0x100c1c, hostnb);
nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
nvkm_wr32(device, 0x100c24, flush);
nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
return 0;
}
static const struct nvkm_ram_func
mcp77_ram_func = {
.init = mcp77_ram_init,
};
int
mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 rsvd_head = ( 256 * 1024); /* vga memory */
u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
struct mcp77_ram *ram;
int ret;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
size, &ram->base);
if (ret)
return ret;
ram->poller_base = size - rsvd_tail;
ram->base.stolen = base;
nvkm_mm_fini(&ram->base.vram);
return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c |
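The three poller values written above are just distances from the end of VRAM, expressed in 32-byte units. A minimal standalone sketch of that arithmetic, using invented size figures purely for illustration:
/* Standalone sketch of the mcp77 poller-offset math; the sample sizes are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long long size        = 256ULL << 20;           /* pretend 256 MiB of stolen memory */
	unsigned long long rsvd_tail   = (1024 * 1024) + 0x1000; /* vbios etc + poller mem, as above */
	unsigned long long poller_base = size - rsvd_tail;

	/* each value: 32-byte units between the poller slot and the end of VRAM, minus one */
	unsigned dniso  = ((size - (poller_base + 0x00)) >> 5) - 1;
	unsigned hostnb = ((size - (poller_base + 0x20)) >> 5) - 1;
	unsigned flush  = ((size - (poller_base + 0x40)) >> 5) - 1;

	printf("dniso=%#x hostnb=%#x flush=%#x\n", dniso, hostnb, flush);
	return 0;
}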
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
int
nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct pci_dev *bridge;
u32 mem, mib;
int domain = 0;
struct pci_dev *pdev = NULL;
if (dev_is_pci(fb->subdev.device->dev))
pdev = to_pci_dev(fb->subdev.device->dev);
if (pdev)
domain = pci_domain_nr(pdev->bus);
bridge = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 1));
if (!bridge) {
nvkm_error(&fb->subdev, "no bridge device\n");
return -ENODEV;
}
if (fb->subdev.device->chipset == 0x1a) {
pci_read_config_dword(bridge, 0x7c, &mem);
mib = ((mem >> 6) & 31) + 1;
} else {
pci_read_config_dword(bridge, 0x84, &mem);
mib = ((mem >> 4) & 127) + 1;
}
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
mib * 1024 * 1024, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c |
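nv1a reads the amount of memory stolen by the IGP out of the host bridge's PCI config space; the MiB count sits in a small bit-field whose offset and width depend on the chipset. A self-contained sketch of just the decode (the config dword below is an invented value):
/* Standalone sketch of the nv1a/nv2a bridge config decode; 'mem' is a made-up value. */
#include <stdio.h>

int main(void)
{
	unsigned mem     = 0x000005c0; /* pretend dword read from the bridge */
	int      is_nv1a = 1;
	unsigned mib;

	if (is_nv1a)
		mib = ((mem >> 6) & 31) + 1;  /* config offset 0x7c: 5-bit field */
	else
		mib = ((mem >> 4) & 127) + 1; /* config offset 0x84: 7-bit field */

	printf("stolen memory: %u MiB (%llu bytes)\n", mib,
	       (unsigned long long)mib * 1024 * 1024);
	return 0;
}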
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static void
nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001; /* mode = vram */
tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
void
nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
nvkm_rd32(device, 0x100600 + (i * 0x10));
}
void
nv44_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_wr32(device, 0x100850, 0x80000000);
nvkm_wr32(device, 0x100800, 0x00000001);
}
static const struct nvkm_fb_func
nv44_fb = {
.init = nv44_fb_init,
.tile.regions = 12,
.tile.init = nv44_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
};
int
nv44_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv44_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c |
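The tile descriptor filled in above is three values: the start address with bit 0 set to select VRAM, an inclusive limit, and the pitch. A tiny standalone sketch of that encoding with invented region parameters:
/* Standalone sketch of the nv44 tile descriptor encoding; inputs are invented. */
#include <stdio.h>

int main(void)
{
	unsigned addr  = 0x00100000; /* pretend region start */
	unsigned size  = 0x00200000; /* pretend region size */
	unsigned pitch = 0x00004000; /* pretend pitch */

	unsigned taddr = 0x00000001 | addr;                       /* bit 0: mode = vram */
	unsigned limit = (addr + size > 1 ? addr + size : 1) - 1; /* inclusive end */

	printf("addr=%08x limit=%08x pitch=%08x\n", taddr, limit, pitch);
	return 0;
}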
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static void
nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x01000000;
#endif
}
}
static const struct nvkm_fb_func
nv25_fb = {
.tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv25_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv25_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv25_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c |
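The compression-tag allocation above works in 0x40-sized units: the region is converted to a tile count, split across the memory partitions, and rounded back up before asking the tag allocator. A standalone sketch of that sizing step (partition count and region size are invented, and the two helpers are simplified equivalents of the kernel macros):
/* Standalone sketch of the nv25 tag sizing; sample inputs are invented. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ROUND_UP(x, y)     (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
	unsigned size  = 4 << 20; /* pretend a 4 MiB tile region */
	unsigned parts = 2;       /* pretend two memory partitions */

	unsigned tiles = DIV_ROUND_UP(size, 0x40);
	unsigned tags  = ROUND_UP(tiles / parts, 0x40);

	printf("tiles=%#x tags=%#x\n", tiles, tags);
	return 0;
}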
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
#include "ram.h"
#include <core/memory.h>
#include <core/option.h>
#include <subdev/therm.h>
void
gf100_fb_intr(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_subdev *subdev = &fb->base.subdev;
struct nvkm_device *device = subdev->device;
u32 intr = nvkm_rd32(device, 0x000100);
if (intr & 0x08000000)
nvkm_debug(subdev, "PFFB intr\n");
if (intr & 0x00002000)
nvkm_debug(subdev, "PBFB intr\n");
}
int
gf100_fb_oneinit(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
int ret, size = 1 << (fb->base.page ? fb->base.page : 17);
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = max(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
true, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
true, &fb->base.mmu_wr);
if (ret)
return ret;
return 0;
}
int
gf100_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
default:
return -EINVAL;
}
return 0;
}
void
gf100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
{
nvkm_wr32(fb->subdev.device, 0x100c10, fb->sysmem.flush_page_addr >> 8);
}
void
gf100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
if (base->func->clkgate_pack) {
nvkm_therm_clkgate_init(device->therm,
base->func->clkgate_pack);
}
}
void *
gf100_fb_dtor(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
return fb;
}
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct gf100_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
nvkm_fb_ctor(func, device, type, inst, &fb->base);
*pfb = &fb->base;
return 0;
}
static const struct nvkm_fb_func
gf100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gf100_ram_new,
.default_bigpage = 17,
};
int
gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gf100_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c |
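gf100_fb_init_page selects between 64 KiB and 128 KiB big pages via bit 0 of 0x100c80, and gf100_fb_oneinit sizes the MMU debug buffers to one big page (overridable by the MmuDebugBufferSize option, never below 0x1000). A short standalone sketch of that sizing, with the clamp simplified:
/* Standalone sketch of the gf100 big-page / debug-buffer sizing. */
#include <stdio.h>

int main(void)
{
	int      page     = 17;         /* fb->page: 16 or 17 on gf100 */
	unsigned big_page = 1u << page; /* 64 KiB or 128 KiB */
	unsigned dbg_size = big_page;   /* MmuDebugBufferSize default */

	if (dbg_size < 0x1000)          /* clamp to at least one small page */
		dbg_size = 0x1000;

	printf("big page %u KiB, mmu debug buffer %u bytes\n",
	       big_page >> 10, dbg_size);
	return 0;
}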
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x80000000 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
void
nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
}
void
nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
nvkm_rd32(device, 0x100240 + (i * 0x10));
}
static const struct nvkm_fb_func
nv10_fb = {
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv10_ram_new,
};
int
nv10_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv10_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
int
nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
size, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c |
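On nv4e only the top byte of 0x10020c contributes to the RAM size, giving a 16 MiB granularity. A one-liner sketch of that mask (the register value is invented):
/* Standalone sketch of the nv4e size decode; 'reg' is a made-up readback. */
#include <stdio.h>

int main(void)
{
	unsigned reg  = 0x10345678;       /* pretend readback of 0x10020c */
	unsigned size = reg & 0xff000000; /* only the top byte counts */

	printf("ram size: %u MiB\n", size >> 20);
	return 0;
}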
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static void
nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x40000000;
#endif
}
}
static const struct nvkm_fb_func
nv35_fb = {
.tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv35_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c |
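nv35_fb_tile_comp packs the surface format, the first tag index, and the last tag index (both divided by 64, the latter at bit 13) into a single zcomp word. A standalone sketch of that packing with invented tag-allocator results:
/* Standalone sketch of the nv35 zcomp word layout; tag offset/count are invented. */
#include <stdio.h>

int main(void)
{
	unsigned tag_offset = 0x1000; /* pretend start of the tag allocation */
	unsigned tags       = 0x0800; /* pretend number of tags allocated */
	int      z16        = 0;      /* pretend a Z24S8 surface (flags & 2 clear) */
	unsigned zcomp      = 0;

	zcomp |= z16 ? 0x04000000 : 0x08000000;        /* format bits, as above */
	zcomp |= tag_offset >> 6;                      /* first tag / 64 */
	zcomp |= ((tag_offset + tags - 1) >> 6) << 13; /* last tag / 64 */

	printf("zcomp=%08x\n", zcomp);
	return 0;
}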
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/rammap.h>
static int
gp100_ram_init(struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
u8 ver, hdr, cnt, len, snr, ssz;
u32 data;
int i;
/* run a bunch of tables from rammap table. there's actually
* individual pointers for each rammap entry too, but, nvidia
* seem to just run the last two entries' scripts early on in
* their init, and never again.. we'll just run 'em all once
* for now.
*
* i strongly suspect that each script is for a separate mode
* (likely selected by 0x9a065c's lower bits?), and the
* binary driver skips the one that's already been setup by
* the init tables.
*/
data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!data || hdr < 0x15)
return -EINVAL;
cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
if (cnt) {
u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
for (i = 0; i < cnt; i++, data += 4) {
if (i != save >> 4) {
nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
nvbios_init(subdev, nvbios_rd32(bios, data));
}
}
nvkm_mask(device, 0x9a065c, 0x000000f0, save);
}
nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
nvkm_wr32(device, 0x10ecc0, 0xffffffff);
nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
return 0;
}
static u32
gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
{
return nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
}
static const struct nvkm_ram_func
gp100_ram = {
.upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
.init = gp100_ram_init,
};
int
gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_ram *ram;
if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
return gf100_ram_ctor(&gp100_ram, fb, ram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c |
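gp100_ram_init runs every rammap script except the one whose index is already latched in bits 7:4 of 0x9a065c. A tiny standalone sketch of just that skip logic (the register value and script count are invented):
/* Standalone sketch of the gp100 'skip the already-selected script' loop. */
#include <stdio.h>

int main(void)
{
	unsigned save = 0x00000023 & 0x000000f0; /* pretend 0x9a065c latched mode 2 */
	int      cnt  = 4;                       /* pretend four rammap scripts */

	for (int i = 0; i < cnt; i++) {
		if (i != save >> 4)
			printf("run script %d\n", i);
		else
			printf("skip script %d (already selected)\n", i);
	}
	return 0;
}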
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include "ram.h"
bool
tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
{
return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
}
static const struct nvkm_fb_func
tu102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp102_ram_new,
.default_bigpage = 16,
};
int
tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "ram.h"
#include <core/client.h>
#include <core/enum.h>
#include <engine/fifo.h>
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
struct nv50_fb *fb = nv50_fb(base);
return fb->func->ram_new(&fb->base, pram);
}
static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX" },
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "QUERY" },
{ 0x00000003, "COND" },
{ 0x00000004, "M2M_IN" },
{ 0x00000005, "M2M_OUT" },
{ 0x00000006, "M2M_NOTIFY" },
{}
};
static const struct nvkm_enum vm_ccache_subclients[] = {
{ 0x00000000, "CB" },
{ 0x00000001, "TIC" },
{ 0x00000002, "TSC" },
{}
};
static const struct nvkm_enum vm_prop_subclients[] = {
{ 0x00000000, "RT0" },
{ 0x00000001, "RT1" },
{ 0x00000002, "RT2" },
{ 0x00000003, "RT3" },
{ 0x00000004, "RT4" },
{ 0x00000005, "RT5" },
{ 0x00000006, "RT6" },
{ 0x00000007, "RT7" },
{ 0x00000008, "ZETA" },
{ 0x00000009, "LOCAL" },
{ 0x0000000a, "GLOBAL" },
{ 0x0000000b, "STACK" },
{ 0x0000000c, "DST2D" },
{}
};
static const struct nvkm_enum vm_pfifo_subclients[] = {
{ 0x00000000, "PUSHBUF" },
{ 0x00000001, "SEMAPHORE" },
{}
};
static const struct nvkm_enum vm_bar_subclients[] = {
{ 0x00000000, "FB" },
{ 0x00000001, "IN" },
{}
};
static const struct nvkm_enum vm_client[] = {
{ 0x00000000, "STRMOUT" },
{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
{ 0x00000004, "PFIFO_WRITE" },
{ 0x00000005, "CCACHE", vm_ccache_subclients },
{ 0x00000006, "PMSPPP" },
{ 0x00000007, "CLIPID" },
{ 0x00000008, "PFIFO_READ" },
{ 0x00000009, "VFETCH" },
{ 0x0000000a, "TEXTURE" },
{ 0x0000000b, "PROP", vm_prop_subclients },
{ 0x0000000c, "PVP" },
{ 0x0000000d, "PBSP" },
{ 0x0000000e, "PCRYPT" },
{ 0x0000000f, "PCOUNTER" },
{ 0x00000011, "PDAEMON" },
{}
};
static const struct nvkm_enum vm_engine[] = {
{ 0x00000000, "PGRAPH" },
{ 0x00000001, "PVP" },
{ 0x00000004, "PEEPHOLE" },
{ 0x00000005, "PFIFO", vm_pfifo_subclients },
{ 0x00000006, "BAR", vm_bar_subclients },
{ 0x00000008, "PMSPPP" },
{ 0x00000008, "PMPEG" },
{ 0x00000009, "PBSP" },
{ 0x0000000a, "PCRYPT" },
{ 0x0000000b, "PCOUNTER" },
{ 0x0000000c, "SEMAPHORE_BG" },
{ 0x0000000d, "PCE0" },
{ 0x0000000e, "PMU" },
{}
};
static const struct nvkm_enum vm_fault[] = {
{ 0x00000000, "PT_NOT_PRESENT" },
{ 0x00000001, "PT_TOO_SHORT" },
{ 0x00000002, "PAGE_NOT_PRESENT" },
{ 0x00000003, "PAGE_SYSTEM_ONLY" },
{ 0x00000004, "PAGE_READ_ONLY" },
{ 0x00000006, "NULL_DMAOBJ" },
{ 0x00000007, "WRONG_MEMTYPE" },
{ 0x0000000b, "VRAM_LIMIT" },
{ 0x0000000f, "DMAOBJ_LIMIT" },
{}
};
static void
nv50_fb_intr(struct nvkm_fb *base)
{
struct nv50_fb *fb = nv50_fb(base);
struct nvkm_subdev *subdev = &fb->base.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_chan *chan;
const struct nvkm_enum *en, *re, *cl, *sc;
u32 trap[6], idx, inst;
u8 st0, st1, st2, st3;
unsigned long flags;
int i;
idx = nvkm_rd32(device, 0x100c90);
if (!(idx & 0x80000000))
return;
idx &= 0x00ffffff;
for (i = 0; i < 6; i++) {
nvkm_wr32(device, 0x100c90, idx | i << 24);
trap[i] = nvkm_rd32(device, 0x100c94);
}
nvkm_wr32(device, 0x100c90, idx | 0x80000000);
/* decode status bits into something more useful */
if (device->chipset < 0xa3 ||
device->chipset == 0xaa || device->chipset == 0xac) {
st0 = (trap[0] & 0x0000000f) >> 0;
st1 = (trap[0] & 0x000000f0) >> 4;
st2 = (trap[0] & 0x00000f00) >> 8;
st3 = (trap[0] & 0x0000f000) >> 12;
} else {
st0 = (trap[0] & 0x000000ff) >> 0;
st1 = (trap[0] & 0x0000ff00) >> 8;
st2 = (trap[0] & 0x00ff0000) >> 16;
st3 = (trap[0] & 0xff000000) >> 24;
}
inst = ((trap[2] << 16) | trap[1]) << 12;
en = nvkm_enum_find(vm_engine, st0);
re = nvkm_enum_find(vm_fault , st1);
cl = nvkm_enum_find(vm_client, st2);
if (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
else sc = NULL;
chan = nvkm_chan_get_inst(&device->fifo->engine, inst, &flags);
nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
"engine %02x [%s] client %02x [%s] "
"subclient %02x [%s] reason %08x [%s]\n",
(trap[5] & 0x00000100) ? "read" : "write",
trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
chan ? chan->id : -1, inst,
chan ? chan->name : "unknown",
st0, en ? en->name : "",
st2, cl ? cl->name : "", st3, sc ? sc->name : "",
st1, re ? re->name : "");
nvkm_chan_put(&chan, flags);
}
static void
nv50_fb_init(struct nvkm_fb *base)
{
struct nv50_fb *fb = nv50_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
nvkm_wr32(device, 0x100c90, fb->func->trap);
}
static u32
nv50_fb_tags(struct nvkm_fb *base)
{
struct nv50_fb *fb = nv50_fb(base);
if (fb->func->tags)
return fb->func->tags(&fb->base);
return 0;
}
static void
nv50_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
{
nvkm_wr32(fb->subdev.device, 0x100c08, fb->sysmem.flush_page_addr >> 8);
}
static void *
nv50_fb_dtor(struct nvkm_fb *base)
{
struct nv50_fb *fb = nv50_fb(base);
return fb;
}
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
.tags = nv50_fb_tags,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.sysmem.flush_page_init = nv50_fb_sysmem_flush_page_init,
.ram_new = nv50_fb_ram_new,
};
int
nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
struct nv50_fb *fb;
if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
nvkm_fb_ctor(&nv50_fb_, device, type, inst, &fb->base);
fb->func = func;
*pfb = &fb->base;
return 0;
}
static const struct nv50_fb_func
nv50_fb = {
.ram_new = nv50_ram_new,
.tags = nv20_fb_tags,
.trap = 0x000707ff,
};
int
nv50_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nv50_fb_new_(&nv50_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c |
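nv50_fb_intr splits trap[0] into engine/reason/client/subclient fields; pre-nva3 parts (plus the 0xaa/0xac IGPs) use 4-bit fields, later parts use 8-bit fields. A standalone sketch of just that unpacking (the trap word and chipset are invented):
/* Standalone sketch of the nv50 trap[0] unpacking; inputs are invented. */
#include <stdio.h>

int main(void)
{
	unsigned trap0   = 0x0b050204; /* pretend value read back through 0x100c94 */
	int      chipset = 0xa3;       /* pretend an nva3+ part: wide 8-bit fields */
	unsigned st0, st1, st2, st3;

	if (chipset < 0xa3 || chipset == 0xaa || chipset == 0xac) {
		st0 = (trap0 & 0x0000000f) >> 0;
		st1 = (trap0 & 0x000000f0) >> 4;
		st2 = (trap0 & 0x00000f00) >> 8;
		st3 = (trap0 & 0x0000f000) >> 12;
	} else {
		st0 = (trap0 & 0x000000ff) >> 0;
		st1 = (trap0 & 0x0000ff00) >> 8;
		st2 = (trap0 & 0x00ff0000) >> 16;
		st3 = (trap0 & 0xff000000) >> 24;
	}

	printf("engine=%02x reason=%02x client=%02x subclient=%02x\n",
	       st0, st1, st2, st3);
	return 0;
}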
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul
*/
#include "gf100.h"
#include "gk104.h"
#include "ram.h"
#include <subdev/therm.h>
#include <subdev/fb.h>
/*
*******************************************************************************
* PGRAPH registers for clockgating
*******************************************************************************
*/
static const struct nvkm_therm_clkgate_init
gk110_fb_clkgate_blcg_init_unk_0[] = {
{ 0x100d10, 1, 0x0000c242 },
{ 0x100d30, 1, 0x0000c242 },
{ 0x100d3c, 1, 0x00000242 },
{ 0x100d48, 1, 0x0000c242 },
{ 0x100d1c, 1, 0x00000042 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk110_fb_clkgate_pack[] = {
{ gk110_fb_clkgate_blcg_init_unk_0 },
{ gk104_fb_clkgate_blcg_init_vm_0 },
{ gk104_fb_clkgate_blcg_init_main_0 },
{ gk104_fb_clkgate_blcg_init_bcast_0 },
{}
};
static const struct nvkm_fb_func
gk110_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk110_fb_clkgate_pack,
};
int
gk110_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gk110_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "ram.h"
u32
gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 fbpt = nvkm_rd32(device, 0x022438);
u32 fbpat = nvkm_rd32(device, 0x02243c);
u32 fbpas = fbpat / fbpt;
u32 fbpa = fbp * fbpas;
u32 size = 0;
while (fbpas--) {
if (!(fbpao & BIT(fbpa)))
size += func->probe_fbpa_amount(device, fbpa);
fbpa++;
}
*pltcs = 1;
return size;
}
static const struct nvkm_ram_func
gf108_ram = {
.upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
};
int
gf108_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gf100_ram_new_(&gf108_ram, fb, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c |
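gf108_ram_probe_fbp_amount sizes one FBP by summing its FBPA units and skipping any unit marked disabled in the fbpao mask. A standalone sketch of that accumulation (unit counts, the mask, and the per-FBPA amount are invented):
/* Standalone sketch of the gf108 per-FBP size accumulation; inputs are invented. */
#include <stdio.h>

static unsigned probe_fbpa_amount(unsigned fbpa)
{
	(void)fbpa;   /* same amount for every unit in this sketch */
	return 1024;  /* pretend every FBPA reports 1024 MiB */
}

int main(void)
{
	unsigned fbpt  = 2;   /* pretend 0x022438: total FBP count */
	unsigned fbpat = 4;   /* pretend 0x02243c: total FBPA count */
	unsigned fbpao = 0x2; /* pretend FBPA 1 is floorswept */
	unsigned fbp   = 0;

	unsigned fbpas = fbpat / fbpt; /* FBPAs per FBP */
	unsigned fbpa  = fbp * fbpas;
	unsigned size  = 0;

	while (fbpas--) {
		if (!(fbpao & (1u << fbpa)))
			size += probe_fbpa_amount(fbpa);
		fbpa++;
	}

	printf("fbp %u: %u MiB\n", fbp, size);
	return 0;
}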
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static const struct nvkm_fb_func
nv49_fb = {
.tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv49_ram_new,
};
int
nv49_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv49_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include "ram.h"
static const struct nvkm_fb_func
ga100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.vidmem.size = gp102_fb_vidmem_size,
.ram_new = gp102_ram_new,
.default_bigpage = 16,
};
int
ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
#include "ram.h"
#include <core/memory.h>
int
gm200_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
case 16: nvkm_mask(device, 0x100c80, 0x00001801, 0x00001001); break;
case 17: nvkm_mask(device, 0x100c80, 0x00001801, 0x00000000); break;
case 0: nvkm_mask(device, 0x100c80, 0x00001800, 0x00001800); break;
default:
return -EINVAL;
}
return 0;
}
void
gm200_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
struct nvkm_device *device = fb->base.subdev.device;
nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
nvkm_mask(device, 0x100cc4, 0x00060000,
min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
}
static const struct nvkm_fb_func
gm200_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gm200_ram_new,
.default_bigpage = 0 /* per-instance. */,
};
int
gm200_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gm200_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Roy Spliet <[email protected]>
*/
#define gt215_ram(p) container_of((p), struct gt215_ram, base)
#include "ram.h"
#include "ramfuc.h"
#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0205.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk/gt215.h>
#include <subdev/gpio.h>
struct gt215_ramfuc {
struct ramfuc base;
struct ramfuc_reg r_0x001610;
struct ramfuc_reg r_0x001700;
struct ramfuc_reg r_0x002504;
struct ramfuc_reg r_0x004000;
struct ramfuc_reg r_0x004004;
struct ramfuc_reg r_0x004018;
struct ramfuc_reg r_0x004128;
struct ramfuc_reg r_0x004168;
struct ramfuc_reg r_0x100080;
struct ramfuc_reg r_0x100200;
struct ramfuc_reg r_0x100210;
struct ramfuc_reg r_0x100220[9];
struct ramfuc_reg r_0x100264;
struct ramfuc_reg r_0x1002d0;
struct ramfuc_reg r_0x1002d4;
struct ramfuc_reg r_0x1002dc;
struct ramfuc_reg r_0x10053c;
struct ramfuc_reg r_0x1005a0;
struct ramfuc_reg r_0x1005a4;
struct ramfuc_reg r_0x100700;
struct ramfuc_reg r_0x100714;
struct ramfuc_reg r_0x100718;
struct ramfuc_reg r_0x10071c;
struct ramfuc_reg r_0x100720;
struct ramfuc_reg r_0x100760;
struct ramfuc_reg r_0x1007a0;
struct ramfuc_reg r_0x1007e0;
struct ramfuc_reg r_0x100da0;
struct ramfuc_reg r_0x10f804;
struct ramfuc_reg r_0x1110e0;
struct ramfuc_reg r_0x111100;
struct ramfuc_reg r_0x111104;
struct ramfuc_reg r_0x1111e0;
struct ramfuc_reg r_0x111400;
struct ramfuc_reg r_0x611200;
struct ramfuc_reg r_mr[4];
struct ramfuc_reg r_gpio[4];
};
struct gt215_ltrain {
enum {
NVA3_TRAIN_UNKNOWN,
NVA3_TRAIN_UNSUPPORTED,
NVA3_TRAIN_ONCE,
NVA3_TRAIN_EXEC,
NVA3_TRAIN_DONE
} state;
u32 r_100720;
u32 r_1111e0;
u32 r_111400;
struct nvkm_memory *memory;
};
struct gt215_ram {
struct nvkm_ram base;
struct gt215_ramfuc fuc;
struct gt215_ltrain ltrain;
};
static void
gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
{
int i, lo, hi;
u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
for (i = 0; i < 8; i++) {
for (lo = 0; lo < 0x40; lo++) {
if (!(vals[lo] & 0x80000000))
continue;
if (vals[lo] & (0x101 << i))
break;
}
if (lo == 0x40)
return;
for (hi = lo + 1; hi < 0x40; hi++) {
if (!(vals[lo] & 0x80000000))
continue;
if (!(vals[hi] & (0x101 << i))) {
hi--;
break;
}
}
median[i] = ((hi - lo) >> 1) + lo;
bins[(median[i] & 0xf0) >> 4]++;
median[i] += 0x30;
}
/* Find the best value for 0x1111e0 */
for (i = 0; i < 4; i++) {
if (bins[i] > qty) {
bin = i + 3;
qty = bins[i];
}
}
train->r_100720 = 0;
for (i = 0; i < 8; i++) {
median[i] = max(median[i], (u8) (bin << 4));
median[i] = min(median[i], (u8) ((bin << 4) | 0xf));
train->r_100720 |= ((median[i] & 0x0f) << (i << 2));
}
train->r_1111e0 = 0x02000000 | (bin * 0x101);
train->r_111400 = 0x0;
}
/*
* Link training for (at least) DDR3
*/
static int
gt215_link_train(struct gt215_ram *ram)
{
struct gt215_ltrain *train = &ram->ltrain;
struct gt215_ramfuc *fuc = &ram->fuc;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct nvkm_clk *clk = device->clk;
u32 *result, r1700;
int ret, i;
struct nvbios_M0205T M0205T = { 0 };
u8 ver, hdr, cnt, len, snr, ssz;
unsigned int clk_current;
unsigned long flags;
unsigned long *f = &flags;
if (nvkm_boolopt(device->cfgopt, "NvMemExec", true) != true)
return -ENOSYS;
/* XXX: Multiple partitions? */
result = kmalloc_array(64, sizeof(u32), GFP_KERNEL);
if (!result)
return -ENOMEM;
train->state = NVA3_TRAIN_EXEC;
/* Clock speeds for training and back */
nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
if (M0205T.freq == 0) {
kfree(result);
return -ENOENT;
}
clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
ret = gt215_clk_pre(clk, f);
if (ret)
goto out;
/* First: clock up/down */
ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
if (ret)
goto out;
/* Do this *after* calc, eliminates write in script */
nvkm_wr32(device, 0x111400, 0x00000000);
/* XXX: Magic writes that improve train reliability? */
nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
nvkm_wr32(device, 0x100c04, 0x00000400);
/* Now the training script */
r1700 = ram_rd32(fuc, 0x001700);
ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
ram_wr32(fuc, 0x611200, 0x3300);
ram_wait_vblank(fuc);
ram_wait(fuc, 0x611200, 0x00000003, 0x00000000, 500000);
ram_mask(fuc, 0x001610, 0x00000083, 0x00000003);
ram_mask(fuc, 0x100080, 0x00000020, 0x00000000);
ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
ram_wr32(fuc, 0x001700, 0x00000000);
ram_train(fuc);
/* Reset */
ram_mask(fuc, 0x10f804, 0x80000000, 0x80000000);
ram_wr32(fuc, 0x10053c, 0x0);
ram_wr32(fuc, 0x100720, train->r_100720);
ram_wr32(fuc, 0x1111e0, train->r_1111e0);
ram_wr32(fuc, 0x111400, train->r_111400);
ram_nuke(fuc, 0x100080);
ram_mask(fuc, 0x100080, 0x00000020, 0x00000020);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x001700, r1700);
ram_mask(fuc, 0x001610, 0x00000083, 0x00000080);
ram_wr32(fuc, 0x611200, 0x3330);
ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
ram_exec(fuc, true);
ram->base.func->calc(&ram->base, clk_current);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
nvkm_mask(device, 0x616308, 0x10, 0x10);
nvkm_mask(device, 0x616b08, 0x10, 0x10);
gt215_clk_post(clk, f);
ram_train_result(ram->base.fb, result, 64);
for (i = 0; i < 64; i++)
nvkm_debug(subdev, "Train: %08x", result[i]);
gt215_link_train_calc(result, train);
nvkm_debug(subdev, "Train: %08x %08x %08x", train->r_100720,
train->r_1111e0, train->r_111400);
kfree(result);
train->state = NVA3_TRAIN_DONE;
return ret;
out:
if (ret == -EBUSY)
f = NULL;
train->state = NVA3_TRAIN_UNSUPPORTED;
gt215_clk_post(clk, f);
kfree(result);
return ret;
}
static int
gt215_link_train_init(struct gt215_ram *ram)
{
static const u32 pattern[16] = {
0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
0x33333333, 0x55555555, 0x77777777, 0x66666666,
0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
};
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_bios *bios = device->bios;
struct nvbios_M0205E M0205E;
u8 ver, hdr, cnt, len;
u32 r001700;
u64 addr;
int ret, i = 0;
train->state = NVA3_TRAIN_UNSUPPORTED;
/* We support type "5"
* XXX: training pattern table appears to be unused for this routine */
if (!nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E))
return -ENOENT;
if (M0205E.type != 5)
return 0;
train->state = NVA3_TRAIN_ONCE;
ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 16, 0x8000,
true, true, &ram->ltrain.memory);
if (ret)
return ret;
addr = nvkm_memory_addr(ram->ltrain.memory);
nvkm_wr32(device, 0x100538, 0x10000000 | (addr >> 16));
nvkm_wr32(device, 0x1005a8, 0x0000ffff);
nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
for (i = 0; i < 0x30; i++) {
nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
nvkm_wr32(device, 0x10f900, pattern[i % 16]);
}
for (i = 0; i < 0x30; i++) {
nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
nvkm_wr32(device, 0x10f920, pattern[i % 16]);
}
/* And upload the pattern */
r001700 = nvkm_rd32(device, 0x1700);
nvkm_wr32(device, 0x1700, addr >> 16);
for (i = 0; i < 16; i++)
nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
for (i = 0; i < 16; i++)
nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
nvkm_wr32(device, 0x1700, r001700);
train->r_100720 = nvkm_rd32(device, 0x100720);
train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
train->r_111400 = nvkm_rd32(device, 0x111400);
return 0;
}
static void
gt215_link_train_fini(struct gt215_ram *ram)
{
nvkm_memory_unref(&ram->ltrain.memory);
}
/*
* RAM reclocking
*/
#define T(t) cfg->timing_10_##t
static int
gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
{
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
int tUNK_base, tUNK_40_0, prevCL;
u32 cur2, cur3, cur7, cur8;
cur2 = nvkm_rd32(device, 0x100228);
cur3 = nvkm_rd32(device, 0x10022c);
cur7 = nvkm_rd32(device, 0x10023c);
cur8 = nvkm_rd32(device, 0x100240);
switch ((!T(CWL)) * ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
T(CWL) = T(CL) - 1;
break;
case NVKM_RAM_TYPE_GDDR3:
T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
break;
}
prevCL = (cur3 & 0x000000ff) + 1;
tUNK_base = ((cur7 & 0x00ff0000) >> 16) - prevCL;
timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
max_t(u8, T(18), 1) << 16 |
(T(WTR) + 1 + T(CWL)) << 8 |
(5 + T(CL) - T(CWL));
timing[2] = (T(CWL) - 1) << 24 |
(T(RRD) << 16) |
(T(RCDWR) << 8) |
T(RCDRD);
timing[3] = (cur3 & 0x00ff0000) |
(0x30 + T(CL)) << 24 |
(0xb + T(CL)) << 8 |
(T(CL) - 1);
timing[4] = T(20) << 24 |
T(21) << 16 |
T(13) << 8 |
T(13);
timing[5] = T(RFC) << 24 |
max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
max_t(u8, (T(CWL) + 6), (T(CL) + 2)) << 8 |
T(RP);
timing[6] = (0x5a + T(CL)) << 16 |
max_t(u8, 1, (6 - T(CL) + T(CWL))) << 8 |
(0x50 + T(CL) - T(CWL));
timing[7] = (cur7 & 0xff000000) |
((tUNK_base + T(CL)) << 16) |
0x202;
timing[8] = cur8 & 0xffffff00;
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
case NVKM_RAM_TYPE_GDDR3:
tUNK_40_0 = prevCL - (cur8 & 0xff);
if (tUNK_40_0 > 0)
timing[8] |= T(CL);
break;
default:
break;
}
nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
timing[0], timing[1], timing[2], timing[3]);
nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
timing[4], timing[5], timing[6], timing[7]);
nvkm_debug(subdev, " 240: %08x\n", timing[8]);
return 0;
}
#undef T
static void
nvkm_sddr2_dll_reset(struct gt215_ramfuc *fuc)
{
ram_mask(fuc, mr[0], 0x100, 0x100);
ram_nsec(fuc, 1000);
ram_mask(fuc, mr[0], 0x100, 0x000);
ram_nsec(fuc, 1000);
}
static void
nvkm_sddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr)
{
u32 mr1_old = ram_rd32(fuc, mr[1]);
if (!(mr1_old & 0x1)) {
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, mr[1], mr[1]);
ram_nsec(fuc, 1000);
}
}
static void
nvkm_gddr3_dll_disable(struct gt215_ramfuc *fuc, u32 *mr)
{
u32 mr1_old = ram_rd32(fuc, mr[1]);
if (!(mr1_old & 0x40)) {
ram_wr32(fuc, mr[1], mr[1]);
ram_nsec(fuc, 1000);
}
}
static void
gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
{
ram_wr32(fuc, 0x004004, mclk->pll);
ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
ram_mask(fuc, 0x004000, 0x00000010, 0x00000000);
ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
ram_mask(fuc, 0x004000, 0x00000010, 0x00000010);
}
static void
gt215_ram_gpio(struct gt215_ramfuc *fuc, u8 tag, u32 val)
{
struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
struct dcb_gpio_func func;
u32 reg, sh, gpio_val;
int ret;
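/* Only toggle the GPIO if it is not already in the requested state;
 * the two inversions below appear to account for the line's invert bit
 * and the DCB logical polarity. */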
if (nvkm_gpio_get(gpio, 0, tag, DCB_GPIO_UNUSED) != val) {
ret = nvkm_gpio_find(gpio, 0, tag, DCB_GPIO_UNUSED, &func);
if (ret)
return;
reg = func.line >> 3;
sh = (func.line & 0x7) << 2;
gpio_val = ram_rd32(fuc, gpio[reg]);
if (gpio_val & (8 << sh))
val = !val;
if (!(func.log[1] & 1))
val = !val;
ram_mask(fuc, gpio[reg], (0x3 << sh), ((val | 0x2) << sh));
ram_nsec(fuc, 20000);
}
}
static int
gt215_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct gt215_ram *ram = gt215_ram(base);
struct gt215_ramfuc *fuc = &ram->fuc;
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct gt215_clk_info mclk;
struct nvkm_gpio *gpio = device->gpio;
struct nvkm_ram_data *next;
u8 ver, hdr, cnt, len, strap;
u32 data;
u32 r004018, r100760, r100da0, r111100, ctrl;
u32 unk714, unk718, unk71c;
int ret, i;
u32 timing[9];
bool pll2pll;
next = &ram->base.target;
next->freq = freq;
ram->base.next = next;
if (ram->ltrain.state == NVA3_TRAIN_ONCE)
gt215_link_train(ram);
/* lookup memory config data relevant to the target frequency */
data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x05) {
nvkm_error(subdev, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
&ver, &hdr, &next->bios);
if (!data || ver != 0x10 || hdr < 0x09) {
nvkm_error(subdev, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
/* lookup memory timings, if bios says they're present */
if (next->bios.ramcfg_timing != 0xff) {
data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len,
&next->bios);
if (!data || ver != 0x10 || hdr < 0x17) {
nvkm_error(subdev, "invalid/missing timing entry\n");
return -EINVAL;
}
}
ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
if (ret < 0) {
nvkm_error(subdev, "failed mclk calculation\n");
return ret;
}
gt215_ram_timing_calc(ram, timing);
ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
/* Determine ram-specific MR values */
ram->base.mr[0] = ram_rd32(fuc, mr[0]);
ram->base.mr[1] = ram_rd32(fuc, mr[1]);
ram->base.mr[2] = ram_rd32(fuc, mr[2]);
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
ret = nvkm_sddr2_calc(&ram->base);
break;
case NVKM_RAM_TYPE_DDR3:
ret = nvkm_sddr3_calc(&ram->base);
break;
case NVKM_RAM_TYPE_GDDR3:
ret = nvkm_gddr3_calc(&ram->base);
break;
default:
ret = -ENOSYS;
break;
}
if (ret)
return ret;
/* XXX: 750MHz seems rather arbitrary */
if (freq <= 750000) {
r004018 = 0x10000000;
r100760 = 0x22222222;
r100da0 = 0x00000010;
} else {
r004018 = 0x00000000;
r100760 = 0x00000000;
r100da0 = 0x00000000;
}
if (!next->bios.ramcfg_DLLoff)
r004018 |= 0x00004000;
/* pll2pll requires to switch to a safe clock first */
ctrl = ram_rd32(fuc, 0x004000);
pll2pll = (!(ctrl & 0x00000008)) && mclk.pll;
/* Pre-reclock setup; NVIDIA does this outside the script */
if (next->bios.ramcfg_10_02_10) {
ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
} else {
ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
}
/* Always disable this bit during reclock */
ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
/* If switching from non-pll to pll, lock before disabling FB */
if (mclk.pll && !pll2pll) {
ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
gt215_ram_lock_pll(fuc, &mclk);
}
/* Start with disabling some CRTCs and PFIFO? */
ram_wait_vblank(fuc);
ram_wr32(fuc, 0x611200, 0x3300);
ram_mask(fuc, 0x002504, 0x1, 0x1);
ram_nsec(fuc, 10000);
ram_wait(fuc, 0x002504, 0x10, 0x10, 20000); /* XXX: or longer? */
ram_block(fuc);
ram_nsec(fuc, 2000);
if (!next->bios.ramcfg_10_02_10) {
if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
else
ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
}
/* If we're disabling the DLL, do it now */
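/* ramcfg_DLLoff is either 0 or 1, so the multiplication selects either
 * "no case matches" or the actual RAM type. */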
switch (next->bios.ramcfg_DLLoff * ram->base.type) {
case NVKM_RAM_TYPE_DDR3:
nvkm_sddr3_dll_disable(fuc, ram->base.mr);
break;
case NVKM_RAM_TYPE_GDDR3:
nvkm_gddr3_dll_disable(fuc, ram->base.mr);
break;
}
if (next->bios.timing_10_ODT)
gt215_ram_gpio(fuc, 0x2e, 1);
/* Brace RAM for impact */
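/* Presumably: precharge, issue manual refreshes, stop auto-refresh and
 * drop the DRAM into self-refresh before the memory clock changes. */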
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
ram_wr32(fuc, 0x1002d0, 0x00000001);
ram_wr32(fuc, 0x100210, 0x00000000);
ram_wr32(fuc, 0x1002dc, 0x00000001);
ram_nsec(fuc, 2000);
if (device->chipset == 0xa3 && freq <= 500000)
ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
/* Alter FBVDD/Q, apparently must be done with PLL disabled, thus
* set it to bypass */
if (nvkm_gpio_get(gpio, 0, 0x18, DCB_GPIO_UNUSED) ==
next->bios.ramcfg_FBVDDQ) {
data = ram_rd32(fuc, 0x004000) & 0x9;
if (data == 0x1)
ram_mask(fuc, 0x004000, 0x8, 0x8);
if (data & 0x1)
ram_mask(fuc, 0x004000, 0x1, 0x0);
gt215_ram_gpio(fuc, 0x18, !next->bios.ramcfg_FBVDDQ);
if (data & 0x1)
ram_mask(fuc, 0x004000, 0x1, 0x1);
}
/* Fiddle with clocks */
/* There are 4 scenarios:
* pll->pll: first switch to a 324MHz clock, set up new PLL, switch
* clk->pll: Set up new PLL, switch
* pll->clk: Set up clock, switch
* clk->clk: Overwrite ctrl and other bits, switch */
/* Switch to regular clock - 324MHz */
if (pll2pll) {
ram_mask(fuc, 0x004000, 0x00000004, 0x00000004);
ram_mask(fuc, 0x004168, 0x003f3141, 0x00083101);
ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
ram_wr32(fuc, 0x004018, 0x00001000);
gt215_ram_lock_pll(fuc, &mclk);
}
if (mclk.pll) {
ram_mask(fuc, 0x004000, 0x00000105, 0x00000105);
ram_wr32(fuc, 0x004018, 0x00001000 | r004018);
ram_wr32(fuc, 0x100da0, r100da0);
} else {
ram_mask(fuc, 0x004168, 0x003f3141, mclk.clk | 0x00000101);
ram_mask(fuc, 0x004000, 0x00000108, 0x00000008);
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
ram_wr32(fuc, 0x004018, 0x00009000 | r004018);
ram_wr32(fuc, 0x100da0, r100da0);
}
ram_nsec(fuc, 20000);
if (next->bios.rammap_10_04_08) {
ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
next->bios.ramcfg_10_05 << 8 |
next->bios.ramcfg_10_05);
ram_wr32(fuc, 0x1005a4, next->bios.ramcfg_10_08 << 8 |
next->bios.ramcfg_10_07);
ram_wr32(fuc, 0x10f804, next->bios.ramcfg_10_09_f0 << 20 |
next->bios.ramcfg_10_03_0f << 16 |
next->bios.ramcfg_10_09_0f |
0x80000000);
ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
} else {
if (train->state == NVA3_TRAIN_DONE) {
ram_wr32(fuc, 0x100080, 0x1020);
ram_mask(fuc, 0x111400, 0xffffffff, train->r_111400);
ram_mask(fuc, 0x1111e0, 0xffffffff, train->r_1111e0);
ram_mask(fuc, 0x100720, 0xffffffff, train->r_100720);
}
ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
ram_mask(fuc, 0x100760, 0x22222222, r100760);
ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
}
if (device->chipset == 0xa3 && freq > 500000) {
ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
}
/* Final switch */
if (mclk.pll) {
ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
ram_mask(fuc, 0x004000, 0x00000008, 0x00000000);
}
ram_wr32(fuc, 0x1002dc, 0x00000000);
ram_wr32(fuc, 0x1002d4, 0x00000001);
ram_wr32(fuc, 0x100210, 0x80000000);
ram_nsec(fuc, 2000);
/* Set RAM MR parameters and timings */
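/* Only rewrite mode registers whose value actually changed, MR2 first. */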
for (i = 2; i >= 0; i--) {
if (ram_rd32(fuc, mr[i]) != ram->base.mr[i]) {
ram_wr32(fuc, mr[i], ram->base.mr[i]);
ram_nsec(fuc, 1000);
}
}
ram_wr32(fuc, 0x100220[3], timing[3]);
ram_wr32(fuc, 0x100220[1], timing[1]);
ram_wr32(fuc, 0x100220[6], timing[6]);
ram_wr32(fuc, 0x100220[7], timing[7]);
ram_wr32(fuc, 0x100220[2], timing[2]);
ram_wr32(fuc, 0x100220[4], timing[4]);
ram_wr32(fuc, 0x100220[5], timing[5]);
ram_wr32(fuc, 0x100220[0], timing[0]);
ram_wr32(fuc, 0x100220[8], timing[8]);
/* Misc */
ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
/* XXX: A lot of "chipset"/"ram type" specific stuff...? */
unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000130;
unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
/* NVA8 seems to skip various bits related to ramcfg_10_02_04 */
if (device->chipset == 0xa8) {
r111100 |= 0x08000000;
if (!next->bios.ramcfg_10_02_04)
unk714 |= 0x00000010;
} else {
if (next->bios.ramcfg_10_02_04) {
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
case NVKM_RAM_TYPE_DDR3:
r111100 &= ~0x00000020;
if (next->bios.ramcfg_10_02_10)
r111100 |= 0x08000004;
else
r111100 |= 0x00000024;
break;
default:
break;
}
} else {
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
case NVKM_RAM_TYPE_DDR3:
r111100 &= ~0x00000024;
r111100 |= 0x12800000;
if (next->bios.ramcfg_10_02_10)
r111100 |= 0x08000000;
unk714 |= 0x00000010;
break;
case NVKM_RAM_TYPE_GDDR3:
r111100 |= 0x30000000;
unk714 |= 0x00000020;
break;
default:
break;
}
}
}
unk714 |= (next->bios.ramcfg_10_04_01) << 8;
if (next->bios.ramcfg_10_02_20)
unk714 |= 0xf0000000;
if (next->bios.ramcfg_10_02_02)
unk718 |= 0x00000100;
if (next->bios.ramcfg_10_02_01)
unk71c |= 0x00000100;
if (next->bios.timing_10_24 != 0xff) {
unk718 &= ~0xf0000000;
unk718 |= next->bios.timing_10_24 << 28;
}
if (next->bios.ramcfg_10_02_10)
r111100 &= ~0x04020000;
ram_mask(fuc, 0x100714, 0xffffffff, unk714);
ram_mask(fuc, 0x10071c, 0xffffffff, unk71c);
ram_mask(fuc, 0x100718, 0xffffffff, unk718);
ram_mask(fuc, 0x111100, 0xffffffff, r111100);
if (!next->bios.timing_10_ODT)
gt215_ram_gpio(fuc, 0x2e, 0);
/* Reset DLL */
if (!next->bios.ramcfg_DLLoff)
nvkm_sddr2_dll_reset(fuc);
if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
ram_nsec(fuc, 31000);
} else {
ram_nsec(fuc, 14000);
}
if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
ram_wr32(fuc, 0x100264, 0x1);
ram_nsec(fuc, 2000);
}
ram_nuke(fuc, 0x100700);
ram_mask(fuc, 0x100700, 0x01000000, 0x01000000);
ram_mask(fuc, 0x100700, 0x01000000, 0x00000000);
/* Re-enable FB */
ram_unblock(fuc);
ram_wr32(fuc, 0x611200, 0x3330);
/* Post fiddlings */
if (next->bios.rammap_10_04_02)
ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
if (next->bios.ramcfg_10_02_10) {
ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
} else {
ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
}
if (mclk.pll) {
ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
} else {
ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
}
return 0;
}
static int
gt215_ram_prog(struct nvkm_ram *base)
{
struct gt215_ram *ram = gt215_ram(base);
struct gt215_ramfuc *fuc = &ram->fuc;
struct nvkm_device *device = ram->base.fb->subdev.device;
bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
if (exec) {
nvkm_mask(device, 0x001534, 0x2, 0x2);
ram_exec(fuc, true);
/* Post-processing, avoids flicker */
nvkm_mask(device, 0x002504, 0x1, 0x0);
nvkm_mask(device, 0x001534, 0x2, 0x0);
nvkm_mask(device, 0x616308, 0x10, 0x10);
nvkm_mask(device, 0x616b08, 0x10, 0x10);
} else {
ram_exec(fuc, false);
}
return 0;
}
static void
gt215_ram_tidy(struct nvkm_ram *base)
{
struct gt215_ram *ram = gt215_ram(base);
ram_exec(&ram->fuc, false);
}
static int
gt215_ram_init(struct nvkm_ram *base)
{
struct gt215_ram *ram = gt215_ram(base);
gt215_link_train_init(ram);
return 0;
}
static void *
gt215_ram_dtor(struct nvkm_ram *base)
{
struct gt215_ram *ram = gt215_ram(base);
gt215_link_train_fini(ram);
return ram;
}
static const struct nvkm_ram_func
gt215_ram_func = {
.dtor = gt215_ram_dtor,
.init = gt215_ram_init,
.calc = gt215_ram_calc,
.prog = gt215_ram_prog,
.tidy = gt215_ram_tidy,
};
int
gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct gt215_ram *ram;
int ret, i;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
if (ret)
return ret;
ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
ram->fuc.r_0x100080 = ramfuc_reg(0x100080);
ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
for (i = 0; i < 9; i++)
ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
ram->fuc.r_0x100264 = ramfuc_reg(0x100264);
ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
ram->fuc.r_0x100700 = ramfuc_reg(0x100700);
ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
ram->fuc.r_0x100720 = ramfuc_reg(0x100720);
ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
ram->fuc.r_0x100da0 = ramfuc_stride(0x100da0, 4, ram->base.part_mask);
ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
ram->fuc.r_0x1111e0 = ramfuc_reg(0x1111e0);
ram->fuc.r_0x111400 = ramfuc_reg(0x111400);
ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
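/* With more than one rank the mode registers appear to exist once per
 * rank, so program both copies of each. */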
if (ram->base.ranks > 1) {
ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
} else {
ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
}
ram->fuc.r_gpio[0] = ramfuc_reg(0x00e104);
ram->fuc.r_gpio[1] = ramfuc_reg(0x00e108);
ram->fuc.r_gpio[2] = ramfuc_reg(0x00e120);
ram->fuc.r_gpio[3] = ramfuc_reg(0x00e124);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "gf100.h"
#include "ram.h"
#include <engine/nvdec.h>
int
gp102_fb_vpr_scrub(struct nvkm_fb *fb)
{
return nvkm_falcon_fw_boot(&fb->vpr_scrubber, &fb->subdev, true, NULL, NULL, 0, 0x00000000);
}
bool
gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
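/* Writing 2 to 0x100cd0 appears to latch the current VPR state; bit 4
 * of the read-back then indicates a protected region is in place. */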
nvkm_wr32(device, 0x100cd0, 0x2);
return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
}
u64
gp102_fb_vidmem_size(struct nvkm_fb *fb)
{
const u32 data = nvkm_rd32(fb->subdev.device, 0x100ce0);
const u32 lmag = (data & 0x000003f0) >> 4;
const u32 lsca = (data & 0x0000000f);
const u64 size = (u64)lmag << (lsca + 20);
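/* Bit 30 presumably indicates ECC is enabled, in which case 1/16th of
 * the memory is reserved and not usable as ordinary VRAM. */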
if (data & 0x40000000)
return size / 16 * 15;
return size;
}
int
gp102_fb_oneinit(struct nvkm_fb *fb)
{
struct nvkm_subdev *subdev = &fb->subdev;
nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, "nvdec/scrubber",
0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
return gf100_fb_oneinit(fb);
}
static const struct nvkm_fb_func
gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = gp102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp102_ram_new,
};
int
gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static const struct nvkm_fb_func
nv47_fb = {
.tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
};
int
nv47_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv47_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static void
nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x80000000;
#endif
}
}
static const struct nvkm_fb_func
nv36_fb = {
.tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv36_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv36_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv36_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"
#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
#include <engine/mpeg.h>
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
fb->func->tile.fini(fb, region, tile);
}
void
nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
{
fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
}
void
nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
struct nvkm_device *device = fb->subdev.device;
if (fb->func->tile.prog) {
fb->func->tile.prog(fb, region, tile);
if (device->gr)
nvkm_engine_tile(&device->gr->engine, region);
if (device->mpeg)
nvkm_engine_tile(device->mpeg, region);
}
}
static void
nvkm_fb_sysmem_flush_page_init(struct nvkm_device *device)
{
struct nvkm_fb *fb = device->fb;
if (fb->func->sysmem.flush_page_init)
fb->func->sysmem.flush_page_init(fb);
}
int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
struct nvkm_subdev *subdev = &bios->subdev;
struct nvkm_device *device = subdev->device;
const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
struct nvbios_M0203E M0203E;
u8 ver, hdr;
if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
switch (M0203E.type) {
case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
case M0203E_TYPE_HBM2 : return NVKM_RAM_TYPE_HBM2;
default:
nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
return NVKM_RAM_TYPE_UNKNOWN;
}
}
nvkm_warn(subdev, "M0203E not matched!\n");
return NVKM_RAM_TYPE_UNKNOWN;
}
static void
nvkm_fb_intr(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
if (fb->func->intr)
fb->func->intr(fb);
}
static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
u32 tags = 0;
if (fb->func->ram_new) {
int ret = fb->func->ram_new(fb, &fb->ram);
if (ret) {
nvkm_error(subdev, "vram setup failed, %d\n", ret);
return ret;
}
}
if (fb->func->oneinit) {
int ret = fb->func->oneinit(fb);
if (ret)
return ret;
}
/* Initialise compression tag allocator.
*
* LTC oneinit() will override this on Fermi and newer.
*/
if (fb->func->tags) {
tags = fb->func->tags(fb);
nvkm_debug(subdev, "%d comptags\n", tags);
}
return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}
int
nvkm_fb_mem_unlock(struct nvkm_fb *fb)
{
struct nvkm_subdev *subdev = &fb->subdev;
int ret;
if (!fb->func->vpr.scrub_required)
return 0;
ret = nvkm_subdev_oneinit(subdev);
if (ret)
return ret;
if (!fb->func->vpr.scrub_required(fb)) {
nvkm_debug(subdev, "VPR not locked\n");
return 0;
}
nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
if (!fb->vpr_scrubber.fw.img) {
nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
return 0;
}
ret = fb->func->vpr.scrub(fb);
if (ret) {
nvkm_error(subdev, "VPR scrubber binary failed\n");
return ret;
}
if (fb->func->vpr.scrub_required(fb)) {
nvkm_error(subdev, "VPR still locked after scrub!\n");
return -EIO;
}
nvkm_debug(subdev, "VPR scrubber binary successful\n");
return 0;
}
u64
nvkm_fb_vidmem_size(struct nvkm_device *device)
{
struct nvkm_fb *fb = device->fb;
if (fb && fb->func->vidmem.size)
return fb->func->vidmem.size(fb);
WARN_ON(1);
return 0;
}
static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
int ret, i;
if (fb->ram) {
ret = nvkm_ram_init(fb->ram);
if (ret)
return ret;
}
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.prog(fb, i, &fb->tile.region[i]);
nvkm_fb_sysmem_flush_page_init(subdev->device);
if (fb->func->init)
fb->func->init(fb);
if (fb->func->init_remapper)
fb->func->init_remapper(fb);
if (fb->func->init_page) {
ret = fb->func->init_page(fb);
if (WARN_ON(ret))
return ret;
}
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
return 0;
}
static int
nvkm_fb_preinit(struct nvkm_subdev *subdev)
{
nvkm_fb_sysmem_flush_page_init(subdev->device);
return 0;
}
static void *
nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
int i;
nvkm_memory_unref(&fb->mmu_wr);
nvkm_memory_unref(&fb->mmu_rd);
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
nvkm_mm_fini(&fb->tags.mm);
mutex_destroy(&fb->tags.mutex);
nvkm_ram_del(&fb->ram);
nvkm_falcon_fw_dtor(&fb->vpr_scrubber);
if (fb->sysmem.flush_page) {
dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fb->sysmem.flush_page);
}
if (fb->func->dtor)
return fb->func->dtor(fb);
return fb;
}
static const struct nvkm_subdev_func
nvkm_fb = {
.dtor = nvkm_fb_dtor,
.preinit = nvkm_fb_preinit,
.oneinit = nvkm_fb_oneinit,
.init = nvkm_fb_init,
.intr = nvkm_fb_intr,
};
int
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
{
nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
mutex_init(&fb->tags.mutex);
if (func->sysmem.flush_page_init) {
fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!fb->sysmem.flush_page)
return -ENOMEM;
fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page,
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr))
return -EFAULT;
}
return 0;
}
int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
return -ENOMEM;
return nvkm_fb_ctor(func, device, type, inst, *pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
static const struct nvkm_fb_func
gp10b_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
};
int
gp10b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp10b_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c |
/*
* Copyright 2014 Roy Spliet
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Roy Spliet <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
#include "ram.h"
struct ramxlat {
int id;
u8 enc;
};
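/* Translate a raw timing value (e.g. CL in clock cycles) into its
 * mode-register encoding; tables end with an id of -1. */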
static inline int
ramxlat(const struct ramxlat *xlat, int id)
{
while (xlat->id >= 0) {
if (xlat->id == id)
return xlat->enc;
xlat++;
}
return -EINVAL;
}
static const struct ramxlat
ramddr2_cl[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 },
/* The following are available in some, but not all, DDR2 docs */
{ 7, 7 },
{ -1 }
};
static const struct ramxlat
ramddr2_wr[] = {
{ 2, 1 }, { 3, 2 }, { 4, 3 }, { 5, 4 }, { 6, 5 },
/* The following are available in some, but not all, DDR2 docs */
{ 7, 6 },
{ -1 }
};
int
nvkm_sddr2_calc(struct nvkm_ram *ram)
{
int CL, WR, DLL = 0, ODT = 0;
switch (ram->next->bios.timing_ver) {
case 0x10:
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
DLL = !ram->next->bios.ramcfg_DLLoff;
ODT = ram->next->bios.timing_10_ODT & 3;
break;
case 0x20:
CL = (ram->next->bios.timing[1] & 0x0000001f);
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
break;
default:
return -ENOSYS;
}
if (ram->next->bios.timing_ver == 0x20 ||
ram->next->bios.ramcfg_timing == 0xff) {
ODT = (ram->mr[1] & 0x004) >> 2 |
(ram->mr[1] & 0x040) >> 5;
}
CL = ramxlat(ramddr2_cl, CL);
WR = ramxlat(ramddr2_wr, WR);
if (CL < 0 || WR < 0)
return -EINVAL;
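/* DDR2 MR0 holds write recovery in bits 11:9 and CAS latency in bits
 * 6:4; EMR1 splits ODT across bits 2 and 6 and uses bit 0 to disable
 * the DLL. */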
ram->mr[0] &= ~0xf70;
ram->mr[0] |= (WR & 0x07) << 9;
ram->mr[0] |= (CL & 0x07) << 4;
ram->mr[1] &= ~0x045;
ram->mr[1] |= (ODT & 0x1) << 2;
ram->mr[1] |= (ODT & 0x2) << 5;
ram->mr[1] |= !DLL;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "ram.h"
static const struct nv50_fb_func
mcp89_fb = {
.ram_new = mcp77_ram_new,
.trap = 0x089d1fff,
};
int
mcp89_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nv50_fb_new_(&mcp89_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ramnv40.h"
int
nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
u32 fb914 = nvkm_rd32(device, 0x100914);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
switch (fb914 & 0x00000003) {
case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
case 0x00000003: break;
}
ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include "ram.h"
int
gv100_fb_init_page(struct nvkm_fb *fb)
{
return (fb->page == 16) ? 0 : -EINVAL;
}
static const struct nvkm_fb_func
gv100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gp102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = gp102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
.ram_new = gp102_ram_new,
.default_bigpage = 16,
};
int
gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gv100_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
#include "ram.h"
#include <core/memory.h>
#include <subdev/instmem.h>
#include <subdev/mmu.h>
struct nvkm_vram {
struct nvkm_memory memory;
struct nvkm_ram *ram;
u8 page;
struct nvkm_mm_node *mn;
};
static int
nvkm_vram_kmap(struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
return nvkm_instobj_wrap(nvkm_vram(memory)->ram->fb->subdev.device, memory, pmemory);
}
static int
nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nvkm_vram *vram = nvkm_vram(memory);
struct nvkm_vmm_map map = {
.memory = &vram->memory,
.offset = offset,
.mem = vram->mn,
};
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static u64
nvkm_vram_size(struct nvkm_memory *memory)
{
return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
}
static u64
nvkm_vram_addr(struct nvkm_memory *memory)
{
struct nvkm_vram *vram = nvkm_vram(memory);
if (!nvkm_mm_contiguous(vram->mn))
return ~0ULL;
return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
}
static u8
nvkm_vram_page(struct nvkm_memory *memory)
{
return nvkm_vram(memory)->page;
}
static enum nvkm_memory_target
nvkm_vram_target(struct nvkm_memory *memory)
{
return NVKM_MEM_TARGET_VRAM;
}
static void *
nvkm_vram_dtor(struct nvkm_memory *memory)
{
struct nvkm_vram *vram = nvkm_vram(memory);
struct nvkm_mm_node *next = vram->mn;
struct nvkm_mm_node *node;
mutex_lock(&vram->ram->mutex);
while ((node = next)) {
next = node->next;
nvkm_mm_free(&vram->ram->vram, &node);
}
mutex_unlock(&vram->ram->mutex);
return vram;
}
static const struct nvkm_memory_func
nvkm_vram = {
.dtor = nvkm_vram_dtor,
.target = nvkm_vram_target,
.page = nvkm_vram_page,
.addr = nvkm_vram_addr,
.size = nvkm_vram_size,
.map = nvkm_vram_map,
.kmap = nvkm_vram_kmap,
};
int
nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
bool contig, bool back, struct nvkm_memory **pmemory)
{
struct nvkm_ram *ram;
struct nvkm_mm *mm;
struct nvkm_mm_node **node, *r;
struct nvkm_vram *vram;
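/* Convert the byte-based request into allocator units (each unit is
 * 1 << NVKM_RAM_MM_SHIFT bytes); a contiguous request must be
 * satisfied by a single node, hence min == max in that case. */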
u8 page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
u32 max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
u32 min = contig ? max : align;
int ret;
if (!device->fb || !(ram = device->fb->ram))
return -ENODEV;
ram = device->fb->ram;
mm = &ram->vram;
if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
return -ENOMEM;
nvkm_memory_ctor(&nvkm_vram, &vram->memory);
vram->ram = ram;
vram->page = page;
*pmemory = &vram->memory;
mutex_lock(&ram->mutex);
node = &vram->mn;
do {
if (back)
ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
else
ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
if (ret) {
mutex_unlock(&ram->mutex);
nvkm_memory_unref(pmemory);
return ret;
}
*node = r;
node = &r->next;
max -= r->length;
} while (max);
mutex_unlock(&ram->mutex);
return 0;
}
int
nvkm_ram_init(struct nvkm_ram *ram)
{
if (ram->func->init)
return ram->func->init(ram);
return 0;
}
void
nvkm_ram_del(struct nvkm_ram **pram)
{
struct nvkm_ram *ram = *pram;
if (ram && !WARN_ON(!ram->func)) {
if (ram->func->dtor)
*pram = ram->func->dtor(ram);
nvkm_mm_fini(&ram->vram);
mutex_destroy(&ram->mutex);
kfree(*pram);
*pram = NULL;
}
}
int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
{
static const char *name[] = {
[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
[NVKM_RAM_TYPE_SGRAM ] = "SGRAM",
[NVKM_RAM_TYPE_SDRAM ] = "SDRAM",
[NVKM_RAM_TYPE_DDR1 ] = "DDR1",
[NVKM_RAM_TYPE_DDR2 ] = "DDR2",
[NVKM_RAM_TYPE_DDR3 ] = "DDR3",
[NVKM_RAM_TYPE_GDDR2 ] = "GDDR2",
[NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
[NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
[NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
[NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
[NVKM_RAM_TYPE_GDDR6 ] = "GDDR6",
[NVKM_RAM_TYPE_HBM2 ] = "HBM2",
};
struct nvkm_subdev *subdev = &fb->subdev;
int ret;
nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
ram->func = func;
ram->fb = fb;
ram->type = type;
ram->size = size;
mutex_init(&ram->mutex);
if (!nvkm_mm_initialised(&ram->vram)) {
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
size >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
}
return 0;
}
int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
{
if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
return -ENOMEM;
return nvkm_ram_ctor(func, fb, type, size, *pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) tile->addr = (0 << 3);
else tile->addr = (1 << 3);
tile->addr |= 0x00000001; /* mode = vram */
tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
static const struct nvkm_fb_func
nv46_fb = {
.init = nv44_fb_init,
.tile.regions = 15,
.tile.init = nv46_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
};
int
nv46_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv46_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "ram.h"
/* binary driver only executes this path if the condition (a) is true
* for any configuration (combination of rammap+ramcfg+timing) that
* can be reached on a given card. for now, we will execute the branch
* unconditionally in the hope that a "false everywhere" in the bios
* tables doesn't actually mean "don't touch this".
*/
#define NOTE00(a) 1
int
nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts)
{
int pd, lf, xd, vh, vr, vo, l3;
int WL, CL, WR, at[2], dt, ds;
int rq = ram->freq < 1000000; /* XXX */
xd = !ram->next->bios.ramcfg_DLLoff;
switch (ram->next->bios.ramcfg_ver) {
case 0x11:
pd = ram->next->bios.ramcfg_11_01_80;
lf = ram->next->bios.ramcfg_11_01_40;
vh = ram->next->bios.ramcfg_11_02_10;
vr = ram->next->bios.ramcfg_11_02_04;
vo = ram->next->bios.ramcfg_11_06;
l3 = !ram->next->bios.ramcfg_11_07_02;
break;
default:
return -ENOSYS;
}
switch (ram->next->bios.timing_ver) {
case 0x20:
WL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
CL = (ram->next->bios.timing[1] & 0x0000001f);
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
at[0] = ram->next->bios.timing_20_2e_c0;
at[1] = ram->next->bios.timing_20_2e_30;
dt = ram->next->bios.timing_20_2e_03;
ds = ram->next->bios.timing_20_2f_03;
break;
default:
return -ENOSYS;
}
if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
return -EINVAL;
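/* GDDR5 encodes CL biased by 5 and WR biased by 4 in MR0, with the
 * high bit of each spilling over into MR8. */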
CL -= 5;
WR -= 4;
ram->mr[0] &= ~0xf7f;
ram->mr[0] |= (WR & 0x0f) << 8;
ram->mr[0] |= (CL & 0x0f) << 3;
ram->mr[0] |= (WL & 0x07) << 0;
ram->mr[1] &= ~0x0bf;
ram->mr[1] |= (xd & 0x01) << 7;
ram->mr[1] |= (at[0] & 0x03) << 4;
ram->mr[1] |= (dt & 0x03) << 2;
ram->mr[1] |= (ds & 0x03) << 0;
/* this seems wrong, alternate field used for the broadcast
* on nuts vs non-nuts configs.. meh, it matches for now.
*/
ram->mr1_nuts = ram->mr[1];
if (nuts) {
ram->mr[1] &= ~0x030;
ram->mr[1] |= (at[1] & 0x03) << 4;
}
ram->mr[3] &= ~0x020;
ram->mr[3] |= (rq & 0x01) << 5;
ram->mr[5] &= ~0x004;
ram->mr[5] |= (l3 << 2);
if (!vo)
vo = (ram->mr[6] & 0xff0) >> 4;
if (ram->mr[6] & 0x001)
pd = 1; /* binary driver does this.. bug? */
ram->mr[6] &= ~0xff1;
ram->mr[6] |= (vo & 0xff) << 4;
ram->mr[6] |= (pd & 0x01) << 0;
if (NOTE00(vr)) {
ram->mr[7] &= ~0x300;
ram->mr[7] |= (vr & 0x03) << 8;
}
ram->mr[7] &= ~0x088;
ram->mr[7] |= (vh & 0x01) << 7;
ram->mr[7] |= (lf & 0x01) << 3;
ram->mr[8] &= ~0x003;
ram->mr[8] |= (WR & 0x10) >> 3;
ram->mr[8] |= (CL & 0x10) >> 4;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "ram.h"
static const struct nv50_fb_func
g84_fb = {
.ram_new = nv50_ram_new,
.tags = nv20_fb_tags,
.trap = 0x001d07ff,
};
int
g84_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nv50_fb_new_(&g84_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "ram.h"
u32
gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 ltcs = nvkm_rd32(device, 0x022450);
u32 fbpas = nvkm_rd32(device, 0x022458);
u32 fbpa = fbp * fbpas;
u32 size = 0;
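/* Skip FBPs that are floorswept entirely (0x021d38); otherwise sum the
 * per-FBPA amounts of every FBPA belonging to this FBP that isn't
 * itself disabled, and report how many LTCs remain active for it. */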
if (!(nvkm_rd32(device, 0x021d38) & BIT(fbp))) {
u32 ltco = nvkm_rd32(device, 0x021d70 + (fbp * 4));
u32 ltcm = ~ltco & ((1 << ltcs) - 1);
while (fbpas--) {
if (!(fbpao & (1 << fbpa)))
size += func->probe_fbpa_amount(device, fbpa);
fbpa++;
}
*pltcs = hweight32(ltcm);
}
return size;
}
static const struct nvkm_ram_func
gm200_ram = {
.upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
};
int
gm200_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gk104_ram_new_(&gm200_ram, fb, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "gf100.h"
#include "ram.h"
static const struct nvkm_fb_func
gf108_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gf108_ram_new,
.default_bigpage = 17,
};
int
gf108_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gf108_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
* Roy Spliet <[email protected]>
*/
#include "priv.h"
#include "ram.h"
struct ramxlat {
int id;
u8 enc;
};
static inline int
ramxlat(const struct ramxlat *xlat, int id)
{
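/* scan the translation table; the list is terminated by an id of -1 */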
while (xlat->id >= 0) {
if (xlat->id == id)
return xlat->enc;
xlat++;
}
return -EINVAL;
}
static const struct ramxlat
ramddr3_cl[] = {
{ 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
{ 11, 14 },
/* the below are mentioned in some, but not all, ddr3 docs */
{ 12, 1 }, { 13, 3 }, { 14, 5 },
{ -1 }
};
static const struct ramxlat
ramddr3_wr[] = {
{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
/* the below are mentioned in some, but not all, ddr3 docs */
{ 14, 7 }, { 15, 7 }, { 16, 0 },
{ -1 }
};
static const struct ramxlat
ramddr3_cwl[] = {
{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
/* the below are mentioned in some, but not all, ddr3 docs */
{ 9, 4 }, { 10, 5 },
{ -1 }
};
int
nvkm_sddr3_calc(struct nvkm_ram *ram)
{
int CWL, CL, WR, DLL = 0, ODT = 0;
DLL = !ram->next->bios.ramcfg_DLLoff;
switch (ram->next->bios.timing_ver) {
case 0x10:
if (ram->next->bios.timing_hdr < 0x17) {
/* XXX: NV50: Get CWL from the timing register */
return -ENOSYS;
}
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
ODT = ram->next->bios.timing_10_ODT;
break;
case 0x20:
CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
/* XXX: Get these values from the VBIOS instead */
ODT = (ram->mr[1] & 0x004) >> 2 |
(ram->mr[1] & 0x040) >> 5 |
(ram->mr[1] & 0x200) >> 7;
break;
default:
return -ENOSYS;
}
CWL = ramxlat(ramddr3_cwl, CWL);
CL = ramxlat(ramddr3_cl, CL);
WR = ramxlat(ramddr3_wr, WR);
if (CL < 0 || CWL < 0 || WR < 0)
return -EINVAL;
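/* pack the translated timings into the DDR3 mode registers: CL/WR in MR0, ODT/DLL in MR1, CWL in MR2 */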
ram->mr[0] &= ~0xf74;
ram->mr[0] |= (WR & 0x07) << 9;
ram->mr[0] |= (CL & 0x0e) << 3;
ram->mr[0] |= (CL & 0x01) << 2;
ram->mr[1] &= ~0x245;
ram->mr[1] |= (ODT & 0x1) << 2;
ram->mr[1] |= (ODT & 0x2) << 5;
ram->mr[1] |= (ODT & 0x4) << 7;
ram->mr[1] |= !DLL;
ram->mr[2] &= ~0x038;
ram->mr[2] |= (CWL & 0x07) << 3;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "ram.h"
static const struct nv50_fb_func
gt215_fb = {
.ram_new = gt215_ram_new,
.tags = nv20_fb_tags,
.trap = 0x000d0fff,
};
int
gt215_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nv50_fb_new_(&gt215_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c |
// SPDX-License-Identifier: MIT
#include "ram.h"
#include <subdev/bios.h>
static const struct nvkm_ram_func
gp102_ram = {
};
int
gp102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
enum nvkm_ram_type type = nvkm_fb_bios_memtype(fb->subdev.device->bios);
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
u64 size = fb->func->vidmem.size(fb);
int ret;
ret = nvkm_ram_new_(&gp102_ram, fb, type, size, pram);
if (ret)
return ret;
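/* rebuild the VRAM mm so the reserved head (vga) and tail (vbios) regions are excluded */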
nvkm_mm_fini(&(*pram)->vram);
return nvkm_mm_init(&(*pram)->vram, NVKM_RAM_MM_NORMAL,
rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
1);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
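/* readback flushes the posted tile writes before zcomp is programmed */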
nvkm_rd32(device, 0x100600 + (i * 0x10));
nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
}
void
nv41_fb_init(struct nvkm_fb *fb)
{
nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
}
static const struct nvkm_fb_func
nv41_fb = {
.tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 12,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
};
int
nv41_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv41_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include "ram.h"
#include <engine/nvdec.h>
static u64
ga102_fb_vidmem_size(struct nvkm_fb *fb)
{
return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
}
static int
ga102_fb_oneinit(struct nvkm_fb *fb)
{
struct nvkm_subdev *subdev = &fb->subdev;
nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", subdev, "nvdec/scrubber",
0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
return gf100_fb_oneinit(fb);
}
static const struct nvkm_fb_func
ga102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = ga102_fb_oneinit,
.init = gm200_fb_init,
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.vidmem.size = ga102_fb_vidmem_size,
.ram_new = gp102_ram_new,
.default_bigpage = 16,
.vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
};
int
ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
}
MODULE_FIRMWARE("nvidia/ga102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/ga103/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/ga104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/ga106/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/ga107/nvdec/scrubber.bin");
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ramnv40.h"
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
#include <subdev/timer.h>
static int
nv40_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct nv40_ram *ram = nv40_ram(base);
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pll pll;
int N1, M1, N2, M2;
int log2P, ret;
ret = nvbios_pll_parse(bios, 0x04, &pll);
if (ret) {
nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
if (ret < 0)
return ret;
ram->ctrl = 0x80000000 | (log2P << 16);
ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
if (N2 == M2) {
ram->ctrl |= 0x00000100;
ram->coef = (N1 << 8) | M1;
} else {
ram->ctrl |= 0x40000000;
ram->coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
}
return 0;
}
static int
nv40_ram_prog(struct nvkm_ram *base)
{
struct nv40_ram *ram = nv40_ram(base);
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
int i;
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
u32 cnt = 0;
do {
if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
if (!(sr1[i] & 0x20))
crtc_mask |= (1 << i);
break;
}
udelay(1);
} while (cnt++ < 32);
}
/* wait for vblank start on active crtcs, disable memory access */
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
if (!(tmp & 0x00010000))
break;
);
nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
if ( (tmp & 0x00010000))
break;
);
nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
}
/* prepare ram for reclocking */
nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
/* change the PLL of each memory partition */
nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
switch (device->chipset) {
case 0x40:
case 0x45:
case 0x41:
case 0x42:
case 0x47:
nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004048, ram->coef);
nvkm_wr32(device, 0x004030, ram->coef);
fallthrough;
case 0x43:
case 0x49:
case 0x4b:
nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x00403c, ram->coef);
fallthrough;
default:
nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004024, ram->coef);
break;
}
udelay(100);
nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
/* re-enable normal operation of memory controller */
nvkm_wr32(device, 0x1002dc, 0x00000000);
nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
udelay(100);
/* execute memory reset script from vbios */
if (!bit_entry(bios, 'M', &M))
nvbios_init(subdev, nvbios_rd16(bios, M.offset + 0x00));
/* make sure we're in vblank (hopefully the same one as before), and
* then re-enable crtc memory access
*/
for (i = 0; i < 2; i++) {
if (!(crtc_mask & (1 << i)))
continue;
nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
if ( (tmp & 0x00010000))
break;
);
nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
}
return 0;
}
static void
nv40_ram_tidy(struct nvkm_ram *base)
{
}
static const struct nvkm_ram_func
nv40_ram_func = {
.calc = nv40_ram_calc,
.prog = nv40_ram_prog,
.tidy = nv40_ram_tidy,
};
int
nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
struct nvkm_ram **pram)
{
struct nv40_ram *ram;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, &ram->base);
}
int
nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
switch (pbus1218 & 0x00000300) {
case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
}
ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
tile->addr = 0x00000001 | addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
if (flags & 4) {
fb->func->tile.comp(fb, i, size, flags, tile);
tile->addr |= 2;
}
}
static void
nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
tile->zcomp |= 0x80000000; /* enable */
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x08000000;
#endif
}
}
void
nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
tile->addr = 0;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
nvkm_mm_free(&fb->tags.mm, &tile->tag);
}
void
nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
nvkm_rd32(device, 0x100240 + (i * 0x10));
nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
}
u32
nv20_fb_tags(struct nvkm_fb *fb)
{
const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320);
return tags ? tags + 1 : 0;
}
static const struct nvkm_fb_func
nv20_fb = {
.tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv20_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv20_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv20_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf100_ram(p) container_of((p), struct gf100_ram, base)
#include "ram.h"
#include "ramfuc.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
struct gf100_ramfuc {
struct ramfuc base;
struct ramfuc_reg r_0x10fe20;
struct ramfuc_reg r_0x10fe24;
struct ramfuc_reg r_0x137320;
struct ramfuc_reg r_0x137330;
struct ramfuc_reg r_0x132000;
struct ramfuc_reg r_0x132004;
struct ramfuc_reg r_0x132100;
struct ramfuc_reg r_0x137390;
struct ramfuc_reg r_0x10f290;
struct ramfuc_reg r_0x10f294;
struct ramfuc_reg r_0x10f298;
struct ramfuc_reg r_0x10f29c;
struct ramfuc_reg r_0x10f2a0;
struct ramfuc_reg r_0x10f300;
struct ramfuc_reg r_0x10f338;
struct ramfuc_reg r_0x10f340;
struct ramfuc_reg r_0x10f344;
struct ramfuc_reg r_0x10f348;
struct ramfuc_reg r_0x10f910;
struct ramfuc_reg r_0x10f914;
struct ramfuc_reg r_0x100b0c;
struct ramfuc_reg r_0x10f050;
struct ramfuc_reg r_0x10f090;
struct ramfuc_reg r_0x10f200;
struct ramfuc_reg r_0x10f210;
struct ramfuc_reg r_0x10f310;
struct ramfuc_reg r_0x10f314;
struct ramfuc_reg r_0x10f610;
struct ramfuc_reg r_0x10f614;
struct ramfuc_reg r_0x10f800;
struct ramfuc_reg r_0x10f808;
struct ramfuc_reg r_0x10f824;
struct ramfuc_reg r_0x10f830;
struct ramfuc_reg r_0x10f988;
struct ramfuc_reg r_0x10f98c;
struct ramfuc_reg r_0x10f990;
struct ramfuc_reg r_0x10f998;
struct ramfuc_reg r_0x10f9b0;
struct ramfuc_reg r_0x10f9b4;
struct ramfuc_reg r_0x10fb04;
struct ramfuc_reg r_0x10fb08;
struct ramfuc_reg r_0x137300;
struct ramfuc_reg r_0x137310;
struct ramfuc_reg r_0x137360;
struct ramfuc_reg r_0x1373ec;
struct ramfuc_reg r_0x1373f0;
struct ramfuc_reg r_0x1373f8;
struct ramfuc_reg r_0x61c140;
struct ramfuc_reg r_0x611200;
struct ramfuc_reg r_0x13d8f4;
};
struct gf100_ram {
struct nvkm_ram base;
struct gf100_ramfuc fuc;
struct nvbios_pll refpll;
struct nvbios_pll mempll;
};
static void
gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
{
struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
struct nvkm_fb *fb = ram->base.fb;
struct nvkm_device *device = fb->subdev.device;
u32 part = nvkm_rd32(device, 0x022438), i;
u32 mask = nvkm_rd32(device, 0x022554);
u32 addr = 0x110974;
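/* broadcast the training command, then wait for completion on every active partition */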
ram_wr32(fuc, 0x10f910, magic);
ram_wr32(fuc, 0x10f914, magic);
for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
if (mask & (1 << i))
continue;
ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
}
}
int
gf100_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct gf100_ram *ram = gf100_ram(base);
struct gf100_ramfuc *fuc = &ram->fuc;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_clk *clk = device->clk;
struct nvkm_bios *bios = device->bios;
struct nvbios_ramcfg cfg;
u8 ver, cnt, len, strap;
struct {
u32 data;
u8 size;
} rammap, ramcfg, timing;
int ref, div, out;
int from, mode;
int N1, M1, P;
int ret;
/* lookup memory config data relevant to the target frequency */
rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
&cnt, &ramcfg.size, &cfg);
if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
nvkm_error(subdev, "invalid/missing rammap entry\n");
return -EINVAL;
}
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
nvkm_error(subdev, "invalid/missing ramcfg entry\n");
return -EINVAL;
}
/* lookup memory timings, if bios says they're present */
strap = nvbios_rd08(bios, ramcfg.data + 0x01);
if (strap != 0xff) {
timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
&cnt, &len);
if (!timing.data || ver != 0x10 || timing.size < 0x19) {
nvkm_error(subdev, "invalid/missing timing entry\n");
return -EINVAL;
}
} else {
timing.data = 0;
}
ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
/* determine current mclk configuration */
from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
/* determine target mclk configuration */
if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
else
ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
out = (ref * 2) / (div + 2);
mode = freq != out;
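/* mode 1: reclock via the memory PLL; mode 0: the divider path alone hits the target */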
ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
ram_nuke(fuc, 0x132000);
ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
}
if (mode == 1) {
ram_nuke(fuc, 0x10fe20);
ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
}
// 0x00020034 // 0x0000000a
ram_wr32(fuc, 0x132100, 0x00000001);
if (mode == 1 && from == 0) {
/* calculate refpll */
ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
&N1, NULL, &M1, &P);
if (ret <= 0) {
nvkm_error(subdev, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
}
ram_wr32(fuc, 0x10fe20, 0x20010000);
ram_wr32(fuc, 0x137320, 0x00000003);
ram_wr32(fuc, 0x137330, 0x81200006);
ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
ram_wr32(fuc, 0x10fe20, 0x20010001);
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
/* calculate mempll */
ret = gt215_pll_calc(subdev, &ram->mempll, freq,
&N1, NULL, &M1, &P);
if (ret <= 0) {
nvkm_error(subdev, "unable to calc refpll\n");
return ret ? ret : -ERANGE;
}
ram_wr32(fuc, 0x10fe20, 0x20010005);
ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
ram_wr32(fuc, 0x132000, 0x18010101);
ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
} else
if (mode == 0) {
ram_wr32(fuc, 0x137300, 0x00000003);
}
if (from == 0) {
ram_nuke(fuc, 0x10fb04);
ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
ram_nuke(fuc, 0x10fb08);
ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
ram_wr32(fuc, 0x10f988, 0x2004ff00);
ram_wr32(fuc, 0x10f98c, 0x003fc040);
ram_wr32(fuc, 0x10f990, 0x20012001);
ram_wr32(fuc, 0x10f998, 0x00011a00);
ram_wr32(fuc, 0x13d8f4, 0x00000000);
} else {
ram_wr32(fuc, 0x10f988, 0x20010000);
ram_wr32(fuc, 0x10f98c, 0x00000000);
ram_wr32(fuc, 0x10f990, 0x20012001);
ram_wr32(fuc, 0x10f998, 0x00010a00);
}
if (from == 0) {
// 0x00020039 // 0x000000ba
}
// 0x0002003a // 0x00000002
ram_wr32(fuc, 0x100b0c, 0x00080012);
// 0x00030014 // 0x00000000 // 0x02b5f070
// 0x00030014 // 0x00010000 // 0x02b5f070
ram_wr32(fuc, 0x611200, 0x00003300);
// 0x00020034 // 0x0000000a
// 0x00030020 // 0x00000001 // 0x00000000
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
ram_wr32(fuc, 0x10f210, 0x00000000);
ram_nsec(fuc, 1000);
if (mode == 0)
gf100_ram_train(fuc, 0x000c1001);
ram_wr32(fuc, 0x10f310, 0x00000001);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f090, 0x00000061);
ram_wr32(fuc, 0x10f090, 0xc000007f);
ram_nsec(fuc, 1000);
if (from == 0) {
ram_wr32(fuc, 0x10f824, 0x00007fd4);
} else {
ram_wr32(fuc, 0x1373ec, 0x00020404);
}
if (mode == 0) {
ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
ram_wr32(fuc, 0x10f830, 0x41500010);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
ram_wr32(fuc, 0x10f050, 0xff000090);
ram_wr32(fuc, 0x1373ec, 0x00020f0f);
ram_wr32(fuc, 0x1373f0, 0x00000003);
ram_wr32(fuc, 0x137310, 0x81201616);
ram_wr32(fuc, 0x132100, 0x00000001);
// 0x00020039 // 0x000000ba
ram_wr32(fuc, 0x10f830, 0x00300017);
ram_wr32(fuc, 0x1373f0, 0x00000001);
ram_wr32(fuc, 0x10f824, 0x00007e77);
ram_wr32(fuc, 0x132000, 0x18030001);
ram_wr32(fuc, 0x10f090, 0x4000007e);
ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f314, 0x00000001);
ram_wr32(fuc, 0x10f210, 0x80000000);
ram_wr32(fuc, 0x10f338, 0x00300220);
ram_wr32(fuc, 0x10f300, 0x0000011d);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f290, 0x02060505);
ram_wr32(fuc, 0x10f294, 0x34208288);
ram_wr32(fuc, 0x10f298, 0x44050411);
ram_wr32(fuc, 0x10f29c, 0x0000114c);
ram_wr32(fuc, 0x10f2a0, 0x42e10069);
ram_wr32(fuc, 0x10f614, 0x40044f77);
ram_wr32(fuc, 0x10f610, 0x40044f77);
ram_wr32(fuc, 0x10f344, 0x00600009);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f348, 0x00700008);
ram_wr32(fuc, 0x61c140, 0x19240000);
ram_wr32(fuc, 0x10f830, 0x00300017);
gf100_ram_train(fuc, 0x80021001);
gf100_ram_train(fuc, 0x80081001);
ram_wr32(fuc, 0x10f340, 0x00500004);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f830, 0x01300017);
ram_wr32(fuc, 0x10f830, 0x00300017);
// 0x00030020 // 0x00000000 // 0x00000000
// 0x00020034 // 0x0000000b
ram_wr32(fuc, 0x100b0c, 0x00080028);
ram_wr32(fuc, 0x611200, 0x00003330);
} else {
ram_wr32(fuc, 0x10f800, 0x00001800);
ram_wr32(fuc, 0x13d8f4, 0x00000000);
ram_wr32(fuc, 0x1373ec, 0x00020404);
ram_wr32(fuc, 0x1373f0, 0x00000003);
ram_wr32(fuc, 0x10f830, 0x40700010);
ram_wr32(fuc, 0x10f830, 0x40500010);
ram_wr32(fuc, 0x13d8f4, 0x00000000);
ram_wr32(fuc, 0x1373f8, 0x00000000);
ram_wr32(fuc, 0x132100, 0x00000101);
ram_wr32(fuc, 0x137310, 0x89201616);
ram_wr32(fuc, 0x10f050, 0xff000090);
ram_wr32(fuc, 0x1373ec, 0x00030404);
ram_wr32(fuc, 0x1373f0, 0x00000002);
// 0x00020039 // 0x00000011
ram_wr32(fuc, 0x132100, 0x00000001);
ram_wr32(fuc, 0x1373f8, 0x00002000);
ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f808, 0x7aaa0050);
ram_wr32(fuc, 0x10f830, 0x00500010);
ram_wr32(fuc, 0x10f200, 0x00ce1000);
ram_wr32(fuc, 0x10f090, 0x4000007e);
ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f314, 0x00000001);
ram_wr32(fuc, 0x10f210, 0x80000000);
ram_wr32(fuc, 0x10f338, 0x00300200);
ram_wr32(fuc, 0x10f300, 0x0000084d);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f290, 0x0b343825);
ram_wr32(fuc, 0x10f294, 0x3483028e);
ram_wr32(fuc, 0x10f298, 0x440c0600);
ram_wr32(fuc, 0x10f29c, 0x0000214c);
ram_wr32(fuc, 0x10f2a0, 0x42e20069);
ram_wr32(fuc, 0x10f200, 0x00ce0000);
ram_wr32(fuc, 0x10f614, 0x60044e77);
ram_wr32(fuc, 0x10f610, 0x60044e77);
ram_wr32(fuc, 0x10f340, 0x00500000);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f344, 0x00600228);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f348, 0x00700000);
ram_wr32(fuc, 0x13d8f4, 0x00000000);
ram_wr32(fuc, 0x61c140, 0x09a40000);
gf100_ram_train(fuc, 0x800e1008);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f800, 0x00001804);
// 0x00030020 // 0x00000000 // 0x00000000
// 0x00020034 // 0x0000000b
ram_wr32(fuc, 0x13d8f4, 0x00000000);
ram_wr32(fuc, 0x100b0c, 0x00080028);
ram_wr32(fuc, 0x611200, 0x00003330);
ram_nsec(fuc, 100000);
ram_wr32(fuc, 0x10f9b0, 0x05313f41);
ram_wr32(fuc, 0x10f9b4, 0x00002f50);
gf100_ram_train(fuc, 0x010c1001);
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
// 0x00020016 // 0x00000000
if (mode == 0)
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
return 0;
}
int
gf100_ram_prog(struct nvkm_ram *base)
{
struct gf100_ram *ram = gf100_ram(base);
struct nvkm_device *device = ram->base.fb->subdev.device;
ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
void
gf100_ram_tidy(struct nvkm_ram *base)
{
struct gf100_ram *ram = gf100_ram(base);
ram_exec(&ram->fuc, false);
}
int
gf100_ram_init(struct nvkm_ram *base)
{
static const u8 train0[] = {
0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
};
static const u32 train1[] = {
0x00000000, 0xffffffff,
0x55555555, 0xaaaaaaaa,
0x33333333, 0xcccccccc,
0xf0f0f0f0, 0x0f0f0f0f,
0x00ff00ff, 0xff00ff00,
0x0000ffff, 0xffff0000,
};
struct gf100_ram *ram = gf100_ram(base);
struct nvkm_device *device = ram->base.fb->subdev.device;
int i;
switch (ram->base.type) {
case NVKM_RAM_TYPE_GDDR5:
break;
default:
return 0;
}
/* prepare for ddr link training, and load training patterns */
for (i = 0; i < 0x30; i++) {
nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
nvkm_wr32(device, 0x10f918, train1[i % 12]);
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
nvkm_wr32(device, 0x10f918, train1[i % 12]);
nvkm_wr32(device, 0x10f91c, train1[i % 12]);
}
return 0;
}
u32
gf100_ram_probe_fbpa_amount(struct nvkm_device *device, int fbpa)
{
return nvkm_rd32(device, 0x11020c + (fbpa * 0x1000));
}
u32
gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
struct nvkm_device *device, int fbp, int *pltcs)
{
if (!(fbpao & BIT(fbp))) {
*pltcs = 1;
return func->probe_fbpa_amount(device, fbp);
}
return 0;
}
u32
gf100_ram_probe_fbp(const struct nvkm_ram_func *func,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 fbpao = nvkm_rd32(device, 0x022554);
return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
}
int
gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
u32 fbps = nvkm_rd32(device, 0x022438);
u64 total = 0, lcomm = ~0, lower, ubase, usize;
int ret, fbp, ltcs, ltcn = 0;
nvkm_debug(subdev, "%d FBP(s)\n", fbps);
for (fbp = 0; fbp < fbps; fbp++) {
u32 size = func->probe_fbp(func, device, fbp, &ltcs);
if (size) {
nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
fbp, size, ltcs);
lcomm = min(lcomm, (u64)(size / ltcs) << 20);
total += (u64) size << 20;
ltcn += ltcs;
} else {
nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
}
}
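/* the per-LTC amount common to all FBPs is linearly addressable; any excess is mapped above 'upper' */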
lower = lcomm * ltcn;
ubase = lcomm + func->upper;
usize = total - lower;
nvkm_debug(subdev, "Lower: %4lld MiB @ %010llx\n", lower >> 20, 0ULL);
nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
ret = nvkm_ram_ctor(func, fb, type, total, ram);
if (ret)
return ret;
nvkm_mm_fini(&ram->vram);
/* Some GPUs are in what's known as a "mixed memory" configuration.
*
* This is either where some FBPs have more memory than the others,
* or where LTCs have been disabled on a FBP.
*/
if (lower != total) {
/* The common memory amount is addressed normally. */
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
rsvd_head >> NVKM_RAM_MM_SHIFT,
(lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
/* And the rest is much higher in the physical address
* space, and may not be usable for certain operations.
*/
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
ubase >> NVKM_RAM_MM_SHIFT,
(usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
} else {
/* GPUs without mixed-memory are a lot nicer... */
ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
rsvd_head >> NVKM_RAM_MM_SHIFT,
(total - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
}
return 0;
}
int
gf100_ram_new_(const struct nvkm_ram_func *func,
struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct gf100_ram *ram;
int ret;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
ret = gf100_ram_ctor(func, fb, &ram->base);
if (ret)
return ret;
ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
if (ret) {
nvkm_error(subdev, "mclk refpll data not found\n");
return ret;
}
ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
if (ret) {
nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
return 0;
}
static const struct nvkm_ram_func
gf100_ram = {
.upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
};
int
gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gf100_ram_new_(&gf100_ram, fb, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
#include "regsnv04.h"
const struct nvkm_ram_func
nv04_ram_func = {
};
int
nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
u64 size;
enum nvkm_ram_type type;
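/* BOOT_0 either encodes the size directly (bit 8 set), or via the RAM_AMOUNT field */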
if (boot0 & 0x00000100) {
size = ((boot0 >> 12) & 0xf) * 2 + 2;
size *= 1024 * 1024;
} else {
switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
size = 32 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
size = 16 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
size = 8 * 1024 * 1024;
break;
case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
size = 4 * 1024 * 1024;
break;
}
}
if ((boot0 & 0x00000038) <= 0x10)
type = NVKM_RAM_TYPE_SGRAM;
else
type = NVKM_RAM_TYPE_SDRAM;
return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ramnv40.h"
int
nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
u32 fb474 = nvkm_rd32(device, 0x100474);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
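/* 0x100474 reports which memory type is fitted */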
if (fb474 & 0x00000004)
type = NVKM_RAM_TYPE_GDDR3;
if (fb474 & 0x00000002)
type = NVKM_RAM_TYPE_DDR2;
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
return nv40_ram_new_(fb, type, size, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static const struct nvkm_fb_func
nv1a_fb = {
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv1a_ram_new,
};
int
nv1a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv1a_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "gf100.h"
#include "ram.h"
#include <core/memory.h>
void
gp100_fb_init_unkn(struct nvkm_fb *base)
{
struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
}
void
gp100_fb_init_remapper(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
/* Disable address remapper. */
nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
}
static const struct nvkm_fb_func
gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = gp100_ram_new,
};
int
gp100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gp100_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c |
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "gf100.h"
/* GK20A's FB is similar to GF100's, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gk20a_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.default_bigpage = 17,
};
int
gk20a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gk20a_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
u32
gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
struct nvkm_device *device, int fbp, int *pltcs)
{
u32 fbpao = nvkm_rd32(device, 0x021c14);
return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
}
static const struct nvkm_ram_func
gm107_ram = {
.upper = 0x1000000000ULL,
.probe_fbp = gm107_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
};
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gk104_ram_new_(&gm107_ram, fb, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
static const struct nvkm_fb_func
nv4e_fb = {
.init = nv44_fb_init,
.tile.regions = 12,
.tile.init = nv46_fb_tile_init,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
};
int
nv4e_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv4e_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
int
nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
u32 cfg0 = nvkm_rd32(device, 0x100200);
enum nvkm_ram_type type;
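/* 0x100200 bit 0 indicates DDR rather than SDR memory */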
if (cfg0 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
else
type = NVKM_RAM_TYPE_SDRAM;
return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv50_ram(p) container_of((p), struct nv50_ram, base)
#include "ram.h"
#include "ramseq.h"
#include "nv50.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk/pll.h>
#include <subdev/gpio.h>
struct nv50_ramseq {
struct hwsq base;
struct hwsq_reg r_0x002504;
struct hwsq_reg r_0x004008;
struct hwsq_reg r_0x00400c;
struct hwsq_reg r_0x00c040;
struct hwsq_reg r_0x100200;
struct hwsq_reg r_0x100210;
struct hwsq_reg r_0x10021c;
struct hwsq_reg r_0x1002d0;
struct hwsq_reg r_0x1002d4;
struct hwsq_reg r_0x1002dc;
struct hwsq_reg r_0x10053c;
struct hwsq_reg r_0x1005a0;
struct hwsq_reg r_0x1005a4;
struct hwsq_reg r_0x100710;
struct hwsq_reg r_0x100714;
struct hwsq_reg r_0x100718;
struct hwsq_reg r_0x10071c;
struct hwsq_reg r_0x100da0;
struct hwsq_reg r_0x100e20;
struct hwsq_reg r_0x100e24;
struct hwsq_reg r_0x611200;
struct hwsq_reg r_timing[9];
struct hwsq_reg r_mr[4];
struct hwsq_reg r_gpio[4];
};
struct nv50_ram {
struct nvkm_ram base;
struct nv50_ramseq hwsq;
};
#define T(t) cfg->timing_10_##t
static int
nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
{
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
u32 cur2, cur4, cur7, cur8;
u8 unkt3b;
cur2 = nvkm_rd32(device, 0x100228);
cur4 = nvkm_rd32(device, 0x100230);
cur7 = nvkm_rd32(device, 0x10023c);
cur8 = nvkm_rd32(device, 0x100240);
switch ((!T(CWL)) * ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
T(CWL) = T(CL) - 1;
break;
case NVKM_RAM_TYPE_GDDR3:
T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
break;
}
/* XXX: N=1 is not proper statistics */
if (device->chipset == 0xa0) {
unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
timing[6] = (0x2d + T(CL) - T(CWL) +
ram->base.next->bios.rammap_00_16_40) << 16 |
T(CWL) << 8 |
(0x2f + T(CL) - T(CWL));
} else {
unkt3b = 0x16;
timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
max_t(s8, T(CWL) - 2, 1) << 8 |
(0x2e + T(CL) - T(CWL));
}
timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
max_t(u8, T(18), 1) << 16 |
(T(WTR) + 1 + T(CWL)) << 8 |
(3 + T(CL) - T(CWL));
timing[2] = (T(CWL) - 1) << 24 |
(T(RRD) << 16) |
(T(RCDWR) << 8) |
T(RCDRD);
timing[3] = (unkt3b - 2 + T(CL)) << 24 |
unkt3b << 16 |
(T(CL) - 1) << 8 |
(T(CL) - 1);
timing[4] = (cur4 & 0xffff0000) |
T(13) << 8 |
T(13);
timing[5] = T(RFC) << 24 |
max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
T(RP);
/* Timing 6 is already done above */
timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
timing[8] = (cur8 & 0xffffff00);
/* XXX: P.version == 1 only has DDR2 and GDDR3? */
if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
timing[5] |= (T(CL) + 3) << 8;
timing[8] |= (T(CL) - 4);
} else
if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
timing[5] |= (T(CL) + 2) << 8;
timing[8] |= (T(CL) - 2);
}
nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
timing[0], timing[1], timing[2], timing[3]);
nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
timing[4], timing[5], timing[6], timing[7]);
nvkm_debug(subdev, " 240: %08x\n", timing[8]);
return 0;
}
static int
nv50_ram_timing_read(struct nv50_ram *ram, u32 *timing)
{
unsigned int i;
struct nvbios_ramcfg *cfg = &ram->base.target.bios;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_device *device = subdev->device;
for (i = 0; i <= 8; i++)
timing[i] = nvkm_rd32(device, 0x100220 + (i * 4));
/* Derive the bare minimum for the MR calculation to succeed */
cfg->timing_ver = 0x10;
T(CL) = (timing[3] & 0xff) + 1;
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
T(CWL) = T(CL) - 1;
break;
case NVKM_RAM_TYPE_GDDR3:
T(CWL) = ((timing[2] & 0xff000000) >> 24) + 1;
break;
default:
return -ENOSYS;
}
T(WR) = ((timing[1] >> 24) & 0xff) - 1 - T(CWL);
return 0;
}
#undef T
static void
nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
{
ram_mask(hwsq, mr[0], 0x100, 0x100);
ram_mask(hwsq, mr[0], 0x100, 0x000);
ram_nsec(hwsq, 24000);
}
static void
nv50_ram_gpio(struct nv50_ramseq *hwsq, u8 tag, u32 val)
{
struct nvkm_gpio *gpio = hwsq->base.subdev->device->gpio;
struct dcb_gpio_func func;
u32 reg, sh, gpio_val;
int ret;
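	/* The four hwsq gpio shadow registers set up in nv50_ram_new()
	 * each cover eight GPIO lines with a 4-bit field per line, so
	 * func.line / 8 selects the register and (func.line % 8) * 4 the
	 * field within it.  The two inversions below (bit 3 of the field
	 * and the DCB logic-level flag) are a best-effort reading of the
	 * hardware behaviour.
	 */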
if (nvkm_gpio_get(gpio, 0, tag, DCB_GPIO_UNUSED) != val) {
ret = nvkm_gpio_find(gpio, 0, tag, DCB_GPIO_UNUSED, &func);
if (ret)
return;
reg = func.line >> 3;
sh = (func.line & 0x7) << 2;
gpio_val = ram_rd32(hwsq, gpio[reg]);
if (gpio_val & (8 << sh))
val = !val;
if (!(func.log[1] & 1))
val = !val;
ram_mask(hwsq, gpio[reg], (0x3 << sh), ((val | 0x2) << sh));
ram_nsec(hwsq, 20000);
}
}
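/* Build the hwsq script for a memory reclock: look up the matching perf,
 * rammap and timing table entries, derive the mode registers, then emit a
 * sequence that blanks the display/FIFO, puts the DRAM into self-refresh,
 * reprograms MPLL (0x004008/0x00400c), restores refresh, and finally
 * rewrites the timing set, mode registers and the assorted 0x1007xx
 * tweakables.  This is a summary of the function below, not a hardware
 * specification.
 */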
static int
nv50_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct nv50_ram *ram = nv50_ram(base);
struct nv50_ramseq *hwsq = &ram->hwsq;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_perfE perfE;
struct nvbios_pll mpll;
struct nvkm_ram_data *next;
u8 ver, hdr, cnt, len, strap, size;
u32 data;
u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
int N1, M1, N2, M2, P;
int ret, i;
u32 timing[9];
next = &ram->base.target;
next->freq = freq;
ram->base.next = next;
/* lookup closest matching performance table entry for frequency */
i = 0;
do {
data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
&size, &perfE);
if (!data || (ver < 0x25 || ver >= 0x40) ||
(size < 2)) {
nvkm_error(subdev, "invalid/missing perftab entry\n");
return -EINVAL;
}
} while (perfE.memory < freq);
nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
/* locate specific data set for the attached memory */
strap = nvbios_ramcfg_index(subdev);
if (strap >= cnt) {
nvkm_error(subdev, "invalid ramcfg strap\n");
return -EINVAL;
}
data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
&next->bios);
if (!data) {
nvkm_error(subdev, "invalid/missing rammap entry ");
return -EINVAL;
}
/* lookup memory timings, if bios says they're present */
if (next->bios.ramcfg_timing != 0xff) {
data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len, &next->bios);
if (!data || ver != 0x10 || hdr < 0x12) {
nvkm_error(subdev, "invalid/missing timing entry "
"%02x %04x %02x %02x\n",
strap, data, ver, hdr);
return -EINVAL;
}
nv50_ram_timing_calc(ram, timing);
} else {
nv50_ram_timing_read(ram, timing);
}
ret = ram_init(hwsq, subdev);
if (ret)
return ret;
/* Determine ram-specific MR values */
ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
switch (ram->base.type) {
case NVKM_RAM_TYPE_GDDR3:
ret = nvkm_gddr3_calc(&ram->base);
break;
default:
ret = -ENOSYS;
break;
}
if (ret) {
nvkm_error(subdev, "Could not calculate MR\n");
return ret;
}
if (subdev->device->chipset <= 0x96 && !next->bios.ramcfg_00_03_02)
ram_mask(hwsq, 0x100710, 0x00000200, 0x00000000);
/* Always disable this bit during reclock */
ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
ram_wait_vblank(hwsq);
ram_wr32(hwsq, 0x611200, 0x00003300);
ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
ram_nsec(hwsq, 8000);
ram_setf(hwsq, 0x10, 0x00); /* disable fb */
ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
ram_nsec(hwsq, 2000);
if (next->bios.timing_10_ODT)
nv50_ram_gpio(hwsq, 0x2e, 1);
ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
ret = nvbios_pll_parse(bios, 0x004008, &mpll);
mpll.vco2.max_freq = 0;
if (ret >= 0) {
ret = nv04_pll_calc(subdev, &mpll, freq,
&N1, &M1, &N2, &M2, &P);
if (ret <= 0)
ret = -EINVAL;
}
if (ret < 0)
return ret;
/* XXX: 750MHz seems rather arbitrary */
if (freq <= 750000) {
r100da0 = 0x00000010;
r004008 = 0x90000000;
} else {
r100da0 = 0x00000000;
r004008 = 0x80000000;
}
r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
* it have a different rammap bit from DLLoff? */
ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
next->bios.rammap_00_16_40 << 14);
ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
/* XXX: GDDR3 only? */
if (subdev->device->chipset >= 0x92)
ram_wr32(hwsq, 0x100da0, r100da0);
nv50_ram_gpio(hwsq, 0x18, !next->bios.ramcfg_FBVDDQ);
ram_nsec(hwsq, 64000); /*XXX*/
ram_nsec(hwsq, 32000); /*XXX*/
ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
ram_nsec(hwsq, 12000);
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR2:
ram_nuke(hwsq, mr[0]); /* force update */
ram_mask(hwsq, mr[0], 0x000, 0x000);
break;
case NVKM_RAM_TYPE_GDDR3:
ram_nuke(hwsq, mr[1]); /* force update */
ram_wr32(hwsq, mr[1], ram->base.mr[1]);
ram_nuke(hwsq, mr[0]); /* force update */
ram_wr32(hwsq, mr[0], ram->base.mr[0]);
break;
default:
break;
}
ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
if (!next->bios.ramcfg_00_03_02)
ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000100;
unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100;
unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
if (subdev->device->chipset <= 0x96) {
unk710 &= ~0x0000006e;
unk714 &= ~0x00000100;
if (!next->bios.ramcfg_00_03_08)
unk710 |= 0x00000060;
if (!next->bios.ramcfg_FBVDDQ)
unk714 |= 0x00000100;
if ( next->bios.ramcfg_00_04_04)
unk710 |= 0x0000000e;
} else {
unk710 &= ~0x00000001;
if (!next->bios.ramcfg_00_03_08)
unk710 |= 0x00000001;
}
if ( next->bios.ramcfg_00_03_01)
unk71c |= 0x00000100;
if ( next->bios.ramcfg_00_03_02)
unk710 |= 0x00000100;
if (!next->bios.ramcfg_00_03_08)
unk714 |= 0x00000020;
if ( next->bios.ramcfg_00_04_04)
unk714 |= 0x70000000;
if ( next->bios.ramcfg_00_04_20)
unk718 |= 0x00000100;
ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
	/* XXX: G94 does not even test these regs in trace. It should be
	 * harmless for us to do it, but why is it omitted? */
if (next->bios.rammap_00_16_20) {
ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
next->bios.ramcfg_00_06 << 8 |
next->bios.ramcfg_00_05);
ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
next->bios.ramcfg_00_08);
ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
} else {
ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
}
ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
if (!next->bios.timing_10_ODT)
nv50_ram_gpio(hwsq, 0x2e, 0);
/* Reset DLL */
if (!next->bios.ramcfg_DLLoff)
nvkm_sddr2_dll_reset(hwsq);
ram_setf(hwsq, 0x10, 0x01); /* enable fb */
ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
ram_wr32(hwsq, 0x611200, 0x00003330);
ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
if (next->bios.rammap_00_17_02)
ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
if (!next->bios.rammap_00_16_40)
ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
if (next->bios.ramcfg_00_03_02)
ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
if (subdev->device->chipset <= 0x96 && next->bios.ramcfg_00_03_02)
ram_mask(hwsq, 0x100710, 0x00000200, 0x00000200);
return 0;
}
static int
nv50_ram_prog(struct nvkm_ram *base)
{
struct nv50_ram *ram = nv50_ram(base);
struct nvkm_device *device = ram->base.fb->subdev.device;
ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
return 0;
}
static void
nv50_ram_tidy(struct nvkm_ram *base)
{
struct nv50_ram *ram = nv50_ram(base);
ram_exec(&ram->hwsq, false);
}
static const struct nvkm_ram_func
nv50_ram_func = {
.calc = nv50_ram_calc,
.prog = nv50_ram_prog,
.tidy = nv50_ram_tidy,
};
static u32
nv50_fb_vram_rblock(struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
struct nvkm_device *device = subdev->device;
int colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
u32 r0, r4, rt, rblock_size;
r0 = nvkm_rd32(device, 0x100200);
r4 = nvkm_rd32(device, 0x100204);
rt = nvkm_rd32(device, 0x100250);
nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
r0, r4, rt, nvkm_rd32(device, 0x001540));
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
rowsize = ram->parts * banks * (1 << colbits) * 8;
predicted = rowsize << rowbitsa;
if (r0 & 0x00000004)
predicted += rowsize << rowbitsb;
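	/* rowsize is the number of bytes in one DRAM row across all
	 * partitions (parts * banks * 2^colbits columns, times 8,
	 * presumably bytes per column access); predicted then adds one
	 * row-address range per populated rank.  If it disagrees with the
	 * size reported by 0x10020c the decode above is probably wrong
	 * for this board.
	 */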
if (predicted != ram->size) {
nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
(u32)(ram->size >> 20));
}
rblock_size = rowsize;
if (rt & 1)
rblock_size *= 3;
nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
return rblock_size;
}
int
nv50_ram_ctor(const struct nvkm_ram_func *func,
struct nvkm_fb *fb, struct nvkm_ram *ram)
{
struct nvkm_device *device = fb->subdev.device;
struct nvkm_bios *bios = device->bios;
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
u64 size = nvkm_rd32(device, 0x10020c);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
case 0: type = NVKM_RAM_TYPE_DDR1; break;
case 1:
if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
type = NVKM_RAM_TYPE_DDR3;
else
type = NVKM_RAM_TYPE_DDR2;
break;
case 2: type = NVKM_RAM_TYPE_GDDR3; break;
case 3: type = NVKM_RAM_TYPE_GDDR4; break;
case 4: type = NVKM_RAM_TYPE_GDDR5; break;
default:
break;
}
size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
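	/* 0x10020c packs the VRAM size with bits 31:8 holding the low part
	 * and bits 7:0 apparently holding bits 39:32, hence the swizzle
	 * above.
	 */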
ret = nvkm_ram_ctor(func, fb, type, size, ram);
if (ret)
return ret;
ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
ram->parts = hweight8(ram->part_mask);
ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
nvkm_mm_fini(&ram->vram);
return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}
int
nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nv50_ram *ram;
int ret, i;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
if (ret)
return ret;
ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
for (i = 0; i < 9; i++)
ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
if (ram->base.ranks > 1) {
ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
} else {
ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
}
ram->hwsq.r_gpio[0] = hwsq_reg(0x00e104);
ram->hwsq.r_gpio[1] = hwsq_reg(0x00e108);
ram->hwsq.r_gpio[2] = hwsq_reg(0x00e120);
ram->hwsq.r_gpio[3] = hwsq_reg(0x00e124);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
u32 flags, struct nvkm_fb_tile *tile)
{
/* for performance, select alternate bank offset for zeta */
if (!(flags & 4)) {
tile->addr = (0 << 4);
} else {
if (fb->func->tile.comp) /* z compression */
fb->func->tile.comp(fb, i, size, flags, tile);
tile->addr = (1 << 4);
}
tile->addr |= 0x00000001; /* enable */
tile->addr |= addr;
tile->limit = max(1u, addr + size) - 1;
tile->pitch = pitch;
}
static void
nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x10000000;
#endif
}
}
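/* Helpers for the 0x10037c/0x1003ac values programmed by nv30_fb_init():
 * calc_bias() fetches a signed 4-bit bias nibble from the 0x122c register
 * group (only present on chipsets newer than nv30) and doubles it, and
 * calc_ref() adds a per-byte bias to one byte of the 0x1003d0 reference
 * value, clamps the result to 0..0x1f, sets bit 7 and packs one such
 * value into each byte of the returned word.  This is a description of
 * the code below, not of documented hardware behaviour.
 */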
static int
calc_bias(struct nvkm_fb *fb, int k, int i, int j)
{
struct nvkm_device *device = fb->subdev.device;
int b = (device->chipset > 0x30 ?
nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
(4 * (i ^ 1)) :
0) & 0xf;
return 2 * (b & 0x8 ? b - 0x10 : b);
}
static int
calc_ref(struct nvkm_fb *fb, int l, int k, int i)
{
int j, x = 0;
for (j = 0; j < 4; j++) {
int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
}
return x;
}
void
nv30_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
int i, j;
/* Init the memory timing regs at 0x10037c/0x1003ac */
if (device->chipset == 0x30 ||
device->chipset == 0x31 ||
device->chipset == 0x35) {
/* Related to ROP count */
int n = (device->chipset == 0x31 ? 2 : 4);
int l = nvkm_rd32(device, 0x1003d0);
for (i = 0; i < n; i++) {
for (j = 0; j < 3; j++)
nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
calc_ref(fb, l, 0, j));
for (j = 0; j < 2; j++)
nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
calc_ref(fb, l, 1, j));
}
}
}
static const struct nvkm_fb_func
nv30_fb = {
.tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv30_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
};
int
nv30_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv30_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "gf100.h"
/* GM20B's FB is similar to GM200, but without the ability to allocate VRAM */
static const struct nvkm_fb_func
gm20b_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.default_bigpage = 0 /* per-instance. */,
};
int
gm20b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gm20b_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gk104_ram(p) container_of((p), struct gk104_ram, base)
#include "ram.h"
#include "ramfuc.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/M0205.h>
#include <subdev/bios/M0209.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
#include <subdev/gpio.h>
struct gk104_ramfuc {
struct ramfuc base;
struct nvbios_pll refpll;
struct nvbios_pll mempll;
struct ramfuc_reg r_gpioMV;
u32 r_funcMV[2];
struct ramfuc_reg r_gpio2E;
u32 r_func2E[2];
struct ramfuc_reg r_gpiotrig;
struct ramfuc_reg r_0x132020;
struct ramfuc_reg r_0x132028;
struct ramfuc_reg r_0x132024;
struct ramfuc_reg r_0x132030;
struct ramfuc_reg r_0x132034;
struct ramfuc_reg r_0x132000;
struct ramfuc_reg r_0x132004;
struct ramfuc_reg r_0x132040;
struct ramfuc_reg r_0x10f248;
struct ramfuc_reg r_0x10f290;
struct ramfuc_reg r_0x10f294;
struct ramfuc_reg r_0x10f298;
struct ramfuc_reg r_0x10f29c;
struct ramfuc_reg r_0x10f2a0;
struct ramfuc_reg r_0x10f2a4;
struct ramfuc_reg r_0x10f2a8;
struct ramfuc_reg r_0x10f2ac;
struct ramfuc_reg r_0x10f2cc;
struct ramfuc_reg r_0x10f2e8;
struct ramfuc_reg r_0x10f250;
struct ramfuc_reg r_0x10f24c;
struct ramfuc_reg r_0x10fec4;
struct ramfuc_reg r_0x10fec8;
struct ramfuc_reg r_0x10f604;
struct ramfuc_reg r_0x10f614;
struct ramfuc_reg r_0x10f610;
struct ramfuc_reg r_0x100770;
struct ramfuc_reg r_0x100778;
struct ramfuc_reg r_0x10f224;
struct ramfuc_reg r_0x10f870;
struct ramfuc_reg r_0x10f698;
struct ramfuc_reg r_0x10f694;
struct ramfuc_reg r_0x10f6b8;
struct ramfuc_reg r_0x10f808;
struct ramfuc_reg r_0x10f670;
struct ramfuc_reg r_0x10f60c;
struct ramfuc_reg r_0x10f830;
struct ramfuc_reg r_0x1373ec;
struct ramfuc_reg r_0x10f800;
struct ramfuc_reg r_0x10f82c;
struct ramfuc_reg r_0x10f978;
struct ramfuc_reg r_0x10f910;
struct ramfuc_reg r_0x10f914;
struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
struct ramfuc_reg r_0x62c000;
struct ramfuc_reg r_0x10f200;
struct ramfuc_reg r_0x10f210;
struct ramfuc_reg r_0x10f310;
struct ramfuc_reg r_0x10f314;
struct ramfuc_reg r_0x10f318;
struct ramfuc_reg r_0x10f090;
struct ramfuc_reg r_0x10f69c;
struct ramfuc_reg r_0x10f824;
struct ramfuc_reg r_0x1373f0;
struct ramfuc_reg r_0x1373f4;
struct ramfuc_reg r_0x137320;
struct ramfuc_reg r_0x10f65c;
struct ramfuc_reg r_0x10f6bc;
struct ramfuc_reg r_0x100710;
struct ramfuc_reg r_0x100750;
};
struct gk104_ram {
struct nvkm_ram base;
struct gk104_ramfuc fuc;
struct list_head cfg;
u32 parts;
u32 pmask;
u32 pnuts;
struct nvbios_ramcfg diff;
int from;
int mode;
int N1, fN1, M1, P1;
int N2, M2, P2;
};
/*******************************************************************************
* GDDR5
******************************************************************************/
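/* gk104_ram_train() pokes a link-training command into 0x10f910/0x10f914
 * and, when the run bit (bit 31) is set in the data, waits for the low
 * status bits at 0x110974 + 0x1000 * n to clear for every partition not
 * masked off in pmask.  Inferred from the code, not from documentation.
 */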
static void
gk104_ram_train(struct gk104_ramfuc *fuc, u32 mask, u32 data)
{
struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
u32 addr = 0x110974, i;
ram_mask(fuc, 0x10f910, mask, data);
ram_mask(fuc, 0x10f914, mask, data);
for (i = 0; (data & 0x80000000) && i < ram->parts; addr += 0x1000, i++) {
if (ram->pmask & (1 << i))
continue;
ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
}
}
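/* 0x1373f4 appears to select the memory clock source.  r1373f4_init()
 * switches to an intermediate source, reprograms the reference PLL
 * (0x132020/24/28/30/34) with the coefficients calculated earlier and,
 * for mode 2, the memory PLL (0x132000/04) as well; r1373f4_fini() then
 * switches to the final source for the new mode and updates the related
 * 0x1373ec/0x1373f0/0x10f800 bits.  Register meanings here are inferred,
 * not documented.
 */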
static void
r1373f4_init(struct gk104_ramfuc *fuc)
{
struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
if (ram->from == 2) {
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
} else {
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
}
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
/* (re)program refpll, if required */
if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
(ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
ram_wr32(fuc, 0x137320, 0x00000000);
ram_mask(fuc, 0x132030, 0xffff0000, runk0);
ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
ram_wr32(fuc, 0x132024, rcoef);
ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
}
/* (re)program mempll, if required */
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
} else {
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
}
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
}
static void
r1373f4_fini(struct gk104_ramfuc *fuc)
{
struct gk104_ram *ram = container_of(fuc, typeof(*ram), fuc);
struct nvkm_ram_data *next = ram->base.next;
u8 v0 = next->bios.ramcfg_11_03_c0;
u8 v1 = next->bios.ramcfg_11_03_30;
u32 tmp;
tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
if (ram->mode == 2) {
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
} else {
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
}
ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
}
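/* Some boards strap a subset of memory partitions differently ("nuts"
 * partitions, detected in gk104_ram_new_()).  For those, the broadcast
 * 0x10fxxx access recorded in the script is replayed against each
 * partition's own register copy at 0x110000 + 0x1000 * n: bits in _mask
 * take the new data, bits in _copy propagate the value cached for the
 * broadcast register, and everything else keeps the partition's current
 * value.  Best-effort description of the code below.
 */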
static void
gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
u32 _mask, u32 _data, u32 _copy)
{
struct nvkm_fb *fb = ram->base.fb;
struct ramfuc *fuc = &ram->fuc.base;
struct nvkm_device *device = fb->subdev.device;
u32 addr = 0x110000 + (reg->addr & 0xfff);
u32 mask = _mask | _copy;
u32 data = (_data & _mask) | (reg->data & _copy);
u32 i;
for (i = 0; i < 16; i++, addr += 0x1000) {
if (ram->pnuts & (1 << i)) {
u32 prev = nvkm_rd32(device, addr);
u32 next = (prev & ~mask) | data;
nvkm_memx_wr32(fuc->memx, addr, next);
}
}
}
#define ram_nuts(s,r,m,d,c) \
gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
static int
gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
{
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
/* MR1: turn termination on early, for some reason.. */
if ((ram->base.mr[1] & 0x03c) != 0x030) {
ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
ram_nuts(ram, mr[1], 0x03c, ram->base.mr1_nuts & 0x03c, 0x000);
}
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
if (temp != ram_rd32(fuc, gpio2E)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 20000);
}
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
gk104_ram_train(fuc, 0x01020000, 0x000c0000);
ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_nsec(fuc, 1000);
ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_wr32(fuc, 0x10f090, 0x00000061);
ram_wr32(fuc, 0x10f090, 0xc000007f);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f698, 0x00000000);
ram_wr32(fuc, 0x10f69c, 0x00000000);
/*XXX: there does appear to be some kind of condition here, simply
* modifying these bits in the vbios from the default pl0
* entries shows no change. however, the data does appear to
* be correct and may be required for the transition back
*/
mask = 0x800f07e0;
data = 0x00030000;
if (ram_rd32(fuc, 0x10f978) & 0x00800000)
data |= 0x00040000;
if (1) {
data |= 0x800807e0;
switch (next->bios.ramcfg_11_03_c0) {
case 3: data &= ~0x00000040; break;
case 2: data &= ~0x00000100; break;
case 1: data &= ~0x80000000; break;
case 0: data &= ~0x00000400; break;
}
switch (next->bios.ramcfg_11_03_30) {
case 3: data &= ~0x00000020; break;
case 2: data &= ~0x00000080; break;
case 1: data &= ~0x00080000; break;
case 0: data &= ~0x00000200; break;
}
}
if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else {
mask |= 0x34000000;
if (ram_rd32(fuc, 0x10f978) & 0x00800000)
mask |= 0x40000000;
}
ram_mask(fuc, 0x10f824, mask, data);
ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
if (ram->from == 2 && ram->mode != 2) {
ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
ram_mask(fuc, 0x10f200, 0x18008000, 0x00008000);
ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
r1373f4_init(fuc);
ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
r1373f4_fini(fuc);
ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
} else
if (ram->from != 2 && ram->mode != 2) {
r1373f4_init(fuc);
r1373f4_fini(fuc);
}
if (ram_have(fuc, gpioMV)) {
u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
if (temp != ram_rd32(fuc, gpioMV)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 64000);
}
}
if (next->bios.ramcfg_11_02_40 ||
next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->from != 2 && ram->mode == 2) {
if (0 /*XXX: Titan */)
ram_mask(fuc, 0x10f200, 0x18000000, 0x18000000);
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
r1373f4_init(fuc);
r1373f4_fini(fuc);
ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
} else
if (ram->from == 2 && ram->mode == 2) {
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
r1373f4_init(fuc);
r1373f4_fini(fuc);
}
if (ram->mode != 2) /*XXX*/ {
if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
if (!next->bios.ramcfg_11_07_08 && !next->bios.ramcfg_11_07_04) {
ram_wr32(fuc, 0x10f698, 0x01010101 * next->bios.ramcfg_11_04);
ram_wr32(fuc, 0x10f69c, 0x01010101 * next->bios.ramcfg_11_04);
} else
if (!next->bios.ramcfg_11_07_08) {
ram_wr32(fuc, 0x10f698, 0x00000000);
ram_wr32(fuc, 0x10f69c, 0x00000000);
}
if (ram->mode != 2) {
u32 data = 0x01000100 * next->bios.ramcfg_11_04;
ram_nuke(fuc, 0x10f694);
ram_mask(fuc, 0x10f694, 0xff00ff00, data);
}
if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
data = 0x00000080;
else
data = 0x00000000;
ram_mask(fuc, 0x10f60c, 0x00000080, data);
mask = 0x00070000;
data = 0x00000000;
if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x74000000;
ram_mask(fuc, 0x10f824, mask, data);
if (next->bios.ramcfg_11_01_08)
data = 0x00000000;
else
data = 0x00001000;
ram_mask(fuc, 0x10f200, 0x00001000, data);
if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
ram_nsec(fuc, 10000);
ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
}
if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
data = 0x00000000;
if (next->bios.ramcfg_11_08_08)
data |= 0x00002000;
if (next->bios.ramcfg_11_08_04)
data |= 0x00001000;
if (next->bios.ramcfg_11_08_02)
data |= 0x00004000;
ram_mask(fuc, 0x10f830, 0x00007000, data);
/* PFB timing */
ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
data = mask = 0x00000000;
if (ram->diff.ramcfg_11_08_20) {
if (next->bios.ramcfg_11_08_20)
data |= 0x01000000;
mask |= 0x01000000;
}
ram_mask(fuc, 0x10f200, mask, data);
data = mask = 0x00000000;
if (ram->diff.ramcfg_11_02_03) {
data |= next->bios.ramcfg_11_02_03 << 8;
mask |= 0x00000300;
}
if (ram->diff.ramcfg_11_01_10) {
if (next->bios.ramcfg_11_01_10)
data |= 0x70000000;
mask |= 0x70000000;
}
ram_mask(fuc, 0x10f604, mask, data);
data = mask = 0x00000000;
if (ram->diff.timing_20_30_07) {
data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (ram->diff.ramcfg_11_01_01) {
if (next->bios.ramcfg_11_01_01)
data |= 0x00000100;
mask |= 0x00000100;
}
ram_mask(fuc, 0x10f614, mask, data);
data = mask = 0x00000000;
if (ram->diff.timing_20_30_07) {
data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (ram->diff.ramcfg_11_01_02) {
if (next->bios.ramcfg_11_01_02)
data |= 0x00000100;
mask |= 0x00000100;
}
ram_mask(fuc, 0x10f610, mask, data);
mask = 0x33f00000;
data = 0x00000000;
if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
mask |= 0x00000004;
}
} else {
mask |= 0x40000020;
data |= 0x00000004;
}
ram_mask(fuc, 0x10f808, mask, data);
ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
data = mask = 0x00000000;
if (ram->diff.ramcfg_11_02_03) {
data |= next->bios.ramcfg_11_02_03;
mask |= 0x00000003;
}
if (ram->diff.ramcfg_11_01_10) {
if (next->bios.ramcfg_11_01_10)
data |= 0x00000004;
mask |= 0x00000004;
}
if ((ram_mask(fuc, 0x100770, mask, data) & mask & 4) != (data & 4)) {
ram_mask(fuc, 0x100750, 0x00000008, 0x00000008);
ram_wr32(fuc, 0x100710, 0x00000000);
ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
}
data = next->bios.timing_20_30_07 << 8;
if (next->bios.ramcfg_11_01_01)
data |= 0x80000000;
ram_mask(fuc, 0x100778, 0x00000700, data);
ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
data = (next->bios.timing[10] & 0x7f000000) >> 24;
if (data < next->bios.timing_20_2c_1fc0)
data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
ram_mask(fuc, 0x10fec4, 0x041e0f07, next->bios.timing_20_31_0800 << 26 |
next->bios.timing_20_31_0780 << 17 |
next->bios.timing_20_31_0078 << 8 |
next->bios.timing_20_31_0007);
ram_mask(fuc, 0x10fec8, 0x00000027, next->bios.timing_20_31_8000 << 5 |
next->bios.timing_20_31_7000);
ram_wr32(fuc, 0x10f090, 0x4000007e);
ram_nsec(fuc, 2000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
gk104_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f294, temp);
}
ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
ram_wr32(fuc, mr[0], ram->base.mr[0]);
ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
ram_nsec(fuc, 1000);
ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5] & ~0x004); /* LP3 later */
ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
if (vc == 0 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
if (temp != ram_rd32(fuc, gpio2E)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 20000);
}
}
ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_nsec(fuc, 1000);
ram_nuts(ram, 0x10f200, 0x18808800, 0x00000000, 0x18808800);
data = ram_rd32(fuc, 0x10f978);
data &= ~0x00046144;
data |= 0x0000000b;
if (!next->bios.ramcfg_11_07_08) {
if (!next->bios.ramcfg_11_07_04)
data |= 0x0000200c;
else
data |= 0x00000000;
} else {
data |= 0x00040044;
}
ram_wr32(fuc, 0x10f978, data);
if (ram->mode == 1) {
data = ram_rd32(fuc, 0x10f830) | 0x00000001;
ram_wr32(fuc, 0x10f830, data);
}
if (!next->bios.ramcfg_11_07_08) {
data = 0x88020000;
if ( next->bios.ramcfg_11_07_04)
data |= 0x10000000;
if (!next->bios.rammap_11_08_10)
data |= 0x00080000;
} else {
data = 0xa40e0000;
}
gk104_ram_train(fuc, 0xbc0f0000, data);
if (1) /* XXX: not always? */
ram_nsec(fuc, 1000);
if (ram->mode == 2) { /*XXX*/
ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
}
/* LP3 */
if (ram_mask(fuc, mr[5], 0x004, ram->base.mr[5]) != ram->base.mr[5])
ram_nsec(fuc, 1000);
if (ram->mode != 2) {
ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
}
if (next->bios.ramcfg_11_07_02)
gk104_ram_train(fuc, 0x80020000, 0x01000000);
ram_unblock(fuc);
if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
ram_mask(fuc, 0x10f200, 0x00000800, data);
ram_nuts(ram, 0x10f200, 0x18808800, data, 0x18808800);
return 0;
}
/*******************************************************************************
* DDR3
******************************************************************************/
static void
nvkm_sddr3_dll_reset(struct gk104_ramfuc *fuc)
{
ram_nuke(fuc, mr[0]);
ram_mask(fuc, mr[0], 0x100, 0x100);
ram_mask(fuc, mr[0], 0x100, 0x000);
}
static void
nvkm_sddr3_dll_disable(struct gk104_ramfuc *fuc)
{
u32 mr1_old = ram_rd32(fuc, mr[1]);
if (!(mr1_old & 0x1)) {
ram_mask(fuc, mr[1], 0x1, 0x1);
ram_nsec(fuc, 1000);
}
}
static int
gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
{
struct gk104_ramfuc *fuc = &ram->fuc;
const u32 rcoef = (( ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
struct nvkm_ram_data *next = ram->base.next;
int vc = !next->bios.ramcfg_11_02_08;
int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
ram_block(fuc);
if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0000);
if (vc == 1 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
if (temp != ram_rd32(fuc, gpio2E)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 20000);
}
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
if (next->bios.ramcfg_11_03_f0)
ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
if (next->bios.ramcfg_DLLoff)
nvkm_sddr3_dll_disable(fuc);
ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f090, 0x00000060);
ram_wr32(fuc, 0x10f090, 0xc000007e);
/*XXX: there does appear to be some kind of condition here, simply
* modifying these bits in the vbios from the default pl0
* entries shows no change. however, the data does appear to
* be correct and may be required for the transition back
*/
mask = 0x00010000;
data = 0x00010000;
if (1) {
mask |= 0x800807e0;
data |= 0x800807e0;
switch (next->bios.ramcfg_11_03_c0) {
case 3: data &= ~0x00000040; break;
case 2: data &= ~0x00000100; break;
case 1: data &= ~0x80000000; break;
case 0: data &= ~0x00000400; break;
}
switch (next->bios.ramcfg_11_03_30) {
case 3: data &= ~0x00000020; break;
case 2: data &= ~0x00000080; break;
case 1: data &= ~0x00080000; break;
case 0: data &= ~0x00000200; break;
}
}
if (next->bios.ramcfg_11_02_80)
mask |= 0x03000000;
if (next->bios.ramcfg_11_02_40)
mask |= 0x00002000;
if (next->bios.ramcfg_11_07_10)
mask |= 0x00004000;
if (next->bios.ramcfg_11_07_08)
mask |= 0x00000003;
else
mask |= 0x14000000;
ram_mask(fuc, 0x10f824, mask, data);
ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
data |= next->bios.ramcfg_11_03_30 << 16;
ram_wr32(fuc, 0x1373ec, data);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
/* (re)program refpll, if required */
if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
(ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
ram_wr32(fuc, 0x137320, 0x00000000);
ram_mask(fuc, 0x132030, 0xffff0000, runk0);
ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
ram_wr32(fuc, 0x132024, rcoef);
ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
}
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
if (ram_have(fuc, gpioMV)) {
u32 temp = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
if (temp != ram_rd32(fuc, gpioMV)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 64000);
}
}
if (next->bios.ramcfg_11_02_40 ||
next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
if (ram->mode != 2) /*XXX*/ {
if (next->bios.ramcfg_11_07_40)
ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
}
ram_wr32(fuc, 0x10f65c, 0x00000011 * next->bios.rammap_11_11_0c);
ram_wr32(fuc, 0x10f6b8, 0x01010101 * next->bios.ramcfg_11_09);
ram_wr32(fuc, 0x10f6bc, 0x01010101 * next->bios.ramcfg_11_09);
mask = 0x00010000;
data = 0x00000000;
if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x14000000;
ram_mask(fuc, 0x10f824, mask, data);
ram_nsec(fuc, 1000);
if (next->bios.ramcfg_11_08_01)
data = 0x00100000;
else
data = 0x00000000;
ram_mask(fuc, 0x10f82c, 0x00100000, data);
/* PFB timing */
ram_mask(fuc, 0x10f248, 0xffffffff, next->bios.timing[10]);
ram_mask(fuc, 0x10f290, 0xffffffff, next->bios.timing[0]);
ram_mask(fuc, 0x10f294, 0xffffffff, next->bios.timing[1]);
ram_mask(fuc, 0x10f298, 0xffffffff, next->bios.timing[2]);
ram_mask(fuc, 0x10f29c, 0xffffffff, next->bios.timing[3]);
ram_mask(fuc, 0x10f2a0, 0xffffffff, next->bios.timing[4]);
ram_mask(fuc, 0x10f2a4, 0xffffffff, next->bios.timing[5]);
ram_mask(fuc, 0x10f2a8, 0xffffffff, next->bios.timing[6]);
ram_mask(fuc, 0x10f2ac, 0xffffffff, next->bios.timing[7]);
ram_mask(fuc, 0x10f2cc, 0xffffffff, next->bios.timing[8]);
ram_mask(fuc, 0x10f2e8, 0xffffffff, next->bios.timing[9]);
mask = 0x33f00000;
data = 0x00000000;
if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
mask |= 0x08000004;
}
data |= 0x04000000;
} else {
mask |= 0x44000020;
data |= 0x08000004;
}
ram_mask(fuc, 0x10f808, mask, data);
ram_wr32(fuc, 0x10f870, 0x11111111 * next->bios.ramcfg_11_03_0f);
ram_mask(fuc, 0x10f250, 0x000003f0, next->bios.timing_20_2c_003f << 4);
data = (next->bios.timing[10] & 0x7f000000) >> 24;
if (data < next->bios.timing_20_2c_1fc0)
data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
ram_wr32(fuc, 0x10f090, 0x4000007f);
ram_nsec(fuc, 1000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
ram_nsec(fuc, 1000);
if (!next->bios.ramcfg_DLLoff) {
ram_mask(fuc, mr[1], 0x1, 0x0);
nvkm_sddr3_dll_reset(fuc);
}
ram_mask(fuc, mr[2], 0x00000fff, ram->base.mr[2]);
ram_mask(fuc, mr[1], 0xffffffff, ram->base.mr[1]);
ram_wr32(fuc, mr[0], ram->base.mr[0]);
ram_nsec(fuc, 1000);
if (!next->bios.ramcfg_DLLoff) {
nvkm_sddr3_dll_reset(fuc);
ram_nsec(fuc, 1000);
}
if (vc == 0 && ram_have(fuc, gpio2E)) {
u32 temp = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
if (temp != ram_rd32(fuc, gpio2E)) {
ram_wr32(fuc, gpiotrig, 1);
ram_nsec(fuc, 20000);
}
}
if (ram->mode != 2) {
ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
}
ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
ram_nsec(fuc, 1000);
ram_unblock(fuc);
if (ram->base.fb->subdev.device->disp)
ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
if (next->bios.rammap_11_08_01)
data = 0x00000800;
else
data = 0x00000000;
ram_mask(fuc, 0x10f200, 0x00000800, data);
return 0;
}
/*******************************************************************************
* main hooks
******************************************************************************/
static int
gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
{
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
struct nvkm_ram_data *cfg;
u32 mhz = khz / 1000;
list_for_each_entry(cfg, &ram->cfg, head) {
if (mhz >= cfg->bios.rammap_min &&
mhz <= cfg->bios.rammap_max) {
*data = *cfg;
data->freq = khz;
return 0;
}
}
nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
return -EINVAL;
}
static int
gk104_calc_pll_output(int fN, int M, int N, int P, int clk)
{
return ((clk * N) + (((u16)(fN + 4096) * clk) >> 13)) / (M * P);
}
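/* gk104_calc_pll_output() models one PLL stage as
 * out = in * (N + (fN + 4096) / 8192) / (M * P), i.e. fN is a fractional
 * feedback adjustment in 1/8192 steps on top of a fixed +0.5 offset.
 * For example, with a 27000 kHz crystal and fN = 0, N = 0x25, M = 1,
 * P = 7, this yields (27000 * 37 + 13500) / 7 ~= 144642 kHz (numbers are
 * purely illustrative).  gk104_pll_calc_hiclk() below searches N/P for
 * the reference PLL and an integer multiplier for the memory PLL with
 * M1 = M2 = P2 = 1 fixed, then trims fN1 to pull the cascade onto the
 * requested memory clock.
 */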
static int
gk104_pll_calc_hiclk(int target_khz, int crystal,
int *N1, int *fN1, int *M1, int *P1,
int *N2, int *M2, int *P2)
{
int best_err = target_khz, p_ref, n_ref;
bool upper = false;
*M1 = 1;
/* M has to be 1, otherwise it gets unstable */
*M2 = 1;
/* can be 1 or 2, sticking with 1 for simplicity */
*P2 = 1;
for (p_ref = 0x7; p_ref >= 0x5; --p_ref) {
for (n_ref = 0x25; n_ref <= 0x2b; ++n_ref) {
int cur_N, cur_clk, cur_err;
cur_clk = gk104_calc_pll_output(0, 1, n_ref, p_ref, crystal);
cur_N = target_khz / cur_clk;
cur_err = target_khz
- gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk);
/* we found a better combination */
if (cur_err < best_err) {
best_err = cur_err;
*N2 = cur_N;
*N1 = n_ref;
*P1 = p_ref;
upper = false;
}
cur_N += 1;
cur_err = gk104_calc_pll_output(0xf000, 1, cur_N, 1, cur_clk)
- target_khz;
if (cur_err < best_err) {
best_err = cur_err;
*N2 = cur_N;
*N1 = n_ref;
*P1 = p_ref;
upper = true;
}
}
}
/* adjust fN to get closer to the target clock */
*fN1 = (u16)((((best_err / *N2 * *P2) * (*P1 * *M1)) << 13) / crystal);
if (upper)
*fN1 = (u16)(1 - *fN1);
return gk104_calc_pll_output(*fN1, 1, *N1, *P1, crystal);
}
static int
gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
{
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_subdev *subdev = &ram->base.fb->subdev;
int refclk, i;
int ret;
ret = ram_init(fuc, ram->base.fb);
if (ret)
return ret;
ram->mode = (next->freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
/* XXX: this is *not* what nvidia do. on fermi nvidia generally
* select, based on some unknown condition, one of the two possible
* reference frequencies listed in the vbios table for mempll and
* program refpll to that frequency.
*
* so far, i've seen very weird values being chosen by nvidia on
* kepler boards, no idea how/why they're chosen.
*/
refclk = next->freq;
if (ram->mode == 2) {
ret = gk104_pll_calc_hiclk(next->freq, subdev->device->crystal,
&ram->N1, &ram->fN1, &ram->M1, &ram->P1,
&ram->N2, &ram->M2, &ram->P2);
fuc->mempll.refclk = ret;
if (ret <= 0) {
nvkm_error(subdev, "unable to calc plls\n");
return -EINVAL;
}
nvkm_debug(subdev, "successfully calced PLLs for clock %i kHz"
" (refclock: %i kHz)\n", next->freq, ret);
} else {
/* calculate refpll coefficients */
ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
&ram->fN1, &ram->M1, &ram->P1);
fuc->mempll.refclk = ret;
if (ret <= 0) {
nvkm_error(subdev, "unable to calc refpll\n");
return -EINVAL;
}
}
for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
if (ram_have(fuc, mr[i]))
ram->base.mr[i] = ram_rd32(fuc, mr[i]);
}
ram->base.freq = next->freq;
switch (ram->base.type) {
case NVKM_RAM_TYPE_DDR3:
ret = nvkm_sddr3_calc(&ram->base);
if (ret == 0)
ret = gk104_ram_calc_sddr3(ram, next->freq);
break;
case NVKM_RAM_TYPE_GDDR5:
ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
if (ret == 0)
ret = gk104_ram_calc_gddr5(ram, next->freq);
break;
default:
ret = -ENOSYS;
break;
}
return ret;
}
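/* A reclock may be split into two passes: if the derived transition
 * configuration differs from the current one, an intermediate "xition"
 * configuration (the lower of the two frequencies, with the
 * voltage/termination-related bits copied from the other entry) is
 * programmed first, and the caller re-invokes calc/prog for the real
 * target once gk104_ram_prog() reports that the transition step was
 * taken.  This restates the logic below; the exact rationale is not
 * documented.
 */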
int
gk104_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct gk104_ram *ram = gk104_ram(base);
struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
struct nvkm_ram_data *xits = &ram->base.xition;
struct nvkm_ram_data *copy;
int ret;
if (ram->base.next == NULL) {
ret = gk104_ram_calc_data(ram,
nvkm_clk_read(clk, nv_clk_src_mem),
&ram->base.former);
if (ret)
return ret;
ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
if (ret)
return ret;
if (ram->base.target.freq < ram->base.former.freq) {
*xits = ram->base.target;
copy = &ram->base.former;
} else {
*xits = ram->base.former;
copy = &ram->base.target;
}
xits->bios.ramcfg_11_02_04 = copy->bios.ramcfg_11_02_04;
xits->bios.ramcfg_11_02_03 = copy->bios.ramcfg_11_02_03;
xits->bios.timing_20_30_07 = copy->bios.timing_20_30_07;
ram->base.next = &ram->base.target;
if (memcmp(xits, &ram->base.former, sizeof(xits->bios)))
ram->base.next = &ram->base.xition;
} else {
BUG_ON(ram->base.next != &ram->base.xition);
ram->base.next = &ram->base.target;
}
return gk104_ram_calc_xits(ram, ram->base.next);
}
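/* gk104_ram_prog_0() applies the per-frequency rammap fields that live
 * outside the memx script (0x10f400/410/420/430/440/444/468), touching
 * only the fields that actually differ between rammap entries
 * (ram->diff).  gk104_ram_prog() calls it with freq = 1000 (1 MHz,
 * normally selecting the lowest rammap entry) before executing the
 * script and again with the real target frequency afterwards.
 * Description inferred from the code.
 */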
static void
gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
{
struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_ram_data *cfg;
u32 mhz = freq / 1000;
u32 mask, data;
list_for_each_entry(cfg, &ram->cfg, head) {
if (mhz >= cfg->bios.rammap_min &&
mhz <= cfg->bios.rammap_max)
break;
}
if (&cfg->head == &ram->cfg)
return;
if (mask = 0, data = 0, ram->diff.rammap_11_0a_03fe) {
data |= cfg->bios.rammap_11_0a_03fe << 12;
mask |= 0x001ff000;
}
if (ram->diff.rammap_11_09_01ff) {
data |= cfg->bios.rammap_11_09_01ff;
mask |= 0x000001ff;
}
nvkm_mask(device, 0x10f468, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
data |= cfg->bios.rammap_11_0a_0400;
mask |= 0x00000001;
}
nvkm_mask(device, 0x10f420, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
data |= cfg->bios.rammap_11_0a_0800;
mask |= 0x00000001;
}
nvkm_mask(device, 0x10f430, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
data |= cfg->bios.rammap_11_0b_01f0;
mask |= 0x0000001f;
}
nvkm_mask(device, 0x10f400, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
data |= cfg->bios.rammap_11_0b_0200 << 9;
mask |= 0x00000200;
}
nvkm_mask(device, 0x10f410, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
data |= cfg->bios.rammap_11_0d << 16;
mask |= 0x00ff0000;
}
if (ram->diff.rammap_11_0f) {
data |= cfg->bios.rammap_11_0f << 8;
mask |= 0x0000ff00;
}
nvkm_mask(device, 0x10f440, mask, data);
if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
data |= cfg->bios.rammap_11_0e << 8;
mask |= 0x0000ff00;
}
if (ram->diff.rammap_11_0b_0800) {
data |= cfg->bios.rammap_11_0b_0800 << 7;
mask |= 0x00000080;
}
if (ram->diff.rammap_11_0b_0400) {
data |= cfg->bios.rammap_11_0b_0400 << 5;
mask |= 0x00000020;
}
nvkm_mask(device, 0x10f444, mask, data);
}
int
gk104_ram_prog(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
struct gk104_ramfuc *fuc = &ram->fuc;
struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_ram_data *next = ram->base.next;
if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
ram_exec(fuc, false);
return (ram->base.next == &ram->base.xition);
}
gk104_ram_prog_0(ram, 1000);
ram_exec(fuc, true);
gk104_ram_prog_0(ram, next->freq);
return (ram->base.next == &ram->base.xition);
}
void
gk104_ram_tidy(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
ram->base.next = NULL;
ram_exec(&ram->fuc, false);
}
struct gk104_ram_train {
u16 mask;
struct nvbios_M0209S remap;
struct nvbios_M0209S type00;
struct nvbios_M0209S type01;
struct nvbios_M0209S type04;
struct nvbios_M0209S type06;
struct nvbios_M0209S type07;
struct nvbios_M0209S type08;
struct nvbios_M0209S type09;
};
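/* Link-training data is spread over several vbios tables: each M0205E
 * entry names a data type, the matching M0205S entry (selected by the
 * ramcfg strap) points at an M0209 entry, M0209E describes its format
 * and M0209S carries the raw values, optionally routed through a second
 * M0209S entry used as a remap table when v02_07 == 2.  This merely
 * summarises what gk104_ram_train_type() below does.
 */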
static int
gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
struct gk104_ram_train *train)
{
struct nvkm_bios *bios = ram->fb->subdev.device->bios;
struct nvbios_M0205E M0205E;
struct nvbios_M0205S M0205S;
struct nvbios_M0209E M0209E;
struct nvbios_M0209S *remap = &train->remap;
struct nvbios_M0209S *value;
u8 ver, hdr, cnt, len;
u32 data;
/* determine type of data for this index */
if (!(data = nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E)))
return -ENOENT;
switch (M0205E.type) {
case 0x00: value = &train->type00; break;
case 0x01: value = &train->type01; break;
case 0x04: value = &train->type04; break;
case 0x06: value = &train->type06; break;
case 0x07: value = &train->type07; break;
case 0x08: value = &train->type08; break;
case 0x09: value = &train->type09; break;
default:
return 0;
}
/* training data index determined by ramcfg strap */
if (!(data = nvbios_M0205Sp(bios, i, ramcfg, &ver, &hdr, &M0205S)))
return -EINVAL;
i = M0205S.data;
/* training data format information */
if (!(data = nvbios_M0209Ep(bios, i, &ver, &hdr, &cnt, &len, &M0209E)))
return -EINVAL;
/* ... and the raw data */
if (!(data = nvbios_M0209Sp(bios, i, 0, &ver, &hdr, value)))
return -EINVAL;
if (M0209E.v02_07 == 2) {
/* of course! why wouldn't we have a pointer to another entry
* in the same table, and use the first one as an array of
* remap indices...
*/
if (!(data = nvbios_M0209Sp(bios, M0209E.v03, 0, &ver, &hdr,
remap)))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(value->data); i++)
value->data[i] = remap->data[value->data[i]];
} else
if (M0209E.v02_07 != 1)
return -EINVAL;
train->mask |= 1 << M0205E.type;
return 0;
}
static int
gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
struct nvkm_device *device = subdev->device;
int i, j;
if ((train->mask & 0x03d3) != 0x03d3) {
nvkm_warn(subdev, "missing link training data\n");
return -EINVAL;
}
for (i = 0; i < 0x30; i++) {
for (j = 0; j < 8; j += 4) {
nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
train->type08.data[i] << 4 |
train->type06.data[i]);
nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
train->type09.data[i] << 4 |
train->type07.data[i]);
nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
}
}
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 0x100; i++) {
nvkm_wr32(device, 0x10f968 + j, i);
nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
}
}
return 0;
}
static int
gk104_ram_train_init(struct nvkm_ram *ram)
{
u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
struct gk104_ram_train *train;
int ret, i;
if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
return -ENOMEM;
for (i = 0; i < 0x100; i++) {
ret = gk104_ram_train_type(ram, i, ramcfg, train);
if (ret && ret != -ENOENT)
break;
}
switch (ram->type) {
case NVKM_RAM_TYPE_GDDR5:
ret = gk104_ram_train_init_0(ram, train);
break;
default:
ret = 0;
break;
}
kfree(train);
return ret;
}
int
gk104_ram_init(struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &ram->fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
u8 ver, hdr, cnt, len, snr, ssz;
u32 data, save;
int i;
/* run a bunch of tables from rammap table. there's actually
* individual pointers for each rammap entry too, but, nvidia
* seem to just run the last two entries' scripts early on in
* their init, and never again.. we'll just run 'em all once
* for now.
*
* i strongly suspect that each script is for a separate mode
* (likely selected by 0x10f65c's lower bits?), and the
* binary driver skips the one that's already been setup by
* the init tables.
*/
data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
if (!data || hdr < 0x15)
return -EINVAL;
cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
for (i = 0; i < cnt; i++, data += 4) {
if (i != save >> 4) {
nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
nvbios_init(subdev, nvbios_rd32(bios, data));
}
}
nvkm_mask(device, 0x10f65c, 0x000000f0, save);
nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
nvkm_wr32(device, 0x10ecc0, 0xffffffff);
nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
return gk104_ram_train_init(ram);
}
static int
gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
{
struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
struct nvkm_ram_data *cfg;
struct nvbios_ramcfg *d = &ram->diff;
struct nvbios_ramcfg *p, *n;
u8 ver, hdr, cnt, len;
u32 data;
int ret;
if (!(cfg = kmalloc(sizeof(*cfg), GFP_KERNEL)))
return -ENOMEM;
p = &list_last_entry(&ram->cfg, typeof(*cfg), head)->bios;
n = &cfg->bios;
/* memory config data for a range of target frequencies */
data = nvbios_rammapEp(bios, i, &ver, &hdr, &cnt, &len, &cfg->bios);
if (ret = -ENOENT, !data)
goto done;
if (ret = -ENOSYS, ver != 0x11 || hdr < 0x12)
goto done;
/* ... and a portion specific to the attached memory */
data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, ramcfg,
&ver, &hdr, &cfg->bios);
if (ret = -EINVAL, !data)
goto done;
if (ret = -ENOSYS, ver != 0x11 || hdr < 0x0a)
goto done;
/* lookup memory timings, if bios says they're present */
if (cfg->bios.ramcfg_timing != 0xff) {
data = nvbios_timingEp(bios, cfg->bios.ramcfg_timing,
&ver, &hdr, &cnt, &len,
&cfg->bios);
if (ret = -EINVAL, !data)
goto done;
if (ret = -ENOSYS, ver != 0x20 || hdr < 0x33)
goto done;
}
list_add_tail(&cfg->head, &ram->cfg);
if (ret = 0, i == 0)
goto done;
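/* note which fields differ between this entry and the previous one;
 * fields that are identical across every entry are treated as a
 * "don't touch" indicator (see the comment in gk104_ram_new_).
 */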
d->rammap_11_0a_03fe |= p->rammap_11_0a_03fe != n->rammap_11_0a_03fe;
d->rammap_11_09_01ff |= p->rammap_11_09_01ff != n->rammap_11_09_01ff;
d->rammap_11_0a_0400 |= p->rammap_11_0a_0400 != n->rammap_11_0a_0400;
d->rammap_11_0a_0800 |= p->rammap_11_0a_0800 != n->rammap_11_0a_0800;
d->rammap_11_0b_01f0 |= p->rammap_11_0b_01f0 != n->rammap_11_0b_01f0;
d->rammap_11_0b_0200 |= p->rammap_11_0b_0200 != n->rammap_11_0b_0200;
d->rammap_11_0d |= p->rammap_11_0d != n->rammap_11_0d;
d->rammap_11_0f |= p->rammap_11_0f != n->rammap_11_0f;
d->rammap_11_0e |= p->rammap_11_0e != n->rammap_11_0e;
d->rammap_11_0b_0800 |= p->rammap_11_0b_0800 != n->rammap_11_0b_0800;
d->rammap_11_0b_0400 |= p->rammap_11_0b_0400 != n->rammap_11_0b_0400;
d->ramcfg_11_01_01 |= p->ramcfg_11_01_01 != n->ramcfg_11_01_01;
d->ramcfg_11_01_02 |= p->ramcfg_11_01_02 != n->ramcfg_11_01_02;
d->ramcfg_11_01_10 |= p->ramcfg_11_01_10 != n->ramcfg_11_01_10;
d->ramcfg_11_02_03 |= p->ramcfg_11_02_03 != n->ramcfg_11_02_03;
d->ramcfg_11_08_20 |= p->ramcfg_11_08_20 != n->ramcfg_11_08_20;
d->timing_20_30_07 |= p->timing_20_30_07 != n->timing_20_30_07;
done:
if (ret)
kfree(cfg);
return ret;
}
void *
gk104_ram_dtor(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
struct nvkm_ram_data *cfg, *tmp;
list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
kfree(cfg);
}
return ram;
}
int
gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
struct nvkm_ram **pram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct dcb_gpio_func gpio;
struct gk104_ram *ram;
int ret, i;
u8 ramcfg = nvbios_ramcfg_index(subdev);
u32 tmp;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
ret = gf100_ram_ctor(func, fb, &ram->base);
if (ret)
return ret;
INIT_LIST_HEAD(&ram->cfg);
/* calculate a mask of differently configured memory partitions,
* because, of course reclocking wasn't complicated enough
* already without having to treat some of them differently to
* the others....
*/
ram->parts = nvkm_rd32(device, 0x022438);
ram->pmask = nvkm_rd32(device, 0x022554);
ram->pnuts = 0;
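/* any enabled partition whose 0x110204 config differs from the
 * first one seen gets flagged in pnuts and handled separately.
 */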
for (i = 0, tmp = 0; i < ram->parts; i++) {
if (!(ram->pmask & (1 << i))) {
u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
if (tmp && tmp != cfg1) {
ram->pnuts |= (1 << i);
continue;
}
tmp = cfg1;
}
}
/* parse bios data for all rammap table entries up-front, and
* build information on whether certain fields differ between
* any of the entries.
*
* the binary driver appears to completely ignore some fields
* when all entries contain the same value. at first, it was
* hoped that these were mere optimisations and the bios init
* tables had configured as per the values here, but there is
* evidence now to suggest that this isn't the case and we do
* need to treat this condition as a "don't touch" indicator.
*/
for (i = 0; !ret; i++) {
ret = gk104_ram_ctor_data(ram, ramcfg, i);
if (ret && ret != -ENOENT) {
nvkm_error(subdev, "failed to parse ramcfg data\n");
return ret;
}
}
/* parse bios data for both pll's */
ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
if (ret) {
nvkm_error(subdev, "mclk refpll data not found\n");
return ret;
}
ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
if (ret) {
nvkm_error(subdev, "mclk pll data not found\n");
return ret;
}
/* lookup memory voltage gpios */
ret = nvkm_gpio_find(device->gpio, 0, 0x18, DCB_GPIO_UNUSED, &gpio);
if (ret == 0) {
ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
ram->fuc.r_funcMV[0] = (gpio.log[0] ^ 2) << 12;
ram->fuc.r_funcMV[1] = (gpio.log[1] ^ 2) << 12;
}
ret = nvkm_gpio_find(device->gpio, 0, 0x2e, DCB_GPIO_UNUSED, &gpio);
if (ret == 0) {
ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
ram->fuc.r_func2E[0] = (gpio.log[0] ^ 2) << 12;
ram->fuc.r_func2E[1] = (gpio.log[1] ^ 2) << 12;
}
ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
ram->fuc.r_0x132040 = ramfuc_reg(0x132040);
ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);
ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);
ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
switch (ram->base.type) {
case NVKM_RAM_TYPE_GDDR5:
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
break;
case NVKM_RAM_TYPE_DDR3:
ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
ram->fuc.r_mr[1] = ramfuc_reg(0x10f304);
ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
break;
default:
break;
}
ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
return 0;
}
static const struct nvkm_ram_func
gk104_ram = {
.upper = 0x0200000000ULL,
.probe_fbp = gf100_ram_probe_fbp,
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
};
int
gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
return gk104_ram_new_(&gk104_ram, fb, pram);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "ram.h"
static const struct nv50_fb_func
mcp77_fb = {
.ram_new = mcp77_ram_new,
.trap = 0x001d07ff,
};
int
mcp77_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nv50_fb_new_(&mcp77_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "bus.h"
static const struct nvkm_i2c_pad_func
nv50_i2c_pad_func = {
.bus_new_4 = nv50_i2c_bus_new,
};
int
nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static const struct nvkm_i2c_func
nv04_i2c = {
.pad_x_new = nv04_i2c_pad_new,
};
int
nv04_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&nv04_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
void
gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
struct nvkm_device *device = i2c->subdev.device;
u32 intr = nvkm_rd32(device, 0x00dc60);
u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
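/* status packs four bits per AUX channel: plug, unplug, irq and
 * done (see nvkm_i2c_intr for how these are dispatched).
 */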
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
}
nvkm_wr32(device, 0x00dc60, intr);
}
void
gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = i2c->subdev.device;
u32 temp = nvkm_rd32(device, 0x00dc68), i;
for (i = 0; i < 8; i++) {
if (mask & (1 << i)) {
if (!(data & (1 << i))) {
temp &= ~(type << (i * 4));
continue;
}
temp |= type << (i * 4);
}
}
nvkm_wr32(device, 0x00dc68, temp);
}
static const struct nvkm_i2c_func
gk104_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
.pad_s_new = gf119_i2c_pad_s_new,
.aux = 4,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
};
int
gk104_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&gk104_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
#include "aux.h"
#include "bus.h"
struct anx9805_pad {
struct nvkm_i2c_pad base;
struct nvkm_i2c_bus *bus;
u8 addr;
};
struct anx9805_bus {
struct nvkm_i2c_bus base;
struct anx9805_pad *pad;
u8 addr;
};
static int
anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
{
struct anx9805_bus *bus = anx9805_bus(base);
struct anx9805_pad *pad = bus->pad;
struct i2c_adapter *adap = &pad->bus->i2c;
struct i2c_msg *msg = msgs;
int ret = -ETIMEDOUT;
int i, j, cnt = num;
u8 seg = 0x00, off = 0x00, tmp;
tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
mdelay(5);
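/* only DDC/EDID-style accesses are handled here: reads from 0x50,
 * single-byte offset writes to 0x50 and segment writes to 0x30;
 * anything else aborts the transfer.
 */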
while (cnt--) {
if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
nvkm_wri2cr(adap, bus->addr, 0x41, seg);
nvkm_wri2cr(adap, bus->addr, 0x42, off);
nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
for (i = 0; i < msg->len; i++) {
j = 0;
while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
mdelay(5);
if (j++ == 32)
goto done;
}
msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
}
} else
if (!(msg->flags & I2C_M_RD)) {
if (msg->addr == 0x50 && msg->len == 0x01) {
off = msg->buf[0];
} else
if (msg->addr == 0x30 && msg->len == 0x01) {
seg = msg->buf[0];
} else
goto done;
} else {
goto done;
}
msg++;
}
ret = num;
done:
nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
return ret;
}
static const struct nvkm_i2c_bus_func
anx9805_bus_func = {
.xfer = anx9805_bus_xfer,
};
static int
anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
struct nvkm_i2c_bus **pbus)
{
struct anx9805_pad *pad = anx9805_pad(base);
struct anx9805_bus *bus;
int ret;
if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
*pbus = &bus->base;
bus->pad = pad;
ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
if (ret)
return ret;
switch (pad->addr) {
case 0x39: bus->addr = 0x3d; break;
case 0x3b: bus->addr = 0x3f; break;
default:
return -ENOSYS;
}
return 0;
}
struct anx9805_aux {
struct nvkm_i2c_aux base;
struct anx9805_pad *pad;
u8 addr;
};
static int
anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct anx9805_aux *aux = anx9805_aux(base);
struct anx9805_pad *pad = aux->pad;
struct i2c_adapter *adap = &pad->bus->i2c;
int i, ret = -ETIMEDOUT;
u8 buf[16] = {};
u8 tmp;
AUX_DBG(&aux->base, "%02x %05x %d", type, addr, *size);
tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
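/* for writes, load the payload into the chip's data registers
 * before issuing the request
 */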
if (!(type & 1)) {
memcpy(buf, data, *size);
AUX_DBG(&aux->base, "%16ph", buf);
for (i = 0; i < *size; i++)
nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
}
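/* program length/type and the 20-bit address, then kick off the
 * request and poll for completion
 */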
nvkm_wri2cr(adap, aux->addr, 0xe5, ((*size - 1) << 4) | type);
nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
i = 0;
while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
mdelay(5);
if (i++ == 32)
goto done;
}
if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
ret = -EIO;
goto done;
}
if (type & 1) {
for (i = 0; i < *size; i++)
buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
AUX_DBG(&aux->base, "%16ph", buf);
memcpy(data, buf, *size);
}
ret = 0;
done:
nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
return ret;
}
static int
anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
int link_nr, int link_bw, bool enh)
{
struct anx9805_aux *aux = anx9805_aux(base);
struct anx9805_pad *pad = aux->pad;
struct i2c_adapter *adap = &pad->bus->i2c;
u8 tmp, i;
AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
link_nr, link_bw, enh);
nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
i = 0;
while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
mdelay(5);
if (i++ == 100) {
AUX_ERR(&aux->base, "link training timeout");
return -ETIMEDOUT;
}
}
if (tmp & 0x70) {
AUX_ERR(&aux->base, "link training failed");
return -EIO;
}
return 0;
}
static const struct nvkm_i2c_aux_func
anx9805_aux_func = {
.xfer = anx9805_aux_xfer,
.lnk_ctl = anx9805_aux_lnk_ctl,
};
static int
anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
struct nvkm_i2c_aux **pbus)
{
struct anx9805_pad *pad = anx9805_pad(base);
struct anx9805_aux *aux;
int ret;
if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
return -ENOMEM;
*pbus = &aux->base;
aux->pad = pad;
ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
if (ret)
return ret;
switch (pad->addr) {
case 0x39: aux->addr = 0x38; break;
case 0x3b: aux->addr = 0x3c; break;
default:
return -ENOSYS;
}
return 0;
}
static const struct nvkm_i2c_pad_func
anx9805_pad_func = {
.bus_new_4 = anx9805_bus_new,
.aux_new_6 = anx9805_aux_new,
};
int
anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
struct nvkm_i2c_pad **ppad)
{
struct anx9805_pad *pad;
if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
return -ENOMEM;
*ppad = &pad->base;
nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
pad->bus = bus;
pad->addr = addr;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
#include "bus.h"
struct gf119_i2c_bus {
struct nvkm_i2c_bus base;
u32 addr;
};
static void
gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
{
struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
}
static void
gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
{
struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
}
static int
gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
{
struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
}
static int
gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
{
struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
}
static void
gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
{
struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_wr32(device, bus->addr, 0x00000007);
}
static const struct nvkm_i2c_bus_func
gf119_i2c_bus_func = {
.init = gf119_i2c_bus_init,
.drive_scl = gf119_i2c_bus_drive_scl,
.drive_sda = gf119_i2c_bus_drive_sda,
.sense_scl = gf119_i2c_bus_sense_scl,
.sense_sda = gf119_i2c_bus_sense_sda,
.xfer = nvkm_i2c_bit_xfer,
};
int
gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
struct nvkm_i2c_bus **pbus)
{
struct gf119_i2c_bus *bus;
if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
*pbus = &bus->base;
nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
bus->addr = 0x00d014 + (drive * 0x20);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static const struct nvkm_i2c_func
nv50_i2c = {
.pad_x_new = nv50_i2c_pad_new,
};
int
nv50_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&nv50_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "pad.h"
static void
gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
{
nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
}
static const struct nvkm_i2c_func
gk110_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
.pad_s_new = gf119_i2c_pad_s_new,
.aux = 4,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
.aux_autodpcd = gk110_aux_autodpcd,
};
int
gk110_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&gk110_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "bus.h"
static const struct nvkm_i2c_pad_func
nv04_i2c_pad_func = {
.bus_new_0 = nv04_i2c_bus_new,
};
int
nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "aux.h"
#include "bus.h"
void
g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
struct nvkm_subdev *subdev = &pad->i2c->subdev;
struct nvkm_device *device = subdev->device;
const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
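/* route the hybrid pad to either its i2c or aux function, or
 * power it down entirely
 */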
switch (mode) {
case NVKM_I2C_PAD_OFF:
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
break;
case NVKM_I2C_PAD_I2C:
nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
break;
case NVKM_I2C_PAD_AUX:
nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
break;
default:
WARN_ON(1);
break;
}
}
static const struct nvkm_i2c_pad_func
g94_i2c_pad_s_func = {
.bus_new_4 = nv50_i2c_bus_new,
.aux_new_6 = g94_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
int
g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
}
static const struct nvkm_i2c_pad_func
g94_i2c_pad_x_func = {
.bus_new_4 = nv50_i2c_bus_new,
.aux_new_6 = g94_i2c_aux_new,
};
int
g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static const struct nvkm_i2c_func
gf117_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
};
int
gf117_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&gf117_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base)
#include "aux.h"
struct gm200_i2c_aux {
struct nvkm_i2c_aux base;
int ch;
};
static void
gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
}
static int
gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 unksel = 1; /* unclear which to use, or if it matters.. */
const u32 ureq = unksel ? 0x00100000 : 0x00200000;
const u32 urep = unksel ? 0x01000000 : 0x02000000;
u32 ctrl, timeout;
/* wait up to 1ms for any previous transaction to be done... */
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
} while (ctrl & 0x07010000);
/* set some magic, and wait up to 1ms for it to appear */
nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "magic wait %08x", ctrl);
gm200_i2c_aux_fini(aux);
return -EBUSY;
}
} while ((ctrl & 0x07000000) != urep);
return 0;
}
static int
gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_i2c *i2c = aux->base.pad->i2c;
struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
ret = gm200_i2c_aux_init(aux);
if (ret < 0)
goto out;
stat = nvkm_rd32(device, 0x00d958 + base);
if (!(stat & 0x10000000)) {
AUX_TRACE(&aux->base, "sink not detected");
ret = -ENXIO;
goto out;
}
nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
}
}
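/* request type goes in bits 12+, size-1 in the low bits; bit 8 is
 * presumably the address-only flag used when size is zero
 */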
ctrl = nvkm_rd32(device, 0x00d954 + base);
ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
if (retries)
udelay(400);
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
} while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
out_err:
nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
.address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
int
gm200_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
struct nvkm_i2c_aux **paux)
{
struct gm200_i2c_aux *aux;
if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
return -ENOMEM;
*paux = &aux->base;
nvkm_i2c_aux_ctor(&gm200_i2c_aux_func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static void
gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
{
nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
}
static const struct nvkm_i2c_func
gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
.pad_s_new = gm200_i2c_pad_s_new,
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
.aux_autodpcd = gm200_aux_autodpcd,
};
int
gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
#include "bus.h"
#include <subdev/vga.h>
struct nv04_i2c_bus {
struct nvkm_i2c_bus base;
u8 drive;
u8 sense;
};
static void
nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
{
struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
u8 val = nvkm_rdvgac(device, 0, bus->drive);
if (state) val |= 0x20;
else val &= 0xdf;
nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
}
static void
nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
{
struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
u8 val = nvkm_rdvgac(device, 0, bus->drive);
if (state) val |= 0x10;
else val &= 0xef;
nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
}
static int
nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
{
struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
}
static int
nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
{
struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
}
static const struct nvkm_i2c_bus_func
nv04_i2c_bus_func = {
.drive_scl = nv04_i2c_bus_drive_scl,
.drive_sda = nv04_i2c_bus_drive_sda,
.sense_scl = nv04_i2c_bus_sense_scl,
.sense_sda = nv04_i2c_bus_sense_sda,
.xfer = nvkm_i2c_bit_xfer,
};
int
nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
struct nvkm_i2c_bus **pbus)
{
struct nv04_i2c_bus *bus;
if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
*pbus = &bus->base;
nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
bus->drive = drive;
bus->sense = sense;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c |
/*
* Copyright 2009 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <linux/string_helpers.h>
#include "aux.h"
#include "pad.h"
static int
nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
struct i2c_msg *msg = msgs;
int ret, mcnt = num;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
return ret;
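/* DP AUX transactions carry at most 16 bytes, so split each i2c
 * message into chunks, keeping MOT (middle-of-transaction) set
 * while more data or messages remain
 */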
while (mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
while (remaining) {
u8 cnt, retries, cmd;
if (msg->flags & I2C_M_RD)
cmd = 1;
else
cmd = 0;
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
for (retries = 0, cnt = 0;
retries < 32 && !cnt;
retries++) {
cnt = min_t(u8, remaining, 16);
ret = aux->func->xfer(aux, true, cmd,
msg->addr, ptr, &cnt);
if (ret < 0)
goto out;
}
if (!cnt) {
AUX_TRACE(aux, "no data after 32 retries");
ret = -EIO;
goto out;
}
ptr += cnt;
remaining -= cnt;
}
msg++;
}
ret = num;
out:
nvkm_i2c_aux_release(aux);
return ret;
}
static u32
nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm
nvkm_i2c_aux_i2c_algo = {
.master_xfer = nvkm_i2c_aux_i2c_xfer,
.functionality = nvkm_i2c_aux_i2c_func
};
void
nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
{
struct nvkm_i2c_pad *pad = aux->pad;
AUX_TRACE(aux, "monitor: %s", str_yes_no(monitor));
if (monitor)
nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
else
nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
}
void
nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
AUX_TRACE(aux, "release");
nvkm_i2c_pad_release(pad);
mutex_unlock(&aux->mutex);
}
int
nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
int ret;
AUX_TRACE(aux, "acquire");
mutex_lock(&aux->mutex);
if (aux->enabled)
ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
else
ret = -EIO;
if (ret)
mutex_unlock(&aux->mutex);
return ret;
}
int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
if (!*size && !aux->func->address_only) {
AUX_ERR(aux, "address-only transaction dropped");
return -ENOSYS;
}
return aux->func->xfer(aux, retry, type, addr, data, size);
}
int
nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
{
if (aux->func->lnk_ctl)
return aux->func->lnk_ctl(aux, nr, bw, ef);
return -ENODEV;
}
void
nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
{
struct nvkm_i2c_aux *aux = *paux;
if (aux && !WARN_ON(!aux->func)) {
AUX_TRACE(aux, "dtor");
list_del(&aux->head);
i2c_del_adapter(&aux->i2c);
kfree(*paux);
*paux = NULL;
}
}
void
nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux)
{
AUX_TRACE(aux, "init");
mutex_lock(&aux->mutex);
aux->enabled = true;
mutex_unlock(&aux->mutex);
}
void
nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux)
{
AUX_TRACE(aux, "fini");
mutex_lock(&aux->mutex);
aux->enabled = false;
mutex_unlock(&aux->mutex);
}
int
nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
struct nvkm_i2c_aux *aux)
{
struct nvkm_device *device = pad->i2c->subdev.device;
aux->func = func;
aux->pad = pad;
aux->id = id;
mutex_init(&aux->mutex);
list_add_tail(&aux->head, &pad->i2c->aux);
AUX_TRACE(aux, "ctor");
snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
dev_name(device->dev), id);
aux->i2c.owner = THIS_MODULE;
aux->i2c.dev.parent = device->dev;
aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
return i2c_add_adapter(&aux->i2c);
}
int
nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
struct nvkm_i2c_aux **paux)
{
if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
return -ENOMEM;
return nvkm_i2c_aux_ctor(func, pad, id, *paux);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "aux.h"
static const struct nvkm_i2c_aux_func
gf119_i2c_aux = {
.address_only = true,
.xfer = g94_i2c_aux_xfer,
};
int
gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
struct nvkm_i2c_aux **paux)
{
return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static const struct nvkm_i2c_func
gf119_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
.pad_s_new = gf119_i2c_pad_s_new,
.aux = 4,
.aux_stat = g94_aux_stat,
.aux_mask = g94_aux_mask,
};
int
gf119_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&gf119_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "aux.h"
#include "bus.h"
#include "pad.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/i2c.h>
static struct nvkm_i2c_pad *
nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
{
struct nvkm_i2c_pad *pad;
list_for_each_entry(pad, &i2c->pad, head) {
if (pad->id == id)
return pad;
}
return NULL;
}
struct nvkm_i2c_bus *
nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
{
struct nvkm_bios *bios = i2c->subdev.device->bios;
struct nvkm_i2c_bus *bus;
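/* the vbios i2c table (DCB 3.0+) records which CCB entries act as
 * the primary/secondary "default" buses; older tables fall back to
 * CCB entry 2
 */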
if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
u8 ver, hdr, cnt, len;
u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
if (i2c && ver >= 0x30) {
u8 auxidx = nvbios_rd08(bios, i2c + 4);
if (id == NVKM_I2C_BUS_PRI)
id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
else
id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
} else {
id = NVKM_I2C_BUS_CCB(2);
}
}
list_for_each_entry(bus, &i2c->bus, head) {
if (bus->id == id)
return bus;
}
return NULL;
}
struct nvkm_i2c_aux *
nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
{
struct nvkm_i2c_aux *aux;
list_for_each_entry(aux, &i2c->aux, head) {
if (aux->id == id)
return aux;
}
return NULL;
}
static void
nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
{
struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
if (aux)
i2c->func->aux_mask(i2c, type, aux->intr, 0);
}
static void
nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
{
struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
if (aux)
i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
}
static const struct nvkm_event_func
nvkm_i2c_intr_func = {
.init = nvkm_i2c_intr_init,
.fini = nvkm_i2c_intr_fini,
};
static void
nvkm_i2c_intr(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_aux *aux;
u32 hi, lo, rq, tx;
if (!i2c->func->aux_stat)
return;
i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
if (!hi && !lo && !rq && !tx)
return;
list_for_each_entry(aux, &i2c->aux, head) {
u32 mask = 0;
if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
if (tx & aux->intr) mask |= NVKM_I2C_DONE;
if (mask)
nvkm_event_ntfy(&i2c->event, aux->id, mask);
}
}
static int
nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_pad *pad;
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_aux *aux;
u32 mask;
list_for_each_entry(aux, &i2c->aux, head) {
nvkm_i2c_aux_fini(aux);
}
list_for_each_entry(bus, &i2c->bus, head) {
nvkm_i2c_bus_fini(bus);
}
if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
}
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_fini(pad);
}
return 0;
}
static int
nvkm_i2c_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
/*
* We init our i2c busses as early as possible, since they may be
* needed by the vbios init scripts on some cards
*/
list_for_each_entry(pad, &i2c->pad, head)
nvkm_i2c_pad_init(pad);
list_for_each_entry(bus, &i2c->bus, head)
nvkm_i2c_bus_init(bus);
return 0;
}
static int
nvkm_i2c_init(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
struct nvkm_i2c_aux *aux;
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_init(pad);
}
list_for_each_entry(bus, &i2c->bus, head) {
nvkm_i2c_bus_init(bus);
}
list_for_each_entry(aux, &i2c->aux, head) {
nvkm_i2c_aux_init(aux);
}
return 0;
}
static void *
nvkm_i2c_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
nvkm_event_fini(&i2c->event);
while (!list_empty(&i2c->aux)) {
struct nvkm_i2c_aux *aux =
list_first_entry(&i2c->aux, typeof(*aux), head);
nvkm_i2c_aux_del(&aux);
}
while (!list_empty(&i2c->bus)) {
struct nvkm_i2c_bus *bus =
list_first_entry(&i2c->bus, typeof(*bus), head);
nvkm_i2c_bus_del(&bus);
}
while (!list_empty(&i2c->pad)) {
struct nvkm_i2c_pad *pad =
list_first_entry(&i2c->pad, typeof(*pad), head);
nvkm_i2c_pad_del(&pad);
}
return i2c;
}
static const struct nvkm_subdev_func
nvkm_i2c = {
.dtor = nvkm_i2c_dtor,
.preinit = nvkm_i2c_preinit,
.init = nvkm_i2c_init,
.fini = nvkm_i2c_fini,
.intr = nvkm_i2c_intr,
};
static const struct nvkm_i2c_drv {
u8 bios;
u8 addr;
int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
struct nvkm_i2c_pad **);
}
nvkm_i2c_drv[] = {
{ 0x0d, 0x39, anx9805_pad_new },
{ 0x0e, 0x3b, anx9805_pad_new },
{}
};
int
nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c;
struct nvkm_i2c_aux *aux;
struct dcb_i2c_entry ccbE;
struct dcb_output dcbE;
u8 ver, hdr;
int ret, i, ids;
if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_i2c, device, type, inst, &i2c->subdev);
i2c->func = func;
INIT_LIST_HEAD(&i2c->pad);
INIT_LIST_HEAD(&i2c->bus);
INIT_LIST_HEAD(&i2c->aux);
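	/* walk the VBIOS CCB (i2c) table, creating a pad plus any bus and/or
	 * aux channel objects described by each entry
	 */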
i = -1;
while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
struct nvkm_i2c_pad *pad = NULL;
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
"sense %02x share %02x auxch %02x\n", i, ccbE.type,
ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
if (ccbE.share != DCB_I2C_UNUSED) {
const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
if (!(pad = nvkm_i2c_pad_find(i2c, id)))
ret = func->pad_s_new(i2c, id, &pad);
else
ret = 0;
} else {
ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
}
if (ret) {
nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
nvkm_i2c_pad_del(&pad);
continue;
}
if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
ccbE.drive,
ccbE.sense, &bus);
} else
if (pad->func->bus_new_4 &&
( ccbE.type == DCB_I2C_NV4E_BIT ||
ccbE.type == DCB_I2C_NVIO_BIT ||
(ccbE.type == DCB_I2C_PMGR &&
ccbE.drive != DCB_I2C_UNUSED))) {
ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
ccbE.drive, &bus);
}
if (ret) {
nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
nvkm_i2c_bus_del(&bus);
}
if (pad->func->aux_new_6 &&
( ccbE.type == DCB_I2C_NVIO_AUX ||
(ccbE.type == DCB_I2C_PMGR &&
ccbE.auxch != DCB_I2C_UNUSED))) {
ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
ccbE.auxch, &aux);
} else {
ret = 0;
}
if (ret) {
nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
nvkm_i2c_aux_del(&aux);
}
if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
continue;
}
}
i = -1;
while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
/* internal outputs handled by native i2c busses (above) */
if (!dcbE.location)
continue;
/* we need an i2c bus to talk to the external encoder */
bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
if (!bus) {
nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
continue;
}
/* ... and a driver for it */
while (drv->pad_new) {
if (drv->bios == dcbE.extdev)
break;
drv++;
}
if (!drv->pad_new) {
nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
i, dcbE.extdev);
continue;
}
/* find/create an instance of the driver */
pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
if (!pad) {
const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
ret = drv->pad_new(bus, id, drv->addr, &pad);
if (ret) {
nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
i, ret);
nvkm_i2c_pad_del(&pad);
continue;
}
}
/* create any i2c bus / aux channel required by the output */
if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
struct nvkm_i2c_aux *aux = NULL;
ret = pad->func->aux_new_6(pad, id, 0, &aux);
if (ret) {
nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
i, ret);
nvkm_i2c_aux_del(&aux);
}
} else
if (pad->func->bus_new_4) {
const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
struct nvkm_i2c_bus *bus = NULL;
ret = pad->func->bus_new_4(pad, id, 0, &bus);
if (ret) {
nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
i, ret);
nvkm_i2c_bus_del(&bus);
}
}
}
ids = 0;
list_for_each_entry(aux, &i2c->aux, head)
ids = max(ids, aux->id + 1);
if (!ids)
return 0;
return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, ids, &i2c->event);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
#include "bus.h"
#include <subdev/vga.h>
struct nv50_i2c_bus {
struct nvkm_i2c_bus base;
u32 addr;
u32 data;
};
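/* bus->data shadows the bit-banged port register: bit 0 drives SCL, bit 1
 * drives SDA, and bit 2 (set by init's 0x00000007) presumably enables the
 * port - line state is sensed from bits 0/1 of the same register.
 */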
static void
nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
{
struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
if (state) bus->data |= 0x01;
else bus->data &= 0xfe;
nvkm_wr32(device, bus->addr, bus->data);
}
static void
nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
{
struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
if (state) bus->data |= 0x02;
else bus->data &= 0xfd;
nvkm_wr32(device, bus->addr, bus->data);
}
static int
nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
{
struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
}
static int
nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
{
struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
}
static void
nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
{
struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
}
static const struct nvkm_i2c_bus_func
nv50_i2c_bus_func = {
.init = nv50_i2c_bus_init,
.drive_scl = nv50_i2c_bus_drive_scl,
.drive_sda = nv50_i2c_bus_drive_sda,
.sense_scl = nv50_i2c_bus_sense_scl,
.sense_sda = nv50_i2c_bus_sense_sda,
.xfer = nvkm_i2c_bit_xfer,
};
int
nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
struct nvkm_i2c_bus **pbus)
{
static const u32 addr[] = {
0x00e138, 0x00e150, 0x00e168, 0x00e180,
0x00e254, 0x00e274, 0x00e764, 0x00e780,
0x00e79c, 0x00e7b8
};
struct nv50_i2c_bus *bus;
if (drive >= ARRAY_SIZE(addr)) {
nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
return -ENODEV;
}
if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
*pbus = &bus->base;
nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
bus->addr = addr[drive];
bus->data = 0x00000007;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "bus.h"
#include "pad.h"
#include <core/option.h>
/*******************************************************************************
* i2c-algo-bit
******************************************************************************/
static int
nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
{
struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
return nvkm_i2c_bus_acquire(bus);
}
static void
nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
{
struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
return nvkm_i2c_bus_release(bus);
}
static void
nvkm_i2c_bus_setscl(void *data, int state)
{
struct nvkm_i2c_bus *bus = data;
bus->func->drive_scl(bus, state);
}
static void
nvkm_i2c_bus_setsda(void *data, int state)
{
struct nvkm_i2c_bus *bus = data;
bus->func->drive_sda(bus, state);
}
static int
nvkm_i2c_bus_getscl(void *data)
{
struct nvkm_i2c_bus *bus = data;
return bus->func->sense_scl(bus);
}
static int
nvkm_i2c_bus_getsda(void *data)
{
struct nvkm_i2c_bus *bus = data;
return bus->func->sense_sda(bus);
}
/*******************************************************************************
* !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
******************************************************************************/
static int
nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
int ret;
ret = nvkm_i2c_bus_acquire(bus);
if (ret)
return ret;
ret = bus->func->xfer(bus, msgs, num);
nvkm_i2c_bus_release(bus);
return ret;
}
static u32
nvkm_i2c_bus_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm
nvkm_i2c_bus_algo = {
.master_xfer = nvkm_i2c_bus_xfer,
.functionality = nvkm_i2c_bus_func,
};
/*******************************************************************************
* nvkm_i2c_bus base
******************************************************************************/
void
nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
{
BUS_TRACE(bus, "init");
if (bus->func->init)
bus->func->init(bus);
mutex_lock(&bus->mutex);
bus->enabled = true;
mutex_unlock(&bus->mutex);
}
void
nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus)
{
BUS_TRACE(bus, "fini");
mutex_lock(&bus->mutex);
bus->enabled = false;
mutex_unlock(&bus->mutex);
}
void
nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
BUS_TRACE(bus, "release");
nvkm_i2c_pad_release(pad);
mutex_unlock(&bus->mutex);
}
int
nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
int ret;
BUS_TRACE(bus, "acquire");
mutex_lock(&bus->mutex);
if (bus->enabled)
ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
else
ret = -EIO;
if (ret)
mutex_unlock(&bus->mutex);
return ret;
}
int
nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
struct nvkm_i2c_bus_probe *info,
bool (*match)(struct nvkm_i2c_bus *,
struct i2c_board_info *, void *), void *data)
{
int i;
BUS_DBG(bus, "probing %ss", what);
for (i = 0; info[i].dev.addr; i++) {
u8 orig_udelay = 0;
if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
BUS_DBG(bus, "%dms delay instead of %dms",
info[i].udelay, algo->udelay);
orig_udelay = algo->udelay;
algo->udelay = info[i].udelay;
}
if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
(!match || match(bus, &info[i].dev, data))) {
BUS_DBG(bus, "detected %s: %s",
what, info[i].dev.type);
return i;
}
if (orig_udelay) {
struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
algo->udelay = orig_udelay;
}
}
BUS_DBG(bus, "no devices found.");
return -ENODEV;
}
void
nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
{
struct nvkm_i2c_bus *bus = *pbus;
if (bus && !WARN_ON(!bus->func)) {
BUS_TRACE(bus, "dtor");
list_del(&bus->head);
i2c_del_adapter(&bus->i2c);
kfree(bus->i2c.algo_data);
kfree(*pbus);
*pbus = NULL;
}
}
int
nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
struct nvkm_i2c_pad *pad, int id,
struct nvkm_i2c_bus *bus)
{
struct nvkm_device *device = pad->i2c->subdev.device;
struct i2c_algo_bit_data *bit;
#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
const bool internal = false;
#else
const bool internal = true;
#endif
int ret;
bus->func = func;
bus->pad = pad;
bus->id = id;
mutex_init(&bus->mutex);
list_add_tail(&bus->head, &pad->i2c->bus);
BUS_TRACE(bus, "ctor");
snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
dev_name(device->dev), id);
bus->i2c.owner = THIS_MODULE;
bus->i2c.dev.parent = device->dev;
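	/* use the kernel's software bit-banging algorithm when the bus
	 * exposes drive callbacks and the "NvI2C" option doesn't request
	 * the internal implementation, otherwise register an adapter built
	 * around bus->func->xfer()
	 */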
if ( bus->func->drive_scl &&
!nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
return -ENOMEM;
bit->udelay = 10;
bit->timeout = usecs_to_jiffies(2200);
bit->data = bus;
bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
bit->post_xfer = nvkm_i2c_bus_post_xfer;
bit->setscl = nvkm_i2c_bus_setscl;
bit->setsda = nvkm_i2c_bus_setsda;
bit->getscl = nvkm_i2c_bus_getscl;
bit->getsda = nvkm_i2c_bus_getsda;
bus->i2c.algo_data = bit;
ret = i2c_bit_add_bus(&bus->i2c);
} else {
bus->i2c.algo = &nvkm_i2c_bus_algo;
ret = i2c_add_adapter(&bus->i2c);
}
return ret;
}
int
nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
struct nvkm_i2c_pad *pad, int id,
struct nvkm_i2c_bus **pbus)
{
if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
return -ENOMEM;
return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
void
g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
{
struct nvkm_device *device = i2c->subdev.device;
u32 intr = nvkm_rd32(device, 0x00e06c);
u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
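	/* each AUX channel owns one nibble of the status register:
	 * bit 0 = plug, bit 1 = unplug, bit 2 = irq, bit 3 = done
	 */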
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
}
nvkm_wr32(device, 0x00e06c, intr);
}
void
g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = i2c->subdev.device;
u32 temp = nvkm_rd32(device, 0x00e068), i;
for (i = 0; i < 8; i++) {
if (mask & (1 << i)) {
if (!(data & (1 << i))) {
temp &= ~(type << (i * 4));
continue;
}
temp |= type << (i * 4);
}
}
nvkm_wr32(device, 0x00e068, temp);
}
static const struct nvkm_i2c_func
g94_i2c = {
.pad_x_new = g94_i2c_pad_x_new,
.pad_s_new = g94_i2c_pad_s_new,
.aux = 4,
.aux_stat = g94_aux_stat,
.aux_mask = g94_aux_mask,
};
int
g94_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&g94_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
#include "aux.h"
struct g94_i2c_aux {
struct nvkm_i2c_aux base;
int ch;
};
static void
g94_i2c_aux_fini(struct g94_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
}
static int
g94_i2c_aux_init(struct g94_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 unksel = 1; /* nfi which to use, or if it matters.. */
const u32 ureq = unksel ? 0x00100000 : 0x00200000;
const u32 urep = unksel ? 0x01000000 : 0x02000000;
u32 ctrl, timeout;
/* wait up to 1ms for any previous transaction to be done... */
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
return -EBUSY;
}
} while (ctrl & 0x03010000);
/* set some magic, and wait up to 1ms for it to appear */
nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
timeout = 1000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "magic wait %08x", ctrl);
g94_i2c_aux_fini(aux);
return -EBUSY;
}
} while ((ctrl & 0x03000000) != urep);
return 0;
}
int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_i2c *i2c = aux->base.pad->i2c;
struct nvkm_device *device = i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries = 0;
u32 xbuf[4] = {};
int ret, i;
AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
ret = g94_i2c_aux_init(aux);
if (ret < 0)
goto out;
stat = nvkm_rd32(device, 0x00e4e8 + base);
if (!(stat & 0x10000000)) {
AUX_TRACE(&aux->base, "sink not detected");
ret = -ENXIO;
goto out;
}
nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
if (!(type & 1)) {
memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
}
}
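	/* build the control word: the AUX request type lands in bits 12+,
	 * the low bits carry the transfer length minus one, and 0x100
	 * appears to mark an address-only (zero-byte) transaction
	 */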
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
do {
/* reset, and delay a while if this is a retry */
nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
if (retries)
udelay(400);
/* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "timeout %08x", ctrl);
ret = -EIO;
goto out_err;
}
} while (ctrl & 0x00010000);
ret = 0;
/* read status, and check if transaction completed ok */
stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
if ((stat & 0x000f0000) == 0x00080000 ||
(stat & 0x000f0000) == 0x00020000)
ret = 1;
if ((stat & 0x00000100))
ret = -ETIMEDOUT;
if ((stat & 0x00000e00))
ret = -EIO;
AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
} while (ret && retry && retries++ < 32);
if (type & 1) {
for (i = 0; i < 16; i += 4) {
xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
memcpy(data, xbuf, *size);
*size = stat & 0x0000001f;
}
out_err:
nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
out:
g94_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
int
g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int index, u8 drive,
struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
return -ENOMEM;
*paux = &aux->base;
nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
static const struct nvkm_i2c_aux_func
g94_i2c_aux = {
.xfer = g94_i2c_aux_xfer,
};
int
g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
struct nvkm_i2c_aux **paux)
{
return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
#include "bus.h"
struct nv4e_i2c_bus {
struct nvkm_i2c_bus base;
u32 addr;
};
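/* the nv4e port register appears to use bit 5 for SCL drive, bit 4 for SDA
 * drive and bit 0 as an output enable, with line state read back from bits
 * 18 (SCL) and 19 (SDA)
 */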
static void
nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
{
struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
}
static void
nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
{
struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
}
static int
nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
{
struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
}
static int
nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
{
struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
}
static const struct nvkm_i2c_bus_func
nv4e_i2c_bus_func = {
.drive_scl = nv4e_i2c_bus_drive_scl,
.drive_sda = nv4e_i2c_bus_drive_sda,
.sense_scl = nv4e_i2c_bus_sense_scl,
.sense_sda = nv4e_i2c_bus_sense_sda,
.xfer = nvkm_i2c_bit_xfer,
};
int
nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
struct nvkm_i2c_bus **pbus)
{
struct nv4e_i2c_bus *bus;
if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
*pbus = &bus->base;
nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
bus->addr = 0x600800 + drive;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "bus.h"
#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
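/* bit-banging timing parameters, in nanoseconds */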
#define T_TIMEOUT 2200000
#define T_RISEFALL 1000
#define T_HOLD 5000
static inline void
nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
{
bus->func->drive_scl(bus, state);
}
static inline void
nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
{
bus->func->drive_sda(bus, state);
}
static inline int
nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
{
return bus->func->sense_scl(bus);
}
static inline int
nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
{
return bus->func->sense_sda(bus);
}
static void
nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
{
udelay((nsec + 500) / 1000);
}
static bool
nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
{
u32 timeout = T_TIMEOUT / T_RISEFALL;
nvkm_i2c_drive_scl(bus, 1);
do {
nvkm_i2c_delay(bus, T_RISEFALL);
} while (!nvkm_i2c_sense_scl(bus) && --timeout);
return timeout != 0;
}
static int
i2c_start(struct nvkm_i2c_bus *bus)
{
int ret = 0;
if (!nvkm_i2c_sense_scl(bus) ||
!nvkm_i2c_sense_sda(bus)) {
nvkm_i2c_drive_scl(bus, 0);
nvkm_i2c_drive_sda(bus, 1);
if (!nvkm_i2c_raise_scl(bus))
ret = -EBUSY;
}
nvkm_i2c_drive_sda(bus, 0);
nvkm_i2c_delay(bus, T_HOLD);
nvkm_i2c_drive_scl(bus, 0);
nvkm_i2c_delay(bus, T_HOLD);
return ret;
}
static void
i2c_stop(struct nvkm_i2c_bus *bus)
{
nvkm_i2c_drive_scl(bus, 0);
nvkm_i2c_drive_sda(bus, 0);
nvkm_i2c_delay(bus, T_RISEFALL);
nvkm_i2c_drive_scl(bus, 1);
nvkm_i2c_delay(bus, T_HOLD);
nvkm_i2c_drive_sda(bus, 1);
nvkm_i2c_delay(bus, T_HOLD);
}
static int
i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
{
nvkm_i2c_drive_sda(bus, sda);
nvkm_i2c_delay(bus, T_RISEFALL);
if (!nvkm_i2c_raise_scl(bus))
return -ETIMEDOUT;
nvkm_i2c_delay(bus, T_HOLD);
nvkm_i2c_drive_scl(bus, 0);
nvkm_i2c_delay(bus, T_HOLD);
return 0;
}
static int
i2c_bitr(struct nvkm_i2c_bus *bus)
{
int sda;
nvkm_i2c_drive_sda(bus, 1);
nvkm_i2c_delay(bus, T_RISEFALL);
if (!nvkm_i2c_raise_scl(bus))
return -ETIMEDOUT;
nvkm_i2c_delay(bus, T_HOLD);
sda = nvkm_i2c_sense_sda(bus);
nvkm_i2c_drive_scl(bus, 0);
nvkm_i2c_delay(bus, T_HOLD);
return sda;
}
static int
nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
{
int i, bit;
*byte = 0;
for (i = 7; i >= 0; i--) {
bit = i2c_bitr(bus);
if (bit < 0)
return bit;
*byte |= bit << i;
}
return i2c_bitw(bus, last ? 1 : 0);
}
static int
nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
{
int i, ret;
for (i = 7; i >= 0; i--) {
ret = i2c_bitw(bus, !!(byte & (1 << i)));
if (ret < 0)
return ret;
}
ret = i2c_bitr(bus);
if (ret == 1) /* nack */
ret = -EIO;
return ret;
}
static int
i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
if (msg->flags & I2C_M_RD)
addr |= 1;
return nvkm_i2c_put_byte(bus, addr);
}
int
nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
{
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
ret = i2c_start(bus);
if (ret == 0)
ret = i2c_addr(bus, msg);
if (msg->flags & I2C_M_RD) {
while (!ret && remaining--)
ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
} else {
while (!ret && remaining--)
ret = nvkm_i2c_put_byte(bus, *ptr++);
}
msg++;
}
i2c_stop(bus);
return (ret < 0) ? ret : num;
}
#else
int
nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
{
return -ENODEV;
}
#endif
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "pad.h"
static void
nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
(mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
if (pad->func->mode)
pad->func->mode(pad, mode);
}
void
nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
PAD_TRACE(pad, "mode %d", mode);
mutex_lock(&pad->mutex);
nvkm_i2c_pad_mode_locked(pad, mode);
pad->mode = mode;
mutex_unlock(&pad->mutex);
}
void
nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
{
PAD_TRACE(pad, "release");
if (pad->mode == NVKM_I2C_PAD_OFF)
nvkm_i2c_pad_mode_locked(pad, pad->mode);
mutex_unlock(&pad->mutex);
}
int
nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
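	/* a pad can only be in one mode at a time; acquiring it for a mode
	 * other than its current one only succeeds if the pad is currently
	 * off, otherwise -EBUSY is returned
	 */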
PAD_TRACE(pad, "acquire");
mutex_lock(&pad->mutex);
if (pad->mode != mode) {
if (pad->mode != NVKM_I2C_PAD_OFF) {
mutex_unlock(&pad->mutex);
return -EBUSY;
}
nvkm_i2c_pad_mode_locked(pad, mode);
}
return 0;
}
void
nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
{
PAD_TRACE(pad, "fini");
nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
}
void
nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
{
PAD_TRACE(pad, "init");
nvkm_i2c_pad_mode_locked(pad, pad->mode);
}
void
nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
{
struct nvkm_i2c_pad *pad = *ppad;
if (pad) {
PAD_TRACE(pad, "dtor");
list_del(&pad->head);
kfree(pad);
pad = NULL;
}
}
void
nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
int id, struct nvkm_i2c_pad *pad)
{
pad->func = func;
pad->i2c = i2c;
pad->id = id;
pad->mode = NVKM_I2C_PAD_OFF;
mutex_init(&pad->mutex);
list_add_tail(&pad->head, &i2c->pad);
PAD_TRACE(pad, "ctor");
}
int
nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
int id, struct nvkm_i2c_pad **ppad)
{
if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
return -ENOMEM;
nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "bus.h"
static const struct nvkm_i2c_pad_func
nv4e_i2c_pad_func = {
.bus_new_4 = nv4e_i2c_bus_new,
};
int
nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
static const struct nvkm_i2c_func
nv4e_i2c = {
.pad_x_new = nv4e_i2c_pad_new,
};
int
nv4e_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_i2c **pi2c)
{
return nvkm_i2c_new_(&nv4e_i2c, device, type, inst, pi2c);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "aux.h"
#include "bus.h"
static void
gm200_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
struct nvkm_subdev *subdev = &pad->i2c->subdev;
struct nvkm_device *device = subdev->device;
const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
switch (mode) {
case NVKM_I2C_PAD_OFF:
nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
break;
case NVKM_I2C_PAD_I2C:
nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
break;
case NVKM_I2C_PAD_AUX:
nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
break;
default:
WARN_ON(1);
break;
}
}
static const struct nvkm_i2c_pad_func
gm200_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
.aux_new_6 = gm200_i2c_aux_new,
.mode = gm200_i2c_pad_mode,
};
int
gm200_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&gm200_i2c_pad_s_func, i2c, id, ppad);
}
static const struct nvkm_i2c_pad_func
gm200_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
.aux_new_6 = gm200_i2c_aux_new,
};
int
gm200_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&gm200_i2c_pad_x_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pad.h"
#include "aux.h"
#include "bus.h"
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
.aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
int
gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
}
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
.aux_new_6 = gf119_i2c_aux_new,
};
int
gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#define gk208_pmu_code gm107_pmu_code
#define gk208_pmu_data gm107_pmu_data
#include "fuc/gk208.fuc5.h"
static const struct nvkm_pmu_func
gm107_pmu = {
	.flcn = &gt215_pmu_flcn,
.code.data = gm107_pmu_code,
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
};
static const struct nvkm_pmu_fwif
gm107_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gm107_pmu },
{}
};
int
gm107_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gm107_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf119_pmu_code gk104_pmu_code
#define gf119_pmu_data gk104_pmu_data
#include "priv.h"
#include "fuc/gf119.fuc4.h"
#include <core/option.h>
#include <subdev/fuse.h>
#include <subdev/timer.h>
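/* undocumented register pokes, seemingly part of the PGOB (power-gating
 * on boot) workaround - only applied while the "War00C800_0" option is
 * left enabled below
 */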
static void
magic_(struct nvkm_device *device, u32 ctrl, int size)
{
nvkm_wr32(device, 0x00c800, 0x00000000);
nvkm_wr32(device, 0x00c808, 0x00000000);
nvkm_wr32(device, 0x00c800, ctrl);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
while (size--)
nvkm_wr32(device, 0x00c804, 0x00000000);
break;
}
);
nvkm_wr32(device, 0x00c800, 0x00000000);
}
static void
magic(struct nvkm_device *device, u32 ctrl)
{
magic_(device, 0x8000a41f | ctrl, 6);
magic_(device, 0x80000421 | ctrl, 1);
}
static void
gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
struct nvkm_device *device = pmu->subdev.device;
if (!(nvkm_fuse_read(device->fuse, 0x31c) & 0x00000001))
return;
nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
nvkm_rd32(device, 0x000200);
nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
msleep(50);
nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
msleep(50);
nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
nvkm_rd32(device, 0x000200);
if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
switch (device->chipset) {
case 0xe4:
magic(device, 0x04000000);
magic(device, 0x06000000);
magic(device, 0x0c000000);
magic(device, 0x0e000000);
break;
case 0xe6:
magic(device, 0x02000000);
magic(device, 0x04000000);
magic(device, 0x0a000000);
break;
case 0xe7:
magic(device, 0x02000000);
break;
default:
break;
}
}
}
static const struct nvkm_pmu_func
gk104_pmu = {
	.flcn = &gt215_pmu_flcn,
.code.data = gk104_pmu_code,
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
.pgob = gk104_pmu_pgob,
};
static const struct nvkm_pmu_fwif
gk104_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gk104_pmu },
{}
};
int
gk104_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gk104_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gf100.fuc3.h"
#include <subdev/mc.h>
void
gf100_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
nvkm_mc_disable(device, NVKM_SUBDEV_PMU, 0);
nvkm_mc_enable(device, NVKM_SUBDEV_PMU, 0);
}
bool
gf100_pmu_enabled(struct nvkm_pmu *pmu)
{
return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU, 0);
}
static const struct nvkm_pmu_func
gf100_pmu = {
	.flcn = &gt215_pmu_flcn,
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
};
int
gf100_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
return 0;
}
static const struct nvkm_pmu_fwif
gf100_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gf100_pmu },
{}
};
int
gf100_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gf100_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf119_pmu_code gk110_pmu_code
#define gf119_pmu_data gk110_pmu_data
#include "priv.h"
#include "fuc/gf119.fuc4.h"
#include <subdev/timer.h>
void
gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
struct nvkm_device *device = pmu->subdev.device;
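	/* another undocumented PGOB register sequence; each write below is
	 * followed by a wait for bit 31 of the same register to clear
	 */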
static const struct {
u32 addr;
u32 data;
} magic[] = {
{ 0x020520, 0xfffffffc },
{ 0x020524, 0xfffffffe },
{ 0x020524, 0xfffffffc },
{ 0x020524, 0xfffffff8 },
{ 0x020524, 0xffffffe0 },
{ 0x020530, 0xfffffffe },
{ 0x02052c, 0xfffffffa },
{ 0x02052c, 0xfffffff0 },
{ 0x02052c, 0xffffffc0 },
{ 0x02052c, 0xffffff00 },
{ 0x02052c, 0xfffffc00 },
{ 0x02052c, 0xfffcfc00 },
{ 0x02052c, 0xfff0fc00 },
{ 0x02052c, 0xff80fc00 },
{ 0x020528, 0xfffffffe },
{ 0x020528, 0xfffffffc },
};
int i;
nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
nvkm_rd32(device, 0x000200);
nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
msleep(50);
nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
for (i = 0; i < ARRAY_SIZE(magic); i++) {
nvkm_wr32(device, magic[i].addr, magic[i].data);
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
break;
);
}
nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
nvkm_rd32(device, 0x000200);
}
static const struct nvkm_pmu_func
gk110_pmu = {
	.flcn = &gt215_pmu_flcn,
.code.data = gk110_pmu_code,
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
static const struct nvkm_pmu_fwif
gk110_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gk110_pmu },
{}
};
int
gk110_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gk110_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static int
gm200_pmu_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
{
nvkm_falcon_wr32(falcon, 0x200, 0x0000030e);
return (nvkm_falcon_rd32(falcon, 0x20c) & 0x00007000) >> 12;
}
void
gm200_pmu_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
nvkm_falcon_wr32(falcon, 0xe00, 4); /* DMAIDX_UCODE */
nvkm_falcon_wr32(falcon, 0xe04, 0); /* DMAIDX_VIRT */
nvkm_falcon_wr32(falcon, 0xe08, 4); /* DMAIDX_PHYS_VID */
nvkm_falcon_wr32(falcon, 0xe0c, 5); /* DMAIDX_PHYS_SYS_COH */
nvkm_falcon_wr32(falcon, 0xe10, 6); /* DMAIDX_PHYS_SYS_NCOH */
nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
nvkm_falcon_wr32(falcon, 0x480, (1 << 30) | (target << 28) | (addr >> 12));
}
const struct nvkm_falcon_func
gm200_pmu_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.reset_pmc = true,
.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
.debug = 0xc08,
.bind_inst = gm200_pmu_flcn_bind_inst,
.bind_stat = gm200_pmu_flcn_bind_stat,
.imem_pio = &gm200_flcn_imem_pio,
.dmem_pio = &gm200_flcn_dmem_pio,
.start = nvkm_falcon_v1_start,
.cmdq = { 0x4a0, 0x4b0, 4 },
.msgq = { 0x4c8, 0x4cc, 0 },
};
static const struct nvkm_pmu_func
gm200_pmu = {
.flcn = &gm200_pmu_flcn,
.reset = gf100_pmu_reset,
};
int
gm200_pmu_nofw(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
nvkm_warn(&pmu->subdev, "firmware unavailable\n");
return 0;
}
static const struct nvkm_pmu_fwif
gm200_pmu_fwif[] = {
{ -1, gm200_pmu_nofw, &gm200_pmu },
{}
};
int
gm200_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gm200_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static const struct nvkm_falcon_func
gp102_pmu_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.reset_eng = gp102_flcn_reset_eng,
.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
.debug = 0xc08,
.bind_inst = gm200_pmu_flcn_bind_inst,
.bind_stat = gm200_flcn_bind_stat,
.imem_pio = &gm200_flcn_imem_pio,
.dmem_pio = &gm200_flcn_dmem_pio,
.start = nvkm_falcon_v1_start,
.cmdq = { 0x4a0, 0x4b0, 4 },
.msgq = { 0x4c8, 0x4cc, 0 },
};
static const struct nvkm_pmu_func
gp102_pmu = {
.flcn = &gp102_pmu_flcn,
};
static const struct nvkm_pmu_fwif
gp102_pmu_fwif[] = {
{ -1, gm200_pmu_nofw, &gp102_pmu },
{}
};
int
gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gf119.fuc4.h"
static const struct nvkm_pmu_func
gf119_pmu = {
.flcn = &gt215_pmu_flcn,
.code.data = gf119_pmu_code,
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
};
static const struct nvkm_pmu_fwif
gf119_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gf119_pmu },
{}
};
int
gf119_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gf119_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/firmware.h>
#include <subdev/timer.h>
bool
nvkm_pmu_fan_controlled(struct nvkm_device *device)
{
struct nvkm_pmu *pmu = device->pmu;
/* Internal PMU FW does not currently control fans in any way,
* allow SW control of fans instead.
*/
if (pmu && pmu->func->code.size)
return false;
/* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
* and newer automatically control the fan speed, which would
* interfere with SW control.
*/
return (device->chipset >= 0xc0);
}
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
if (pmu && pmu->func->pgob)
pmu->func->pgob(pmu, enable);
}
static void
nvkm_pmu_recv(struct work_struct *work)
{
struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
return pmu->func->recv(pmu);
}
int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
u32 process, u32 message, u32 data0, u32 data1)
{
if (!pmu || !pmu->func->send)
return -ENODEV;
return pmu->func->send(pmu, reply, process, message, data0, data1);
}
static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
if (!pmu->func->intr)
return;
pmu->func->intr(pmu);
}
static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
if (!subdev->use.enabled)
return 0;
if (pmu->func->fini)
pmu->func->fini(pmu);
return 0;
}
static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
if (!pmu->func->init)
return 0;
return pmu->func->init(pmu);
}
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
nvkm_falcon_msgq_del(&pmu->msgq);
nvkm_falcon_cmdq_del(&pmu->lpq);
nvkm_falcon_cmdq_del(&pmu->hpq);
nvkm_falcon_qmgr_del(&pmu->qmgr);
nvkm_falcon_dtor(&pmu->falcon);
mutex_destroy(&pmu->send.mutex);
return nvkm_pmu(subdev);
}
static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
};
int
nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_pmu *pmu)
{
int ret;
nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev);
mutex_init(&pmu->send.mutex);
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
if (IS_ERR(fwif))
return PTR_ERR(fwif);
pmu->func = fwif->func;
ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name,
0x10a000, &pmu->falcon);
if (ret)
return ret;
if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
(ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
(ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
(ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
return ret;
init_completion(&pmu->wpr_ready);
return 0;
}
int
nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu)
{
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
return nvkm_pmu_ctor(fwif, device, type, inst, *ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/acr.h>
#include <nvfw/flcn.h>
#include <nvfw/pmu.h>
static int
gp10b_pmu_acr_bootstrap_multiple_falcons_cb(void *priv,
struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_bootstrap_multiple_falcons_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
return msg->falcon_mask;
}
static int
gp10b_pmu_acr_bootstrap_multiple_falcons(struct nvkm_falcon *falcon, u32 mask)
{
struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
struct nv_pmu_acr_bootstrap_multiple_falcons_cmd cmd = {
.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
.cmd.hdr.size = sizeof(cmd),
.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS,
.flags = NV_PMU_ACR_BOOTSTRAP_MULTIPLE_FALCONS_FLAGS_RESET_YES,
.falcon_mask = mask,
.wpr_lo = 0, /*XXX*/
.wpr_hi = 0, /*XXX*/
};
int ret;
ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
gp10b_pmu_acr_bootstrap_multiple_falcons_cb,
&pmu->subdev, msecs_to_jiffies(1000));
if (ret >= 0) {
if (ret != cmd.falcon_mask)
ret = -EIO;
else
ret = 0;
}
return ret;
}
static const struct nvkm_acr_lsf_func
gp10b_pmu_acr = {
.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
.bld_size = sizeof(struct loader_config),
.bld_write = gm20b_pmu_acr_bld_write,
.bld_patch = gm20b_pmu_acr_bld_patch,
.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
BIT_ULL(NVKM_ACR_LSF_FECS) |
BIT_ULL(NVKM_ACR_LSF_GPCCS),
.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
#endif
static const struct nvkm_pmu_fwif
gp10b_pmu_fwif[] = {
{ 0, gm20b_pmu_load, &gm20b_pmu, &gp10b_pmu_acr },
{ -1, gm200_pmu_nofw, &gm20b_pmu },
{}
};
int
gp10b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gp10b_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c |
// SPDX-License-Identifier: MIT
#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"
struct nvkm_memx {
struct nvkm_pmu *pmu;
u32 base;
u32 size;
struct {
u32 mthd;
u32 size;
u32 data[64];
} c;
};
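/* Flush the currently batched MEMX method and its argument words to the
 * PMU data window, then reset the batch state.
 */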
static void
memx_out(struct nvkm_memx *memx)
{
struct nvkm_device *device = memx->pmu->subdev.device;
int i;
if (memx->c.mthd) {
nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
for (i = 0; i < memx->c.size; i++)
nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
memx->c.mthd = 0;
memx->c.size = 0;
}
}
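/* Queue a MEMX method call, flushing first if the method changes or the
 * batch buffer would otherwise overflow.
 */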
static void
memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
{
if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
(memx->c.mthd && memx->c.mthd != mthd))
memx_out(memx);
memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
memx->c.size += size;
memx->c.mthd = mthd;
}
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
struct nvkm_device *device = pmu->subdev.device;
struct nvkm_memx *memx;
u32 reply[2];
int ret;
ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
MEMX_INFO_DATA, 0);
if (ret)
return ret;
memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
if (!memx)
return -ENOMEM;
memx->pmu = pmu;
memx->base = reply[0];
memx->size = reply[1];
/* acquire data segment access */
do {
nvkm_wr32(device, 0x10a580, 0x00000003);
} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
return 0;
}
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
struct nvkm_memx *memx = *pmemx;
struct nvkm_pmu *pmu = memx->pmu;
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = subdev->device;
u32 finish, reply[2];
/* flush the cache... */
memx_out(memx);
/* release data segment access */
finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
nvkm_wr32(device, 0x10a580, 0x00000000);
/* call MEMX process to execute the script, and wait for reply */
if (exec) {
nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
memx->base, finish);
nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
reply[0], reply[1]);
}
kfree(memx);
return 0;
}
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}
void
nvkm_memx_wait(struct nvkm_memx *memx,
u32 addr, u32 mask, u32 data, u32 nsec)
{
nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
addr, mask, data, nsec);
memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
memx_out(memx); /* fuc can't handle multiple */
}
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
nvkm_debug(&memx->pmu->subdev, " DELAY = %d ns\n", nsec);
memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
memx_out(memx); /* fuc can't handle multiple */
}
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
struct nvkm_subdev *subdev = &memx->pmu->subdev;
struct nvkm_device *device = subdev->device;
u32 heads, x, y, px = 0;
int i, head_sync;
if (device->chipset < 0xd0) {
heads = nvkm_rd32(device, 0x610050);
for (i = 0; i < 2; i++) {
/* Heuristic: sync to head with biggest resolution */
if (heads & (2 << (i << 3))) {
x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
y = (x & 0xffff0000) >> 16;
x &= 0x0000ffff;
if ((x * y) > px) {
px = (x * y);
head_sync = i;
}
}
}
}
if (px == 0) {
nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
return;
}
nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
memx_out(memx); /* fuc can't handle multiple */
}
void
nvkm_memx_train(struct nvkm_memx *memx)
{
nvkm_debug(&memx->pmu->subdev, " MEM TRAIN\n");
memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
struct nvkm_device *device = pmu->subdev.device;
u32 reply[2], base, size, i;
int ret;
ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
MEMX_INFO_TRAIN, 0);
if (ret)
return ret;
base = reply[0];
size = reply[1] >> 2;
if (size > rsize)
return -ENOMEM;
/* read the packet */
nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
for (i = 0; i < size; i++)
res[i] = nvkm_rd32(device, 0x10a1c4);
return 0;
}
void
nvkm_memx_block(struct nvkm_memx *memx)
{
nvkm_debug(&memx->pmu->subdev, " HOST BLOCKED\n");
memx_cmd(memx, MEMX_ENTER, 0, NULL);
}
void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
nvkm_debug(&memx->pmu->subdev, " HOST UNBLOCKED\n");
memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gk208.fuc5.h"
static const struct nvkm_pmu_func
gk208_pmu = {
.flcn = &gt215_pmu_flcn,
.code.data = gk208_pmu_code,
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
.enabled = gf100_pmu_enabled,
.reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
.pgob = gk110_pmu_pgob,
};
static const struct nvkm_pmu_fwif
gk208_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gk208_pmu },
{}
};
int
gk208_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gk208_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "fuc/gt215.fuc3.h"
#include <subdev/timer.h>
int
gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
u32 process, u32 message, u32 data0, u32 data1)
{
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = subdev->device;
u32 addr;
mutex_lock(&pmu->send.mutex);
/* wait for a free slot in the fifo */
addr = nvkm_rd32(device, 0x10a4a0);
if (nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x10a4b0);
if (tmp != (addr ^ 8))
break;
) < 0) {
mutex_unlock(&pmu->send.mutex);
return -EBUSY;
}
/* we currently only support a single process at a time waiting
* on a synchronous reply, take the PMU mutex and tell the
* receive handler what we're waiting for
*/
if (reply) {
pmu->recv.message = message;
pmu->recv.process = process;
}
/* acquire data segment access */
do {
nvkm_wr32(device, 0x10a580, 0x00000001);
} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
/* write the packet */
nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
pmu->send.base));
nvkm_wr32(device, 0x10a1c4, process);
nvkm_wr32(device, 0x10a1c4, message);
nvkm_wr32(device, 0x10a1c4, data0);
nvkm_wr32(device, 0x10a1c4, data1);
nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
/* release data segment access */
nvkm_wr32(device, 0x10a580, 0x00000000);
/* wait for reply, if requested */
if (reply) {
wait_event(pmu->recv.wait, (pmu->recv.process == 0));
reply[0] = pmu->recv.data[0];
reply[1] = pmu->recv.data[1];
}
mutex_unlock(&pmu->send.mutex);
return 0;
}
void
gt215_pmu_recv(struct nvkm_pmu *pmu)
{
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = subdev->device;
u32 process, message, data0, data1;
/* nothing to do if GET == PUT */
u32 addr = nvkm_rd32(device, 0x10a4cc);
if (addr == nvkm_rd32(device, 0x10a4c8))
return;
/* acquire data segment access */
do {
nvkm_wr32(device, 0x10a580, 0x00000002);
} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
/* read the packet */
nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
pmu->recv.base));
process = nvkm_rd32(device, 0x10a1c4);
message = nvkm_rd32(device, 0x10a1c4);
data0 = nvkm_rd32(device, 0x10a1c4);
data1 = nvkm_rd32(device, 0x10a1c4);
nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
/* release data segment access */
nvkm_wr32(device, 0x10a580, 0x00000000);
/* wake process if it's waiting on a synchronous reply */
if (pmu->recv.process) {
if (process == pmu->recv.process &&
message == pmu->recv.message) {
pmu->recv.data[0] = data0;
pmu->recv.data[1] = data1;
pmu->recv.process = 0;
wake_up(&pmu->recv.wait);
return;
}
}
/* right now there's no other expected responses from the engine,
* so assume that any unexpected message is an error.
*/
nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
(char)((process & 0x000000ff) >> 0),
(char)((process & 0x0000ff00) >> 8),
(char)((process & 0x00ff0000) >> 16),
(char)((process & 0xff000000) >> 24),
process, message, data0, data1);
}
void
gt215_pmu_intr(struct nvkm_pmu *pmu)
{
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = subdev->device;
u32 disp = nvkm_rd32(device, 0x10a01c);
u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
if (intr & 0x00000020) {
u32 stat = nvkm_rd32(device, 0x10a16c);
if (stat & 0x80000000) {
nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
stat & 0x00ffffff,
nvkm_rd32(device, 0x10a168));
nvkm_wr32(device, 0x10a16c, 0x00000000);
intr &= ~0x00000020;
}
}
if (intr & 0x00000040) {
schedule_work(&pmu->recv.work);
nvkm_wr32(device, 0x10a004, 0x00000040);
intr &= ~0x00000040;
}
if (intr & 0x00000080) {
nvkm_info(subdev, "wr32 %06x %08x\n",
nvkm_rd32(device, 0x10a7a0),
nvkm_rd32(device, 0x10a7a4));
nvkm_wr32(device, 0x10a004, 0x00000080);
intr &= ~0x00000080;
}
if (intr) {
nvkm_error(subdev, "intr %08x\n", intr);
nvkm_wr32(device, 0x10a004, intr);
}
}
void
gt215_pmu_fini(struct nvkm_pmu *pmu)
{
nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
flush_work(&pmu->recv.work);
}
static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
nvkm_rd32(device, 0x022210);
}
static bool
gt215_pmu_enabled(struct nvkm_pmu *pmu)
{
return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}
int
gt215_pmu_init(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
int i;
/* Inhibit interrupts, and wait for idle. */
if (pmu->func->enabled(pmu)) {
nvkm_wr32(device, 0x10a014, 0x0000ffff);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x10a04c))
break;
);
}
pmu->func->reset(pmu);
/* Wait for IMEM/DMEM scrubbing to be complete. */
nvkm_msec(device, 2000,
if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
break;
);
/* upload data segment */
nvkm_wr32(device, 0x10a1c0, 0x01000000);
for (i = 0; i < pmu->func->data.size / 4; i++)
nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
/* upload code segment */
nvkm_wr32(device, 0x10a180, 0x01000000);
for (i = 0; i < pmu->func->code.size / 4; i++) {
if ((i & 0x3f) == 0)
nvkm_wr32(device, 0x10a188, i >> 6);
nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
}
/* start it running */
nvkm_wr32(device, 0x10a10c, 0x00000000);
nvkm_wr32(device, 0x10a104, 0x00000000);
nvkm_wr32(device, 0x10a100, 0x00000002);
/* wait for valid host->pmu ring configuration */
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x10a4d0))
break;
) < 0)
return -EBUSY;
pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
/* wait for valid pmu->host ring configuration */
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x10a4dc))
break;
) < 0)
return -EBUSY;
pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
nvkm_wr32(device, 0x10a010, 0x000000e0);
return 0;
}
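/* No common falcon helpers are needed here; PMU load, init and teardown
 * are handled directly by the gt215_pmu_* functions above.
 */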
const struct nvkm_falcon_func
gt215_pmu_flcn = {
};
static const struct nvkm_pmu_func
gt215_pmu = {
.flcn = &gt215_pmu_flcn,
.code.data = gt215_pmu_code,
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
.enabled = gt215_pmu_enabled,
.reset = gt215_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
.send = gt215_pmu_send,
.recv = gt215_pmu_recv,
};
static const struct nvkm_pmu_fwif
gt215_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gt215_pmu },
{}
};
int
gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base)
#include "priv.h"
#include <subdev/clk.h>
#include <subdev/timer.h>
#include <subdev/volt.h>
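/* PMU perf-counter slots used by the DVFS code: BUSY_SLOT is read back as
 * the busy count and CLK_SLOT as the total count when computing utilization
 * (see gk20a_pmu_init() and gk20a_pmu_dvfs_get_dev_status()).
 */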
#define BUSY_SLOT 0
#define CLK_SLOT 7
struct gk20a_pmu_dvfs_data {
int p_load_target;
int p_load_max;
int p_smooth;
unsigned int avg_load;
};
struct gk20a_pmu {
struct nvkm_pmu base;
struct nvkm_alarm alarm;
struct gk20a_pmu_dvfs_data *data;
};
struct gk20a_pmu_dvfs_dev_status {
u32 total;
u32 busy;
};
static int
gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
{
struct nvkm_clk *clk = pmu->base.subdev.device->clk;
return nvkm_clk_astate(clk, *state, 0, false);
}
static void
gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
{
struct nvkm_clk *clk = pmu->base.subdev.device->clk;
*state = clk->pstate;
}
static int
gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
int *state, int load)
{
struct gk20a_pmu_dvfs_data *data = pmu->data;
struct nvkm_clk *clk = pmu->base.subdev.device->clk;
int cur_level, level;
/* For GK20A, the performance level is directly mapped to pstate */
level = cur_level = clk->pstate;
if (load > data->p_load_max) {
level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
} else {
level += ((load - data->p_load_target) * 10 /
data->p_load_target) / 2;
level = max(0, level);
level = min(clk->state_nr - 1, level);
}
nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
cur_level, level);
*state = level;
return (level != cur_level);
}
static void
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
struct gk20a_pmu_dvfs_dev_status *status)
{
struct nvkm_falcon *falcon = &pmu->base.falcon;
status->busy = nvkm_falcon_rd32(falcon, 0x508 + (BUSY_SLOT * 0x10));
status->total = nvkm_falcon_rd32(falcon, 0x508 + (CLK_SLOT * 0x10));
}
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
struct nvkm_falcon *falcon = &pmu->base.falcon;
nvkm_falcon_wr32(falcon, 0x508 + (BUSY_SLOT * 0x10), 0x80000000);
nvkm_falcon_wr32(falcon, 0x508 + (CLK_SLOT * 0x10), 0x80000000);
}
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
struct gk20a_pmu *pmu =
container_of(alarm, struct gk20a_pmu, alarm);
struct gk20a_pmu_dvfs_data *data = pmu->data;
struct gk20a_pmu_dvfs_dev_status status;
struct nvkm_subdev *subdev = &pmu->base.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_clk *clk = device->clk;
struct nvkm_timer *tmr = device->timer;
struct nvkm_volt *volt = device->volt;
u32 utilization = 0;
int state;
/*
* The PMU is initialized before CLK and VOLT, so we have to make sure the
* CLK and VOLT are ready here.
*/
if (!clk || !volt)
goto resched;
gk20a_pmu_dvfs_get_dev_status(pmu, &status);
if (status.total)
utilization = div_u64((u64)status.busy * 100, status.total);
data->avg_load = (data->p_smooth * data->avg_load) + utilization;
data->avg_load /= data->p_smooth + 1;
nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
utilization, data->avg_load);
gk20a_pmu_dvfs_get_cur_state(pmu, &state);
if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
nvkm_trace(subdev, "set new state to %d\n", state);
gk20a_pmu_dvfs_target(pmu, &state);
}
resched:
gk20a_pmu_dvfs_reset_dev_status(pmu);
nvkm_timer_alarm(tmr, 100000000, alarm);
}
static void
gk20a_pmu_fini(struct nvkm_pmu *pmu)
{
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
static int
gk20a_pmu_init(struct nvkm_pmu *pmu)
{
struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
struct nvkm_subdev *subdev = &pmu->subdev;
struct nvkm_device *device = pmu->subdev.device;
struct nvkm_falcon *falcon = &pmu->falcon;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
if (ret) {
nvkm_error(subdev, "cannot acquire %s falcon!\n", falcon->name);
return ret;
}
/* init pwr perf counter */
nvkm_falcon_wr32(falcon, 0x504 + (BUSY_SLOT * 0x10), 0x00200001);
nvkm_falcon_wr32(falcon, 0x50c + (BUSY_SLOT * 0x10), 0x00000002);
nvkm_falcon_wr32(falcon, 0x50c + (CLK_SLOT * 0x10), 0x00000003);
nvkm_timer_alarm(device->timer, 2000000000, &gpmu->alarm);
return 0;
}
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data= {
.p_load_target = 70,
.p_load_max = 90,
.p_smooth = 1,
};
static const struct nvkm_pmu_func
gk20a_pmu = {
.flcn = &gt215_pmu_flcn,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
.reset = gf100_pmu_reset,
};
static const struct nvkm_pmu_fwif
gk20a_pmu_fwif[] = {
{ -1, gf100_pmu_nofw, &gk20a_pmu },
{}
};
int
gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
struct gk20a_pmu *pmu;
int ret;
if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
*ppmu = &pmu->base;
ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base);
if (ret)
return ret;
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/acr.h>
#include <nvfw/flcn.h>
#include <nvfw/pmu.h>
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_bootstrap_falcon_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
return msg->falcon_id;
}
int
gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
enum nvkm_acr_lsf_id id)
{
struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
.cmd.hdr.size = sizeof(cmd),
.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
.falcon_id = id,
};
int ret;
ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
gm20b_pmu_acr_bootstrap_falcon_cb,
&pmu->subdev, msecs_to_jiffies(1000));
if (ret >= 0) {
if (ret != cmd.falcon_id)
ret = -EIO;
else
ret = 0;
}
return ret;
}
void
gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
struct loader_config hdr;
u64 addr;
nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8);
hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8);
nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
loader_config_dump(&acr->subdev, &hdr);
}
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
struct nvkm_acr_lsfw *lsfw)
{
const u64 base = lsfw->offset.img + lsfw->app_start_offset;
const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
const struct loader_config hdr = {
.dma_idx = FALCON_DMAIDX_UCODE,
.code_dma_base = lower_32_bits(code),
.code_size_total = lsfw->app_size,
.code_size_to_load = lsfw->app_resident_code_size,
.code_entry_point = lsfw->app_imem_entry,
.data_dma_base = lower_32_bits(data),
.data_size = lsfw->app_resident_data_size,
.overlay_dma_base = lower_32_bits(code),
.argc = 1,
.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
.code_dma_base1 = upper_32_bits(code),
.data_dma_base1 = upper_32_bits(data),
.overlay_dma_base1 = upper_32_bits(code),
};
nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}
static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
.bld_size = sizeof(struct loader_config),
.bld_write = gm20b_pmu_acr_bld_write,
.bld_patch = gm20b_pmu_acr_bld_patch,
.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
BIT_ULL(NVKM_ACR_LSF_FECS) |
BIT_ULL(NVKM_ACR_LSF_GPCCS),
.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};
static int
gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
struct nv_pmu_acr_init_wpr_region_msg *msg =
container_of(hdr, typeof(*msg), msg.hdr);
struct nvkm_pmu *pmu = priv;
struct nvkm_subdev *subdev = &pmu->subdev;
if (msg->error_code) {
nvkm_error(subdev, "ACR WPR init failure: %d\n",
msg->error_code);
return -EINVAL;
}
nvkm_debug(subdev, "ACR WPR init complete\n");
complete_all(&pmu->wpr_ready);
return 0;
}
static int
gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
{
struct nv_pmu_acr_init_wpr_region_cmd cmd = {
.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
.cmd.hdr.size = sizeof(cmd),
.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
.region_id = 1,
.wpr_offset = 0,
};
return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
gm20b_pmu_acr_init_wpr_callback, pmu, 0);
}
static int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
struct nv_pmu_init_msg msg;
int ret;
ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
if (ret)
return ret;
if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
msg.msg_type != NV_PMU_INIT_MSG_INIT)
return -EINVAL;
nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
msg.queue_info[0].offset,
msg.queue_info[0].size);
nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
msg.queue_info[1].offset,
msg.queue_info[1].size);
nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
msg.queue_info[4].offset,
msg.queue_info[4].size);
return gm20b_pmu_acr_init_wpr(pmu);
}
static void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
if (!pmu->initmsg_received) {
int ret = pmu->func->initmsg(pmu);
if (ret) {
nvkm_error(&pmu->subdev, "error parsing init message: %d\n", ret);
return;
}
pmu->initmsg_received = true;
}
nvkm_falcon_msgq_recv(pmu->msgq);
}
static void
gm20b_pmu_fini(struct nvkm_pmu *pmu)
{
/*TODO: shutdown RTOS. */
flush_work(&pmu->recv.work);
nvkm_falcon_cmdq_fini(pmu->lpq);
nvkm_falcon_cmdq_fini(pmu->hpq);
reinit_completion(&pmu->wpr_ready);
nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
static int
gm20b_pmu_init(struct nvkm_pmu *pmu)
{
struct nvkm_falcon *falcon = &pmu->falcon;
struct nv_pmu_args args = { .secure_mode = true };
u32 addr_args = falcon->data.limit - sizeof(args);
int ret;
ret = nvkm_falcon_get(&pmu->falcon, &pmu->subdev);
if (ret)
return ret;
pmu->initmsg_received = false;
nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
nvkm_falcon_start(falcon);
return 0;
}
const struct nvkm_pmu_func
gm20b_pmu = {
.flcn = &gm200_pmu_flcn,
.init = gm20b_pmu_init,
.fini = gm20b_pmu_fini,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
.initmsg = gm20b_pmu_initmsg,
.reset = gf100_pmu_reset,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif
int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
NVKM_ACR_LSF_PMU, "pmu/",
ver, fwif->acr);
}
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
{ -1, gm200_pmu_nofw, &gm20b_pmu },
{}
};
int
gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pmu **ppmu)
{
return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
#include <subdev/gpio.h>
static void
nv04_bus_intr(struct nvkm_bus *bus)
{
struct nvkm_subdev *subdev = &bus->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x00000001) {
nvkm_error(subdev, "BUS ERROR\n");
stat &= ~0x00000001;
nvkm_wr32(device, 0x001100, 0x00000001);
}
if (stat & 0x00000110) {
struct nvkm_gpio *gpio = device->gpio;
if (gpio)
nvkm_subdev_intr(&gpio->subdev);
stat &= ~0x00000110;
nvkm_wr32(device, 0x001100, 0x00000110);
}
if (stat) {
nvkm_error(subdev, "intr %08x\n", stat);
nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
static void
nv04_bus_init(struct nvkm_bus *bus)
{
struct nvkm_device *device = bus->subdev.device;
nvkm_wr32(device, 0x001100, 0xffffffff);
nvkm_wr32(device, 0x001140, 0x00000111);
}
static const struct nvkm_bus_func
nv04_bus = {
.init = nv04_bus_init,
.intr = nv04_bus_intr,
};
int
nv04_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
return nvkm_bus_new_(&nv04_bus, device, type, inst, pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
static void
gf100_bus_intr(struct nvkm_bus *bus)
{
struct nvkm_subdev *subdev = &bus->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
if (stat & 0x0000000e) {
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
nvkm_error_ratelimited(subdev,
"MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
(addr & 0x00000002) ? "write" : "read", data,
(addr & 0x00fffffc),
(stat & 0x00000002) ? "!ENGINE " : "",
(stat & 0x00000004) ? "PRIVRING " : "",
(stat & 0x00000008) ? "TIMEOUT " : "");
nvkm_wr32(device, 0x009084, 0x00000000);
nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
stat &= ~0x0000000e;
}
if (stat) {
nvkm_error(subdev, "intr %08x\n", stat);
nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
static void
gf100_bus_init(struct nvkm_bus *bus)
{
struct nvkm_device *device = bus->subdev.device;
nvkm_wr32(device, 0x001100, 0xffffffff);
nvkm_wr32(device, 0x001140, 0x0000000e);
}
static const struct nvkm_bus_func
gf100_bus = {
.init = gf100_bus_init,
.intr = gf100_bus_intr,
};
int
gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <[email protected]>
* Ben Skeggs
*/
#include "priv.h"
#include <subdev/gpio.h>
#include <subdev/therm.h>
static void
nv31_bus_intr(struct nvkm_bus *bus)
{
struct nvkm_subdev *subdev = &bus->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
if (gpio) {
struct nvkm_gpio *gpio = device->gpio;
if (gpio)
nvkm_subdev_intr(&gpio->subdev);
}
if (stat & 0x00000008) { /* NV41- */
u32 addr = nvkm_rd32(device, 0x009084);
u32 data = nvkm_rd32(device, 0x009088);
nvkm_error_ratelimited(subdev, "MMIO %s of %08x FAULT at %06x\n",
(addr & 0x00000002) ? "write" : "read", data,
(addr & 0x00fffffc));
stat &= ~0x00000008;
nvkm_wr32(device, 0x001100, 0x00000008);
}
if (stat & 0x00070000) {
struct nvkm_therm *therm = device->therm;
if (therm)
nvkm_subdev_intr(&therm->subdev);
stat &= ~0x00070000;
nvkm_wr32(device, 0x001100, 0x00070000);
}
if (stat) {
nvkm_error(subdev, "intr %08x\n", stat);
nvkm_mask(device, 0x001140, stat, 0x00000000);
}
}
static void
nv31_bus_init(struct nvkm_bus *bus)
{
struct nvkm_device *device = bus->subdev.device;
nvkm_wr32(device, 0x001100, 0xffffffff);
nvkm_wr32(device, 0x001140, 0x00070008);
}
static const struct nvkm_bus_func
nv31_bus = {
.init = nv31_bus_init,
.intr = nv31_bus_intr,
};
int
nv31_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_bus **pbus)
{
return nvkm_bus_new_(&nv31_bus, device, type, inst, pbus);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c |