python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>
/* Locate the boost table and parse its header.
 *
 * The pointer lives at offset 0x30 of the BIT 'P' (performance) entry,
 * version 2, and is only present when that entry is at least 0x34 bytes.
 * On success, returns the table offset and fills in:
 *   *ver - table version (only 0x11 is understood)
 *   *hdr - header size, *cnt/*len - entry count/size,
 *   *snr/*ssz - sub-entry count/size per entry
 * Returns 0 when the table is absent or its version is unknown.
 */
u32
nvbios_boostTe(struct nvkm_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_P;
	u32 boost = 0;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2 && bit_P.length >= 0x34)
			boost = nvbios_rd32(bios, bit_P.offset + 0x30);

		if (boost) {
			*ver = nvbios_rd08(bios, boost + 0);
			switch (*ver) {
			case 0x11:
				/* header bytes: +1 hdr size, +2 entry len,
				 * +3 sub-entry size, +4 sub-entry count,
				 * +5 entry count
				 */
				*hdr = nvbios_rd08(bios, boost + 1);
				*cnt = nvbios_rd08(bios, boost + 5);
				*len = nvbios_rd08(bios, boost + 2);
				*snr = nvbios_rd08(bios, boost + 4);
				*ssz = nvbios_rd08(bios, boost + 3);
				return boost;
			default:
				break;
			}
		}
	}

	return 0;
}
/* Return the offset of boost entry 'idx', or 0 if out of range.
 * On success the header outputs are rewritten to describe the entry's
 * sub-entry layout (*hdr = entry size, *cnt/*len = sub-entry count/size).
 */
u32
nvbios_boostEe(struct nvkm_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8 sub_nr, sub_sz;
	u32 table = nvbios_boostTe(bios, ver, hdr, cnt, len, &sub_nr, &sub_sz);
	u32 stride;

	if (!table || idx >= *cnt)
		return 0;

	/* each entry is immediately followed by its sub-entries */
	stride = *len + sub_nr * sub_sz;
	table += *hdr + idx * stride;

	*hdr = *len;
	*cnt = sub_nr;
	*len = sub_sz;
	return table;
}
/* Parse boost entry 'idx' into *info (zeroed first).
 * Returns the entry offset, or 0 if not found.
 */
u32
nvbios_boostEp(struct nvkm_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
	u32 entry = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);

	memset(info, 0x00, sizeof(*info));
	if (!entry)
		return 0;

	/* pstate id occupies bits 5..8 of the first 16-bit word */
	info->pstate = (nvbios_rd16(bios, entry + 0x00) & 0x01e0) >> 5;
	/* clock limits scaled by 1000 — presumably MHz in the table,
	 * kHz in *info; confirm against consumers
	 */
	info->min = nvbios_rd16(bios, entry + 0x02) * 1000;
	info->max = nvbios_rd16(bios, entry + 0x04) * 1000;
	return entry;
}
/* Find the boost entry matching a specific pstate id.
 * Returns the entry offset (with *info filled), or 0 if no entry matches.
 */
u32
nvbios_boostEm(struct nvkm_bios *bios, u8 pstate,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
	u32 entry;
	int i;

	for (i = 0; (entry = nvbios_boostEp(bios, i, ver, hdr, cnt, len, info)); i++) {
		if (info->pstate == pstate)
			break;
	}
	return entry;
}
/* Return the offset of sub-entry 'idx' within a boost entry at 'data';
 * 0 when out of range.  *hdr is rewritten to the sub-entry size.
 */
u32
nvbios_boostSe(struct nvkm_bios *bios, int idx,
	       u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
	if (!data || idx >= cnt)
		return 0;

	data += *hdr + idx * len;
	*hdr = len;
	return data;
}
/* Parse boost sub-entry 'idx' into *info (zeroed first).
 * Returns the sub-entry offset, or 0 if not found.
 */
u32
nvbios_boostSp(struct nvkm_bios *bios, int idx,
	       u32 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
	       struct nvbios_boostS *info)
{
	u32 sub = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);

	memset(info, 0x00, sizeof(*info));
	if (sub) {
		info->domain  = nvbios_rd08(bios, sub + 0x00);
		info->percent = nvbios_rd08(bios, sub + 0x01);
		info->min     = nvbios_rd16(bios, sub + 0x02) * 1000;
		info->max     = nvbios_rd16(bios, sub + 0x04) * 1000;
	}
	return sub;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <subdev/bios.h>
#include <subdev/bios/pcir.h>
/* Locate the PCI Data Structure of the ROM image at 'base'.
 *
 * The 16-bit word at image offset 0x18 is the (image-relative) pointer
 * to the structure.  Its signature is verified; besides the standard
 * "PCIR" some NVIDIA images carry "RGIS" or "NPDS" variants.
 * Returns the absolute offset with *hdr = structure length and
 * *ver = structure revision, or 0 if the signature is unknown.
 */
u32
nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
{
	u32 data = nvbios_rd16(bios, base + 0x18);
	if (data) {
		data += base;
		switch (nvbios_rd32(bios, data + 0x00)) {
		case 0x52494350: /* PCIR */
		case 0x53494752: /* RGIS */
		case 0x5344504e: /* NPDS */
			*hdr = nvbios_rd16(bios, data + 0x0a);
			*ver = nvbios_rd08(bios, data + 0x0c);
			break;
		default:
			nvkm_debug(&bios->subdev,
				   "%08x: PCIR signature (%08x) unknown\n",
				   data, nvbios_rd32(bios, data + 0x00));
			data = 0;
			break;
		}
	}
	return data;
}
/* Parse the PCI Data Structure of the image at 'base' into *info
 * (zeroed first).  Returns the structure offset, or 0 if not found.
 */
u32
nvbios_pcirTp(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr,
	      struct nvbios_pcirT *info)
{
	u32 pcir = nvbios_pcirTe(bios, base, ver, hdr);

	memset(info, 0x00, sizeof(*info));
	if (!pcir)
		return 0;

	info->vendor_id     = nvbios_rd16(bios, pcir + 0x04);
	info->device_id     = nvbios_rd16(bios, pcir + 0x06);
	info->class_code[0] = nvbios_rd08(bios, pcir + 0x0d);
	info->class_code[1] = nvbios_rd08(bios, pcir + 0x0e);
	info->class_code[2] = nvbios_rd08(bios, pcir + 0x0f);
	/* image length is stored in 512-byte blocks */
	info->image_size    = nvbios_rd16(bios, pcir + 0x10) * 512;
	info->image_rev     = nvbios_rd16(bios, pcir + 0x12);
	info->image_type    = nvbios_rd08(bios, pcir + 0x14);
	/* bit 7 of the indicator byte marks the last image in the ROM */
	info->last          = nvbios_rd08(bios, pcir + 0x15) & 0x80;
	return pcir;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include <core/pci.h>
#if defined(__powerpc__)
/* Per-instance state for the OpenFirmware shadow source. */
struct priv {
	const void __iomem *data; /* contents of the "NVDA,BMP" property */
	int size;                 /* property length, in bytes */
};
/* Copy up to 'length' bytes of the shadowed image at 'offset' into
 * bios->data.  Returns the number of bytes copied (0 when the offset
 * lies beyond the property).
 */
static u32
of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
	struct priv *priv = data;

	if (offset >= priv->size)
		return 0;

	/* clamp to what the property actually contains */
	length = min_t(u32, length, priv->size - offset);
	memcpy_fromio(bios->data + offset, priv->data + offset, length);
	return length;
}
static u32
of_size(void *data)
{
struct priv *priv = data;
return priv->size;
}
/* Set up the OpenFirmware shadow source: find the device-tree node for
 * the GPU's PCI device and grab its "NVDA,BMP" property, which holds a
 * copy of the VBIOS image.  Returns a struct priv on success, or an
 * ERR_PTR on failure.
 */
static void *
of_init(struct nvkm_bios *bios, const char *name)
{
	struct nvkm_device *device = bios->subdev.device;
	struct pci_dev *pdev = device->func->pci(device)->pdev;
	struct device_node *dn;
	struct priv *priv;

	if (!(dn = pci_device_to_OF_node(pdev)))
		return ERR_PTR(-ENODEV);
	if (!(priv = kzalloc(sizeof(*priv), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	/* of_get_property() also fills in the property's size */
	if ((priv->data = of_get_property(dn, "NVDA,BMP", &priv->size)))
		return priv;
	kfree(priv);
	return ERR_PTR(-EINVAL);
}
/* VBIOS image shadowed from an OpenFirmware device-tree property.
 * Read-only, and carries neither a checksum nor PCIR structures to
 * validate, hence the ignore_checksum/no_pcir flags.
 */
const struct nvbios_source
nvbios_of = {
	.name = "OpenFirmware",
	.init = of_init,
	.fini = (void(*)(void *))kfree,
	.read = of_read,
	.size = of_size,
	.rw = false,
	.ignore_checksum = true,
	.no_pcir = true,
};
#else
/* Non-powerpc builds: an empty, inert source so generic shadow code can
 * reference nvbios_of unconditionally.
 */
const struct nvbios_source
nvbios_of = {
};
#endif
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <subdev/bios.h>
#include <subdev/bios/npde.h>
#include <subdev/bios/pcir.h>
/* Locate the NPDE (NVIDIA PCI Data Extension) of the image at 'base'.
 *
 * The NPDE immediately follows the PCI Data Structure, aligned up to a
 * 16-byte boundary.  Returns its offset, or 0 when the PCIR was not
 * found or the NPDE signature does not match.
 */
u32
nvbios_npdeTe(struct nvkm_bios *bios, u32 base)
{
	struct nvbios_pcirT pcir;
	u8  ver; u16 hdr;
	u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);

	/* Bail out if no PCIR header was found: 'hdr' is only written on
	 * success, so the alignment arithmetic below would otherwise read
	 * an uninitialized value (and could fabricate a bogus offset).
	 */
	if (!data)
		return 0;

	data = (data + hdr + 0x0f) & ~0x0f;
	switch (nvbios_rd32(bios, data + 0x00)) {
	case 0x4544504e: /* NPDE */
		break;
	default:
		nvkm_debug(&bios->subdev,
			   "%08x: NPDE signature (%08x) unknown\n",
			   data, nvbios_rd32(bios, data + 0x00));
		data = 0;
		break;
	}
	return data;
}
/* Parse the NPDE of the image at 'base' into *info (zeroed first).
 * Returns the NPDE offset, or 0 if not found.
 */
u32
nvbios_npdeTp(struct nvkm_bios *bios, u32 base, struct nvbios_npdeT *info)
{
	u32 npde = nvbios_npdeTe(bios, base);

	memset(info, 0x00, sizeof(*info));
	if (npde) {
		/* image length is stored in 512-byte blocks */
		info->image_size = nvbios_rd16(bios, npde + 0x08) * 512;
		/* bit 7 of the indicator marks the last image */
		info->last       = nvbios_rd08(bios, npde + 0x0a) & 0x80;
	}
	return npde;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/xpio.h>
/* Locate the DCB GPIO table and parse its header.
 *
 * The pointer location depends on the DCB version: 3.0+ keeps it inside
 * the DCB header (offset 0x0a), while 2.2+ stores it at a fixed offset
 * before the DCB.  Returns the table offset with *ver/*hdr/*cnt/*len
 * filled in, or 0 when absent or of an unknown revision.
 */
u16
dcb_gpio_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 data = 0x0000;
	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);

	if (dcb) {
		if (*ver >= 0x30 && *hdr >= 0x0c)
			data = nvbios_rd16(bios, dcb + 0x0a);
		else
		if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
			data = nvbios_rd16(bios, dcb - 0x0f);

		if (data) {
			*ver = nvbios_rd08(bios, data + 0x00);
			if (*ver < 0x30) {
				/* pre-3.0 gpio tables: fixed 3-byte header */
				*hdr = 3;
				*cnt = nvbios_rd08(bios, data + 0x02);
				*len = nvbios_rd08(bios, data + 0x01);
			} else
			if (*ver <= 0x41) {
				*hdr = nvbios_rd08(bios, data + 0x01);
				*cnt = nvbios_rd08(bios, data + 0x02);
				*len = nvbios_rd08(bios, data + 0x03);
			} else {
				/* unknown gpio table revision; ignore it */
				data = 0x0000;
			}
		}
	}

	return data;
}
/* Return the offset of gpio entry 'ent' in table 'idx': table 0 is the
 * main DCB gpio table, higher indices select XPIO tables.  Returns 0
 * when the table is absent or 'ent' is out of range.
 */
u16
dcb_gpio_entry(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
	u8 hdr, cnt, xver; /* xpio entries are parsed with the gpio version */
	u16 gpio;

	if (idx == 0)
		gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
	else
		gpio = dcb_xpio_table(bios, idx - 1, &xver, &hdr, &cnt, len);

	if (gpio && ent < cnt)
		return gpio + hdr + ent * *len;
	return 0x0000;
}
/* Parse gpio entry 'ent' of table 'idx' into *gpio.
 *
 * The entry layout differs per table version: <4.0 packs everything in
 * 16 bits, 4.0 in 32 bits, and 4.1+ spreads it over two words.
 * Returns the entry offset, or 0 when not found.
 */
u16
dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
	       struct dcb_gpio_func *gpio)
{
	u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
	if (data) {
		if (*ver < 0x40) {
			u16 info = nvbios_rd16(bios, data);
			*gpio = (struct dcb_gpio_func) {
				.line = (info & 0x001f) >> 0,
				.func = (info & 0x07e0) >> 5,
				.log[0] = (info & 0x1800) >> 11,
				.log[1] = (info & 0x6000) >> 13,
				.param = !!(info & 0x8000),
			};
		} else
		if (*ver < 0x41) {
			u32 info = nvbios_rd32(bios, data);
			*gpio = (struct dcb_gpio_func) {
				.line = (info & 0x0000001f) >> 0,
				.func = (info & 0x0000ff00) >> 8,
				.log[0] = (info & 0x18000000) >> 27,
				.log[1] = (info & 0x60000000) >> 29,
				.param = !!(info & 0x80000000),
			};
		} else {
			u32 info = nvbios_rd32(bios, data + 0);
			/* NOTE(review): rd32 result truncated to u8 here;
			 * only bits 0-7 of the second word are used --
			 * looks intentional but worth confirming
			 */
			u8 info1 = nvbios_rd32(bios, data + 4);
			*gpio = (struct dcb_gpio_func) {
				.line = (info & 0x0000003f) >> 0,
				.func = (info & 0x0000ff00) >> 8,
				.log[0] = (info1 & 0x30) >> 4,
				.log[1] = (info1 & 0xc0) >> 6,
				.param = !!(info & 0x80000000),
			};
		}
	}
	return data;
}
/* Find a gpio entry matching 'func' and 'line' (0xff = wildcard).
 *
 * Falls back to synthesizing a TVDAC0 entry from fixed DCB 2.2 bytes
 * when no table entry matches; in that case *ver is forced to 0 so the
 * caller treats the result as a legacy encoding.
 * Returns the entry offset, or 0 when nothing matches.
 */
u16
dcb_gpio_match(struct nvkm_bios *bios, int idx, u8 func, u8 line,
	       u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
{
	u8  hdr, cnt, i = 0;
	u16 data;

	while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
		if ((line == 0xff || line == gpio->line) &&
		    (func == 0xff || func == gpio->func))
			return data;
	}

	/* DCB 2.2, fixed TVDAC GPIO data */
	if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
		if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
			u8 conf = nvbios_rd08(bios, data - 5);
			u8 addr = nvbios_rd08(bios, data - 4);
			if (conf & 0x01) {
				*gpio = (struct dcb_gpio_func) {
					.func = DCB_GPIO_TVDAC0,
					.line = addr >> 4,
					/* bit 1 selects the active level */
					.log[0] = !!(conf & 0x02),
					.log[1] =  !(conf & 0x02),
				};
				*ver = 0x00;
				return data;
			}
		}
	}

	return 0x0000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/perf.h>
#include <subdev/pci.h>
/* Locate the performance table and parse its header.
 *
 * Modern images keep the pointer at offset 0 of the BIT 'P' entry
 * (versions <= 2); header layout differs between table versions 4.0
 * and 2.0-3.x.  Very old images fall back to a pointer in the BMP
 * header (BMP >= 0x25), which has no sub-entries.
 * Returns the table offset, or 0 when no table is found.
 */
u32
nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_P;
	u32 perf = 0;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version <= 2) {
			perf = nvbios_rd32(bios, bit_P.offset + 0);
			if (perf) {
				*ver = nvbios_rd08(bios, perf + 0);
				*hdr = nvbios_rd08(bios, perf + 1);
				if (*ver >= 0x40 && *ver < 0x41) {
					/* v4.0: count/len swapped vs 2.x-3.x */
					*cnt = nvbios_rd08(bios, perf + 5);
					*len = nvbios_rd08(bios, perf + 2);
					*snr = nvbios_rd08(bios, perf + 4);
					*ssz = nvbios_rd08(bios, perf + 3);
					return perf;
				} else
				if (*ver >= 0x20 && *ver < 0x40) {
					*cnt = nvbios_rd08(bios, perf + 2);
					*len = nvbios_rd08(bios, perf + 3);
					*snr = nvbios_rd08(bios, perf + 4);
					*ssz = nvbios_rd08(bios, perf + 5);
					return perf;
				}
			}
		}
	}

	if (bios->bmp_offset) {
		if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
			perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
			if (perf) {
				/* BMP-era table: hdr/ver byte order differs */
				*hdr = nvbios_rd08(bios, perf + 0);
				*ver = nvbios_rd08(bios, perf + 1);
				*cnt = nvbios_rd08(bios, perf + 2);
				*len = nvbios_rd08(bios, perf + 3);
				*snr = 0;
				*ssz = 0;
				return perf;
			}
		}
	}

	return 0;
}
/* Return the offset of performance entry 'idx', or 0 if out of range.
 * On success the header outputs are rewritten to describe the entry's
 * sub-entry layout (*hdr = entry size, *cnt/*len = sub-entry count/size).
 */
u32
nvbios_perf_entry(struct nvkm_bios *bios, int idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8 sub_nr, sub_sz;
	u32 perf = nvbios_perf_table(bios, ver, hdr, cnt, len,
				     &sub_nr, &sub_sz);

	if (!perf || idx >= *cnt)
		return 0;

	/* each entry is immediately followed by its sub-entries */
	perf += *hdr + idx * (*len + sub_nr * sub_sz);
	*hdr = *len;
	*cnt = sub_nr;
	*len = sub_sz;
	return perf;
}
/* Parse performance entry 'idx' into *info (zeroed first).
 *
 * The entry layout varies widely across table versions (1.2 through
 * 4.0); the switch below decodes each known revision.  Returns the
 * entry offset, or 0 when the entry is absent or of an unknown version.
 */
u32
nvbios_perfEp(struct nvkm_bios *bios, int idx,
	      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *info)
{
	u32 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	/* NOTE(review): read happens even when perf == 0 (fetches BIOS
	 * byte 0); harmless since the switch guard below rejects that
	 * case, but worth keeping in mind
	 */
	info->pstate = nvbios_rd08(bios, perf + 0x00);
	/* "!!perf * *ver" collapses to 0 (-> default) when no entry */
	switch (!!perf * *ver) {
	case 0x12:
	case 0x13:
	case 0x14:
		info->core     = nvbios_rd32(bios, perf + 0x01) * 10;
		info->memory   = nvbios_rd32(bios, perf + 0x05) * 20;
		info->fanspeed = nvbios_rd08(bios, perf + 0x37);
		if (*hdr > 0x38)
			info->voltage = nvbios_rd08(bios, perf + 0x38);
		break;
	case 0x21:
	case 0x23:
	case 0x24:
		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
		info->voltage  = nvbios_rd08(bios, perf + 0x05);
		info->shader   = nvbios_rd16(bios, perf + 0x06) * 1000;
		/* core clock is stored as a signed delta from shader */
		info->core     = info->shader + (signed char)
				 nvbios_rd08(bios, perf + 0x08) * 1000;
		switch (bios->subdev.device->chipset) {
		case 0x49:
		case 0x4b:
			/* these chipsets use a different memory multiplier */
			info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
			break;
		default:
			info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
			break;
		}
		break;
	case 0x25:
		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
		info->voltage  = nvbios_rd08(bios, perf + 0x05);
		info->core     = nvbios_rd16(bios, perf + 0x06) * 1000;
		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
		break;
	case 0x30:
		/* v3.0 additionally carries a script pointer */
		info->script   = nvbios_rd16(bios, perf + 0x02);
		fallthrough;
	case 0x35:
		info->fanspeed = nvbios_rd08(bios, perf + 0x06);
		info->voltage  = nvbios_rd08(bios, perf + 0x07);
		info->core     = nvbios_rd16(bios, perf + 0x08) * 1000;
		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
		info->vdec     = nvbios_rd16(bios, perf + 0x10) * 1000;
		info->disp     = nvbios_rd16(bios, perf + 0x14) * 1000;
		break;
	case 0x40:
		info->voltage  = nvbios_rd08(bios, perf + 0x02);
		/* bits 0-1 of byte 0x0b encode the PCIe link speed */
		switch (nvbios_rd08(bios, perf + 0xb) & 0x3) {
		case 0:
			info->pcie_speed = NVKM_PCIE_SPEED_5_0;
			break;
		case 3:
		case 1:
			info->pcie_speed = NVKM_PCIE_SPEED_2_5;
			break;
		case 2:
			info->pcie_speed = NVKM_PCIE_SPEED_8_0;
			break;
		default:
			break;
		}
		info->pcie_width = 0xff;
		break;
	default:
		return 0;
	}
	return perf;
}
/* Return the offset of sub-entry 'idx' within performance entry 'perfE';
 * 0 when out of range.  *hdr is rewritten to the sub-entry size.
 */
u32
nvbios_perfSe(struct nvkm_bios *bios, u32 perfE, int idx,
	      u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
	u32 data;

	if (idx >= cnt)
		return 0x00000000;

	data = perfE + *hdr + idx * len;
	*hdr = len;
	return data;
}
/* Parse performance sub-entry 'idx' into *info (zeroed first).
 * Only v4.0 sub-entries are understood; for other versions *info stays
 * zeroed.  Returns the sub-entry offset, or 0 when out of range.
 */
u32
nvbios_perfSp(struct nvkm_bios *bios, u32 perfE, int idx,
	      u8 *ver, u8 *hdr, u8 cnt, u8 len,
	      struct nvbios_perfS *info)
{
	u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);

	memset(info, 0x00, sizeof(*info));
	if (data && *ver == 0x40) {
		/* frequency in the low 14 bits, scaled by 1000 */
		info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff)
				 * 1000;
	}
	return data;
}
/* Read the fan pwm divisor from the performance table header.
 * Returns 0 on success (divisor 0 when the field isn't present),
 * -ENODEV when there is no performance table at all.
 */
int
nvbios_perf_fan_parse(struct nvkm_bios *bios,
		      struct nvbios_perf_fan *fan)
{
	u8  ver, hdr, cnt, len, snr, ssz;
	u32 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len,
				     &snr, &ssz);

	if (!perf)
		return -ENODEV;

	/* only 2.x/3.x headers long enough to hold it carry the divisor */
	fan->pwm_divisor = 0;
	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
		fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/bios/ramcfg.h>
#include <subdev/devinit.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/vga.h>
#include <linux/kernel.h>
#define bioslog(lvl, fmt, args...) do { \
nvkm_printk(init->subdev, lvl, info, "0x%08x[%c]: "fmt, \
init->offset, init_exec(init) ? \
'0' + (init->nested - 1) : ' ', ##args); \
} while(0)
#define cont(fmt, args...) do { \
if (init->subdev->debug >= NV_DBG_TRACE) \
printk(fmt, ##args); \
} while(0)
#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
#define warn(fmt, args...) bioslog(WARN, fmt, ##args)
#define error(fmt, args...) bioslog(ERROR, fmt, ##args)
/******************************************************************************
* init parser control flow helpers
*****************************************************************************/
/* True when the interpreter should actually perform operations: the base
 * execute flag (bit 0) alone, or bit 0 together with the force bit (0x04)
 * which overrides the disable bit (0x02).
 */
static inline bool
init_exec(struct nvbios_init *init)
{
	return (init->execute == 1) || ((init->execute & 5) == 5);
}
/* Enable (clear bit 0x02) or disable (set bit 0x02) opcode execution. */
static inline void
init_exec_set(struct nvbios_init *init, bool exec)
{
	if (exec) init->execute &= 0xfd;
	else      init->execute |= 0x02;
}
/* Toggle the execution-disabled bit (0x02). */
static inline void
init_exec_inv(struct nvbios_init *init)
{
	init->execute ^= 0x02;
}
/* Set or clear the force-execute bit (0x04), which makes opcodes run
 * even while the disable bit is set.
 */
static inline void
init_exec_force(struct nvbios_init *init, bool exec)
{
	if (exec) init->execute |= 0x04;
	else      init->execute &= 0xfb;
}
/******************************************************************************
* init parser wrappers for normal register/i2c/whatever accessors
*****************************************************************************/
/* Output-resource (OR) index the script is bound to; 0 (with an error
 * logged if actually executing) when none was provided.
 */
static inline int
init_or(struct nvbios_init *init)
{
	if (init_exec(init)) {
		if (init->or >= 0)
			return init->or;
		error("script needs OR!!\n");
	}
	return 0;
}
/* Link selection for the bound OR: 1 when init->link == 2, else 0;
 * logs an error when executing without a link set.
 */
static inline int
init_link(struct nvbios_init *init)
{
	if (init_exec(init)) {
		if (init->link)
			return init->link == 2;
		error("script needs OR link\n");
	}
	return 0;
}
/* CRTC head index the script is bound to; 0 (with an error logged if
 * actually executing) when none was provided.
 */
static inline int
init_head(struct nvbios_init *init)
{
	if (init_exec(init)) {
		if (init->head >= 0)
			return init->head;
		error("script needs head\n");
	}
	return 0;
}
/* Connector type (from the DCB connector table) for the output the
 * script is bound to; 0xff when unavailable or not executing.
 */
static u8
init_conn(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	struct nvbios_connE connE;
	u8 ver, hdr;
	u32 conn;

	if (init_exec(init)) {
		if (init->outp) {
			conn = init->outp->connector;
			conn = nvbios_connEp(bios, conn, &ver, &hdr, &connE);
			if (conn)
				return connE.type;
		}
		error("script needs connector type\n");
	}

	return 0xff;
}
/* Translate a script register address into a real MMIO address, applying
 * the head/OR/link mangling encoded in the high bits for NV50+ display
 * scripts, then letting devinit remap it if necessary.
 */
static inline u32
init_nvreg(struct nvbios_init *init, u32 reg)
{
	struct nvkm_devinit *devinit = init->subdev->device->devinit;

	/* C51 (at least) sometimes has the lower bits set which the VBIOS
	 * interprets to mean that access needs to go through certain IO
	 * ports instead. The NVIDIA binary driver has been seen to access
	 * these through the NV register address, so lets assume we can
	 * do the same
	 */
	reg &= ~0x00000003;

	/* GF8+ display scripts need register addresses mangled a bit to
	 * select a specific CRTC/OR
	 */
	if (init->subdev->device->card_type >= NV_50) {
		if (reg & 0x80000000) {
			/* bit 31: offset by head */
			reg += init_head(init) * 0x800;
			reg &= ~0x80000000;
		}

		if (reg & 0x40000000) {
			/* bit 30: offset by OR, bit 29: offset by link */
			reg += init_or(init) * 0x800;
			reg &= ~0x40000000;

			if (reg & 0x20000000) {
				reg += init_link(init) * 0x80;
				reg &= ~0x20000000;
			}
		}
	}

	if (reg & ~0x00fffffc)
		warn("unknown bits in register 0x%08x\n", reg);

	return nvkm_devinit_mmio(devinit, reg);
}
/* Read a register through the script address translation; returns 0 when
 * translation failed or the script isn't executing.
 */
static u32
init_rd32(struct nvbios_init *init, u32 reg)
{
	struct nvkm_device *device = init->subdev->device;

	reg = init_nvreg(init, reg);
	if (reg == ~0 || !init_exec(init))
		return 0x00000000;
	return nvkm_rd32(device, reg);
}
/* Write a register through the script address translation; a no-op when
 * translation failed or the script isn't executing.
 */
static void
init_wr32(struct nvbios_init *init, u32 reg, u32 val)
{
	struct nvkm_device *device = init->subdev->device;

	reg = init_nvreg(init, reg);
	if (reg == ~0 || !init_exec(init))
		return;
	nvkm_wr32(device, reg, val);
}
/* Read-modify-write a register: clear 'mask' bits, OR in 'val'.
 * Returns the value read before modification, or 0 when the write was
 * skipped (bad address or not executing).
 */
static u32
init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
{
	struct nvkm_device *device = init->subdev->device;
	u32 prev;

	reg = init_nvreg(init, reg);
	if (reg == ~0 || !init_exec(init))
		return 0x00000000;

	prev = nvkm_rd32(device, reg);
	nvkm_wr32(device, reg, (prev & ~mask) | val);
	return prev;
}
/* Read a legacy VGA IO port (head-relative); 0 when not executing. */
static u8
init_rdport(struct nvbios_init *init, u16 port)
{
	if (init_exec(init))
		return nvkm_rdport(init->subdev->device, init->head, port);
	return 0x00;
}
/* Write a legacy VGA IO port (head-relative); no-op when not executing. */
static void
init_wrport(struct nvbios_init *init, u16 port, u8 value)
{
	if (init_exec(init))
		nvkm_wrport(init->subdev->device, init->head, port, value);
}
/* Read an indexed VGA register; scripts may run with no head bound, in
 * which case head 0 is used.  Returns 0 when not executing.
 */
static u8
init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
{
	struct nvkm_subdev *subdev = init->subdev;
	int head;

	if (!init_exec(init))
		return 0x00;

	head = init->head < 0 ? 0 : init->head;
	return nvkm_rdvgai(subdev->device, head, port, index);
}
/* Write an indexed VGA register, tracking cr44 head switching on
 * pre-NV50 hardware: cr44 only exists on head 0, and writing 3 to it
 * redirects subsequent accesses to head 1.
 */
static void
init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
{
	struct nvkm_device *device = init->subdev->device;

	/* force head 0 for updates to cr44, it only exists on first head */
	if (device->card_type < NV_50) {
		if (port == 0x03d4 && index == 0x44)
			init->head = 0;
	}

	if (init_exec(init)) {
		int head = init->head < 0 ? 0 : init->head;
		nvkm_wrvgai(device, head, port, index, value);
	}

	/* select head 1 if cr44 write selected it */
	if (device->card_type < NV_50) {
		if (port == 0x03d4 && index == 0x44 && value == 3)
			init->head = 1;
	}
}
/* Map a script i2c bus index to an adapter.  0xff selects the default
 * bus for the bound output, 0x80/0x81 are aliases for the primary and
 * secondary buses.  Returns NULL when no such bus exists.
 */
static struct i2c_adapter *
init_i2c(struct nvbios_init *init, int index)
{
	struct nvkm_i2c *i2c = init->subdev->device->i2c;
	struct nvkm_i2c_bus *bus;

	switch (index) {
	case 0xff:
		index = (init->outp && init->outp->i2c_upper_default) ?
			NVKM_I2C_BUS_SEC : NVKM_I2C_BUS_PRI;
		break;
	case 0x80:
		index = NVKM_I2C_BUS_PRI;
		break;
	case 0x81:
		index = NVKM_I2C_BUS_SEC;
		break;
	default:
		break;
	}

	bus = nvkm_i2c_bus_find(i2c, index);
	return bus ? &bus->i2c : NULL;
}
/* Read an i2c device register; -ENODEV when the bus is unavailable or
 * the script isn't executing.
 */
static int
init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
{
	struct i2c_adapter *adap = init_i2c(init, index);

	if (!adap || !init_exec(init))
		return -ENODEV;
	return nvkm_rdi2cr(adap, addr, reg);
}
/* Write an i2c device register; -ENODEV when the bus is unavailable or
 * the script isn't executing.
 */
static int
init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
{
	struct i2c_adapter *adap = init_i2c(init, index);

	if (!adap || !init_exec(init))
		return -ENODEV;
	return nvkm_wri2cr(adap, addr, reg, val);
}
/* DP aux channel for the bound output; NULL (with an error logged if
 * executing) when the script has no output bound.
 */
static struct nvkm_i2c_aux *
init_aux(struct nvbios_init *init)
{
	struct nvkm_i2c *i2c = init->subdev->device->i2c;

	if (!init->outp) {
		if (init_exec(init))
			error("script needs output for aux\n");
		return NULL;
	}

	return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
}
/* Read one byte over the output's aux channel; returns 0x00 on any
 * failure (no channel, not executing, or transfer error).
 */
static u8
init_rdauxr(struct nvbios_init *init, u32 addr)
{
	struct nvkm_i2c_aux *aux = init_aux(init);
	u8 data;

	if (aux && init_exec(init)) {
		int ret = nvkm_rdaux(aux, addr, &data, 1);
		if (ret == 0)
			return data;
		trace("auxch read failed with %d\n", ret);
	}

	return 0x00;
}
/* Write one byte over the output's aux channel; -ENODEV when there is
 * no channel or the script isn't executing, otherwise the transfer's
 * return code.
 */
static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
	struct nvkm_i2c_aux *aux = init_aux(init);
	int ret = -ENODEV;

	if (aux && init_exec(init)) {
		ret = nvkm_wraux(aux, addr, &data, 1);
		if (ret)
			trace("auxch write failed with %d\n", ret);
	}
	return ret;
}
/* Program PLL 'id' to 'freq' (kHz, per the warning message) via devinit;
 * no-op when the script isn't executing.
 */
static void
init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
{
	struct nvkm_devinit *devinit = init->subdev->device->devinit;

	if (init_exec(init)) {
		int ret = nvkm_devinit_pll_set(devinit, id, freq);
		if (ret)
			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
	}
}
/******************************************************************************
* parsing of bios structures that are required to execute init tables
*****************************************************************************/
/* Locate the init-script table header: the BIT 'I' entry on BIT BIOSes,
 * or a fixed location inside the BMP header on older (>= 0x0510) images.
 * Returns the offset with *len set to the header length, or 0.
 */
static u16
init_table(struct nvkm_bios *bios, u16 *len)
{
	struct bit_entry bit_I;

	if (!bit_entry(bios, 'I', &bit_I)) {
		*len = bit_I.length;
		return bit_I.offset;
	}

	if (bmp_version(bios) >= 0x0510) {
		*len = 14;
		return bios->bmp_offset + 75;
	}

	return 0x0000;
}
/* Fetch one of the sub-table pointers stored at 'offset' within the
 * init table header ('name' is used only for diagnostics).  Returns the
 * pointer, or 0 with a warning when the header is missing, too short,
 * or the pointer itself is zero.
 */
static u16
init_table_(struct nvbios_init *init, u16 offset, const char *name)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 len, data = init_table(bios, &len);

	if (data) {
		if (len >= offset + 2) {
			data = nvbios_rd16(bios, data + offset);
			if (data)
				return data;

			warn("%s pointer invalid\n", name);
			return 0x0000;
		}

		warn("init data too short for %s pointer", name);
		return 0x0000;
	}

	warn("init data not found\n");
	return 0x0000;
}
/* Pointers into the init table header; each offset selects one of the
 * sub-table pointers fetched by init_table_().  No trailing semicolons:
 * these are function-like macros used in expression context (the
 * original init_xlat_table definition carried a stray ';', which
 * expands to an empty statement at every use and would be a syntax
 * error inside an expression).
 */
#define init_script_table(b) init_table_((b), 0x00, "script table")
#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table")
#define init_macro_table(b) init_table_((b), 0x04, "macro table")
#define init_condition_table(b) init_table_((b), 0x06, "condition table")
#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table")
#define init_function_table(b) init_table_((b), 0x0c, "function table")
#define init_xlat_table(b) init_table_((b), 0x10, "xlat table")
/* Pointer to init script 'index'.  Pre-5.10 BMP images store up to two
 * script pointers at a fixed header offset; newer images go through the
 * script table.  Returns 0 when no script exists.
 */
static u16
init_script(struct nvkm_bios *bios, int index)
{
	struct nvbios_init init = { .subdev = &bios->subdev };
	u16 bmp_ver = bmp_version(bios), data;

	if (bmp_ver && bmp_ver < 0x0510) {
		if (index > 1 || bmp_ver < 0x0100)
			return 0x0000;

		/* script pointer location moved between BMP 1.x and 2.x */
		data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
		return nvbios_rd16(bios, data + (index * 2));
	}

	data = init_script_table(&init);
	if (data)
		return nvbios_rd16(bios, data + (index * 2));

	return 0x0000;
}
/* Pointer stored at offset 14 of the init table header; 0 when the
 * header is absent or too short.
 */
static u16
init_unknown_script(struct nvkm_bios *bios)
{
	u16 len;
	u16 data = init_table(bios, &len);

	if (!data || len < 16)
		return 0x0000;
	return nvbios_rd16(bios, data + 14);
}
/* Number of ram configuration groups, from the ramcfg strap table. */
static u8
init_ram_restrict_group_count(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;

	return nvbios_ramcfg_count(bios);
}
/* Current ram configuration strap index, cached in init->ramcfg with
 * bit 31 marking the cache as valid.
 */
static u8
init_ram_restrict(struct nvbios_init *init)
{
	/* This appears to be the behaviour of the VBIOS parser, and *is*
	 * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
	 * avoid breaking the memory controller (somehow) by reading it
	 * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
	 *
	 * Preserving the non-caching behaviour on earlier chipsets just
	 * in case *not* re-reading the strap causes similar breakage.
	 */
	if (!init->ramcfg || init->subdev->device->bios->version.major < 0x70)
		init->ramcfg = 0x80000000 | nvbios_ramcfg_index(init->subdev);
	return (init->ramcfg & 0x7fffffff);
}
/* Translate a value through xlat table 'index': the table holds 16-bit
 * pointers to per-index lookup arrays, indexed by 'offset'.  Returns 0
 * when the table or the selected pointer is missing.
 */
static u8
init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 table = init_xlat_table(init);
	if (table) {
		u16 data = nvbios_rd16(bios, table + (index * 2));
		if (data)
			return nvbios_rd08(bios, data + offset);
		warn("xlat table pointer %d invalid\n", index);
	}
	return 0x00;
}
/******************************************************************************
* utility functions used by various init opcode handlers
*****************************************************************************/
static bool
init_condition_met(struct nvbios_init *init, u8 cond)
{
	/* Evaluate condition-table entry 'cond'.  Entries are 12 bytes:
	 * register, mask, expected value.  True when
	 * (R[reg] & mask) == value; false when the table is missing.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 table = init_condition_table(init);
	if (table) {
		u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
		u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
		u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
		      cond, reg, msk, val);
		return (init_rd32(init, reg) & msk) == val;
	}
	return false;
}
static bool
init_io_condition_met(struct nvbios_init *init, u8 cond)
{
	/* Evaluate IO condition-table entry 'cond'.  Entries are 5 bytes:
	 * VGA port, index, mask, expected value.  True when the masked
	 * indexed-register read matches; false when the table is missing.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 table = init_io_condition_table(init);
	if (table) {
		u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
		u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
		u8 mask = nvbios_rd08(bios, table + (cond * 5) + 3);
		u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
		      cond, port, index, mask, value);
		return (init_rdvgai(init, port, index) & mask) == value;
	}
	return false;
}
static bool
init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
{
	/* Evaluate IO flag condition-table entry 'cond' (9-byte entries).
	 * A masked/shifted VGA indexed-register read selects a byte from
	 * a flag array in the VBIOS image at 'data'; the condition holds
	 * when that byte, masked with 'dmask', equals 'value'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 table = init_io_flag_condition_table(init);
	if (table) {
		u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
		u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
		u8 mask = nvbios_rd08(bios, table + (cond * 9) + 3);
		u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
		u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
		u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
		u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
		return (nvbios_rd08(bios, data + ioval) & dmask) == value;
	}
	return false;
}
/* Variable shift used by the COPY opcodes: values below 0x80 encode a
 * right-shift by that amount; 0x80 and above encode a left-shift of
 * (0x100 - shift).
 */
static inline u32
init_shift(u32 data, u8 shift)
{
	return (shift < 0x80) ? (data >> shift) : (data << (0x100 - shift));
}
static u32
init_tmds_reg(struct nvbios_init *init, u8 tmds)
{
	/* For mlv < 0x80, it is an index into a table of TMDS base addresses.
	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address.
	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address, and then flip the offset by 8.
	 */
	const int pramdac_offset[13] = {
		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
	const u32 pramdac_table[4] = {
		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
	if (tmds >= 0x80) {
		if (init->outp) {
			/* NOTE(review): pramdac_offset is indexed by the
			 * DCB "or" value; presumably bounded by the DCB
			 * parser — confirm against dcb output parsing.
			 */
			u32 dacoffset = pramdac_offset[init->outp->or];
			if (tmds == 0x81)
				dacoffset ^= 8;
			return 0x6808b0 + dacoffset;
		}
		if (init_exec(init))
			error("tmds opcodes need dcb\n");
	} else {
		if (tmds < ARRAY_SIZE(pramdac_table))
			return pramdac_table[tmds];
		error("tmds selector 0x%02x unknown\n", tmds);
	}
	/* 0 signals "no valid register" to the TMDS opcode handlers. */
	return 0;
}
/******************************************************************************
* init opcode handlers
*****************************************************************************/
/**
* init_reserved - stub for various unknown/unused single-byte opcodes
*
*/
static void
init_reserved(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 opcode = nvbios_rd08(bios, init->offset);
	u8 length, i;

	/* All reserved opcodes are one byte, except 0xaa which carries
	 * three additional data bytes.
	 */
	if (opcode == 0xaa)
		length = 4;
	else
		length = 1;

	trace("RESERVED 0x%02x\t", opcode);
	for (i = 1; i < length; i++)
		cont(" 0x%02x", nvbios_rd08(bios, init->offset + i));
	cont("\n");

	init->offset += length;
}
/**
* INIT_DONE - opcode 0x71
*
*/
static void
init_done(struct nvbios_init *init)
{
	trace("DONE\n");
	/* An offset of zero terminates the interpreter loop. */
	init->offset = 0x0000;
}
/**
* INIT_IO_RESTRICT_PROG - opcode 0x32
*
*/
static void
init_io_restrict_prog(struct nvbios_init *init)
{
	/* Read a VGA indexed register, mask/shift it to select one of
	 * 'count' 32-bit values that follow the opcode, and program the
	 * selected value into 'reg'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	u8 index = nvbios_rd08(bios, init->offset + 3);
	u8 mask = nvbios_rd08(bios, init->offset + 4);
	u8 shift = nvbios_rd08(bios, init->offset + 5);
	u8 count = nvbios_rd08(bios, init->offset + 6);
	u32 reg = nvbios_rd32(bios, init->offset + 7);
	u8 conf, i;
	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
	      "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
	      reg, port, index, mask, shift);
	init->offset += 11;
	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	/* Walk every entry even when not selected, so the full opcode
	 * length is consumed.
	 */
	for (i = 0; i < count; i++) {
		u32 data = nvbios_rd32(bios, init->offset);
		if (i == conf) {
			trace("\t0x%08x *\n", data);
			init_wr32(init, reg, data);
		} else {
			trace("\t0x%08x\n", data);
		}
		init->offset += 4;
	}
	trace("}]\n");
}
/**
* INIT_REPEAT - opcode 0x33
*
*/
static void
init_repeat(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 count = nvbios_rd08(bios, init->offset + 1);
	/* Save the outer loop start so nested REPEATs restore cleanly. */
	u16 repeat = init->repeat;
	trace("REPEAT\t0x%02x\n", count);
	init->offset += 2;
	init->repeat = init->offset;
	init->repend = init->offset;
	/* Re-run the interpreter from the loop body 'count' times.
	 * INIT_END_REPEAT records the resume point in ->repend and stops
	 * each pass by zeroing ->offset.
	 */
	while (count--) {
		init->offset = init->repeat;
		nvbios_exec(init);
		if (count)
			trace("REPEAT\t0x%02x\n", count);
	}
	init->offset = init->repend;
	init->repeat = repeat;
}
/**
* INIT_IO_RESTRICT_PLL - opcode 0x34
*
*/
static void
init_io_restrict_pll(struct nvbios_init *init)
{
	/* Like IO_RESTRICT_PROG, but the selected 16-bit value is a
	 * frequency in 10kHz units programmed into PLL 'reg'.  If 'iofc'
	 * names a valid IO flag condition and it holds, the frequency is
	 * doubled first.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	u8 index = nvbios_rd08(bios, init->offset + 3);
	u8 mask = nvbios_rd08(bios, init->offset + 4);
	u8 shift = nvbios_rd08(bios, init->offset + 5);
	s8 iofc = nvbios_rd08(bios, init->offset + 6);
	u8 count = nvbios_rd08(bios, init->offset + 7);
	u32 reg = nvbios_rd32(bios, init->offset + 8);
	u8 conf, i;
	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
	      "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n",
	      reg, port, index, mask, shift, iofc);
	init->offset += 12;
	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		u32 freq = nvbios_rd16(bios, init->offset) * 10;
		if (i == conf) {
			trace("\t%dkHz *\n", freq);
			if (iofc > 0 && init_io_flag_condition_met(init, iofc))
				freq *= 2;
			init_prog_pll(init, reg, freq);
		} else {
			trace("\t%dkHz\n", freq);
		}
		init->offset += 2;
	}
	trace("}]\n");
}
/**
* INIT_END_REPEAT - opcode 0x36
*
*/
static void
init_end_repeat(struct nvbios_init *init)
{
	trace("END_REPEAT\n");
	init->offset += 1;
	/* When inside an INIT_REPEAT, record where the outer loop should
	 * resume and halt this interpreter pass (offset 0 ends parsing).
	 */
	if (init->repeat) {
		init->repend = init->offset;
		init->offset = 0;
	}
}
/**
* INIT_COPY - opcode 0x37
*
*/
static void
init_copy(struct nvbios_init *init)
{
	/* Copy a shifted+masked field of register 'reg' into a VGA
	 * indexed register, preserving the bits selected by 'mask'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 reg = nvbios_rd32(bios, init->offset + 1);
	u8 shift = nvbios_rd08(bios, init->offset + 5);
	u8 smask = nvbios_rd08(bios, init->offset + 6);
	u16 port = nvbios_rd16(bios, init->offset + 7);
	u8 index = nvbios_rd08(bios, init->offset + 9);
	u8 mask = nvbios_rd08(bios, init->offset + 10);
	u8 data;
	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
	      "((R[0x%06x] %s 0x%02x) & 0x%02x)\n",
	      port, index, mask, reg, (shift & 0x80) ? "<<" : ">>",
	      (shift & 0x80) ? (0x100 - shift) : shift, smask);
	init->offset += 11;
	data = init_rdvgai(init, port, index) & mask;
	data |= init_shift(init_rd32(init, reg), shift) & smask;
	init_wrvgai(init, port, index, data);
}
/**
* INIT_NOT - opcode 0x38
*
*/
static void
init_not(struct nvbios_init *init)
{
	trace("NOT\n");
	init->offset += 1;
	/* Invert the current execution-enable state. */
	init_exec_inv(init);
}
/**
* INIT_IO_FLAG_CONDITION - opcode 0x39
*
*/
static void
init_io_flag_condition(struct nvbios_init *init)
{
	/* Disable further execution unless IO flag condition 'cond' holds. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 cond = nvbios_rd08(bios, init->offset + 1);

	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
	init->offset += 2;

	if (init_io_flag_condition_met(init, cond))
		return;
	init_exec_set(init, false);
}
/**
* INIT_GENERIC_CONDITION - opcode 0x3a
*
*/
static void
init_generic_condition(struct nvbios_init *init)
{
	/* Dispatch on the condition id; most conditions disable further
	 * execution when unmet.  'size' bytes of condition-specific data
	 * follow the opcode and are only consumed for unknown ids.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	struct nvbios_dpout info;
	u8 cond = nvbios_rd08(bios, init->offset + 1);
	u8 size = nvbios_rd08(bios, init->offset + 2);
	u8 ver, hdr, cnt, len;
	u16 data;
	trace("GENERIC_CONDITION\t0x%02x 0x%02x\n", cond, size);
	init->offset += 3;
	switch (cond) {
	case 0: /* CONDITION_ID_INT_DP. */
		if (init_conn(init) != DCB_CONNECTOR_eDP)
			init_exec_set(init, false);
		break;
	case 1: /* CONDITION_ID_USE_SPPLL0. */
	case 2: /* CONDITION_ID_USE_SPPLL1. */
		/* The cond value doubles as the flag bit to test in the
		 * DP output table entry.
		 */
		if ( init->outp &&
		    (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
					       (init->outp->or << 0) |
					       (init->outp->sorconf.link << 6),
					       &ver, &hdr, &cnt, &len, &info)))
		{
			if (!(info.flags & cond))
				init_exec_set(init, false);
			break;
		}
		if (init_exec(init))
			warn("script needs dp output table data\n");
		break;
	case 5: /* CONDITION_ID_ASSR_SUPPORT. */
		/* DPCD register 0x0d, alternate scrambler seed reset. */
		if (!(init_rdauxr(init, 0x0d) & 1))
			init_exec_set(init, false);
		break;
	case 7: /* CONDITION_ID_NO_PANEL_SEQ_DELAYS. */
		init_exec_set(init, false);
		break;
	default:
		warn("INIT_GENERIC_CONDITION: unknown 0x%02x\n", cond);
		init->offset += size;
		break;
	}
}
/**
* INIT_IO_MASK_OR - opcode 0x3b
*
*/
static void
init_io_mask_or(struct nvbios_init *init)
{
	/* Clear the bit selected by the current output resource in the
	 * given VGA CRTC register.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 or = init_or(init);
	u8 data;

	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
	init->offset += 2;

	data = init_rdvgai(init, 0x03d4, index) & ~(1 << or);
	init_wrvgai(init, 0x03d4, index, data);
}
/**
* INIT_IO_OR - opcode 0x3c
*
*/
static void
init_io_or(struct nvbios_init *init)
{
	/* Set the bit selected by the current output resource in the
	 * given VGA CRTC register.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 or = init_or(init);
	u8 data;

	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
	init->offset += 2;

	data = init_rdvgai(init, 0x03d4, index) | (1 << or);
	init_wrvgai(init, 0x03d4, index, data);
}
/**
* INIT_ANDN_REG - opcode 0x47
*
*/
static void
init_andn_reg(struct nvbios_init *init)
{
	/* Clear the bits in 'bits' from register 'addr'. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 bits = nvbios_rd32(bios, init->offset + 5);

	trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", addr, bits);
	init->offset += 9;

	init_mask(init, addr, bits, 0);
}
/**
* INIT_OR_REG - opcode 0x48
*
*/
static void
init_or_reg(struct nvbios_init *init)
{
	/* Set the bits in 'bits' in register 'addr'. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 bits = nvbios_rd32(bios, init->offset + 5);

	trace("OR_REG\tR[0x%06x] |= 0x%08x\n", addr, bits);
	init->offset += 9;

	init_mask(init, addr, 0, bits);
}
/**
* INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
*
*/
static void
init_idx_addr_latched(struct nvbios_init *init)
{
	/* Stream (index, data) byte pairs through a latched control/data
	 * register pair: each data byte is written to 'dreg', then the
	 * index is merged into 'creg' under 'mask' together with 'data'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 creg = nvbios_rd32(bios, init->offset + 1);
	u32 dreg = nvbios_rd32(bios, init->offset + 5);
	u32 mask = nvbios_rd32(bios, init->offset + 9);
	u32 data = nvbios_rd32(bios, init->offset + 13);
	u8 count = nvbios_rd08(bios, init->offset + 17);
	trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
	trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
	init->offset += 18;
	while (count--) {
		u8 iaddr = nvbios_rd08(bios, init->offset + 0);
		u8 idata = nvbios_rd08(bios, init->offset + 1);
		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
		init->offset += 2;
		/* Data first, then the address write latches it. */
		init_wr32(init, dreg, idata);
		init_mask(init, creg, ~mask, data | iaddr);
	}
}
/**
* INIT_IO_RESTRICT_PLL2 - opcode 0x4a
*
*/
static void
init_io_restrict_pll2(struct nvbios_init *init)
{
	/* Like IO_RESTRICT_PLL, but entries are 32-bit frequencies stored
	 * directly in kHz and there is no IO flag condition.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	u8 index = nvbios_rd08(bios, init->offset + 3);
	u8 mask = nvbios_rd08(bios, init->offset + 4);
	u8 shift = nvbios_rd08(bios, init->offset + 5);
	u8 count = nvbios_rd08(bios, init->offset + 6);
	u32 reg = nvbios_rd32(bios, init->offset + 7);
	u8 conf, i;
	trace("IO_RESTRICT_PLL2\t"
	      "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n",
	      reg, port, index, mask, shift);
	init->offset += 11;
	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		u32 freq = nvbios_rd32(bios, init->offset);
		if (i == conf) {
			trace("\t%dkHz *\n", freq);
			init_prog_pll(init, reg, freq);
		} else {
			trace("\t%dkHz\n", freq);
		}
		init->offset += 4;
	}
	trace("}]\n");
}
/**
* INIT_PLL2 - opcode 0x4b
*
*/
static void
init_pll2(struct nvbios_init *init)
{
	/* Program PLL 'addr' to 'khz' (frequency stored directly in kHz). */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 khz = nvbios_rd32(bios, init->offset + 5);

	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", addr, khz);
	init->offset += 9;

	init_prog_pll(init, addr, khz);
}
/**
* INIT_I2C_BYTE - opcode 0x4c
*
*/
static void
init_i2c_byte(struct nvbios_init *init)
{
	/* Read-modify-write a series of registers on an I2C device.
	 * The device address is stored shifted left by one in the opcode.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
	u8 count = nvbios_rd08(bios, init->offset + 3);
	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
	init->offset += 4;
	while (count--) {
		u8 reg = nvbios_rd08(bios, init->offset + 0);
		u8 mask = nvbios_rd08(bios, init->offset + 1);
		u8 data = nvbios_rd08(bios, init->offset + 2);
		int val;
		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
		init->offset += 3;
		val = init_rdi2cr(init, index, addr, reg);
		/* Skip (don't write) entries whose read failed. */
		if (val < 0)
			continue;
		init_wri2cr(init, index, addr, reg, (val & mask) | data);
	}
}
/**
* INIT_ZM_I2C_BYTE - opcode 0x4d
*
*/
static void
init_zm_i2c_byte(struct nvbios_init *init)
{
	/* Zero-mask (unconditional) writes of (reg, data) byte pairs to
	 * an I2C device; address stored shifted left by one.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
	u8 count = nvbios_rd08(bios, init->offset + 3);
	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
	init->offset += 4;
	while (count--) {
		u8 reg = nvbios_rd08(bios, init->offset + 0);
		u8 data = nvbios_rd08(bios, init->offset + 1);
		trace("\t[0x%02x] = 0x%02x\n", reg, data);
		init->offset += 2;
		init_wri2cr(init, index, addr, reg, data);
	}
}
/**
* INIT_ZM_I2C - opcode 0x4e
*
*/
static void
init_zm_i2c(struct nvbios_init *init)
{
	/* Send a raw byte stream to an I2C device in a single combined
	 * transfer.  'count' is a u8, so data[256] always suffices.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;
	u8 count = nvbios_rd08(bios, init->offset + 3);
	u8 data[256], i;
	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
	init->offset += 4;
	/* The payload is always consumed so the opcode length is correct
	 * even when execution is currently disabled.
	 */
	for (i = 0; i < count; i++) {
		data[i] = nvbios_rd08(bios, init->offset);
		trace("\t0x%02x\n", data[i]);
		init->offset++;
	}
	if (init_exec(init)) {
		struct i2c_adapter *adap = init_i2c(init, index);
		struct i2c_msg msg = {
			.addr = addr, .flags = 0, .len = count, .buf = data,
		};
		int ret;
		if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
			warn("i2c wr failed, %d\n", ret);
	}
}
/**
* INIT_TMDS - opcode 0x4f
*
*/
static void
init_tmds(struct nvbios_init *init)
{
	/* Read-modify-write a TMDS transmitter register: latch the
	 * address with bit 16 set, modify the data register, then write
	 * the address again to release the latch.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 tmds = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2);
	u8 mask = nvbios_rd08(bios, init->offset + 3);
	u8 data = nvbios_rd08(bios, init->offset + 4);
	u32 reg = init_tmds_reg(init, tmds);
	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
	      tmds, addr, mask, data);
	init->offset += 5;
	/* Bail out if the TMDS selector could not be resolved. */
	if (reg == 0)
		return;
	init_wr32(init, reg + 0, addr | 0x00010000);
	init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask));
	init_wr32(init, reg + 0, addr);
}
/**
* INIT_ZM_TMDS_GROUP - opcode 0x50
*
*/
static void
init_zm_tmds_group(struct nvbios_init *init)
{
	/* Unconditionally write (addr, data) pairs to a TMDS transmitter:
	 * data first, then the address write latches it.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 tmds = nvbios_rd08(bios, init->offset + 1);
	u8 count = nvbios_rd08(bios, init->offset + 2);
	u32 reg = init_tmds_reg(init, tmds);
	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
	init->offset += 3;
	while (count--) {
		u8 addr = nvbios_rd08(bios, init->offset + 0);
		u8 data = nvbios_rd08(bios, init->offset + 1);
		trace("\t[0x%02x] = 0x%02x\n", addr, data);
		init->offset += 2;
		init_wr32(init, reg + 4, data);
		init_wr32(init, reg + 0, addr);
	}
}
/**
* INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
*
*/
static void
init_cr_idx_adr_latch(struct nvbios_init *init)
{
	/* Stream data bytes through a pair of CRTC registers: addr0
	 * selects an auto-incrementing index (starting at 'base'), addr1
	 * receives the data.  addr0's original value is restored after.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 addr0 = nvbios_rd08(bios, init->offset + 1);
	u8 addr1 = nvbios_rd08(bios, init->offset + 2);
	u8 base = nvbios_rd08(bios, init->offset + 3);
	u8 count = nvbios_rd08(bios, init->offset + 4);
	u8 save0;
	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
	init->offset += 5;
	save0 = init_rdvgai(init, 0x03d4, addr0);
	while (count--) {
		u8 data = nvbios_rd08(bios, init->offset);
		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
		init->offset += 1;
		init_wrvgai(init, 0x03d4, addr0, base++);
		init_wrvgai(init, 0x03d4, addr1, data);
	}
	init_wrvgai(init, 0x03d4, addr0, save0);
}
/**
* INIT_CR - opcode 0x52
*
*/
static void
init_cr(struct nvbios_init *init)
{
	/* Read-modify-write a VGA CRTC register. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 addr = nvbios_rd08(bios, init->offset + 1);
	u8 mask = nvbios_rd08(bios, init->offset + 2);
	u8 data = nvbios_rd08(bios, init->offset + 3);
	u8 cur;

	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
	init->offset += 4;

	cur = init_rdvgai(init, 0x03d4, addr);
	init_wrvgai(init, 0x03d4, addr, (cur & mask) | data);
}
/**
* INIT_ZM_CR - opcode 0x53
*
*/
static void
init_zm_cr(struct nvbios_init *init)
{
	/* Zero-mask (unconditional) write of a VGA CRTC register. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 reg = nvbios_rd08(bios, init->offset + 1);
	u8 val = nvbios_rd08(bios, init->offset + 2);

	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", reg, val);
	init->offset += 3;

	init_wrvgai(init, 0x03d4, reg, val);
}
/**
* INIT_ZM_CR_GROUP - opcode 0x54
*
*/
static void
init_zm_cr_group(struct nvbios_init *init)
{
	/* Unconditional writes of (addr, data) pairs to VGA CRTC regs. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 count = nvbios_rd08(bios, init->offset + 1);
	trace("ZM_CR_GROUP\n");
	init->offset += 2;
	while (count--) {
		u8 addr = nvbios_rd08(bios, init->offset + 0);
		u8 data = nvbios_rd08(bios, init->offset + 1);
		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
		init->offset += 2;
		init_wrvgai(init, 0x03d4, addr, data);
	}
}
/**
* INIT_CONDITION_TIME - opcode 0x56
*
*/
static void
init_condition_time(struct nvbios_init *init)
{
	/* Poll a condition-table entry until it holds, disabling further
	 * execution on timeout.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 cond = nvbios_rd08(bios, init->offset + 1);
	u8 retry = nvbios_rd08(bios, init->offset + 2);
	/* NOTE(review): attempts are capped at 100 (20ms delay each, so
	 * up to ~2s) regardless of the retry*50 scaling — looks odd but
	 * presumably mirrors the original parser; confirm before changing.
	 */
	u8 wait = min((u16)retry * 50, 100);
	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
	init->offset += 3;
	if (!init_exec(init))
		return;
	while (wait--) {
		if (init_condition_met(init, cond))
			return;
		mdelay(20);
	}
	/* Condition never became true. */
	init_exec_set(init, false);
}
/**
* INIT_LTIME - opcode 0x57
*
*/
static void
init_ltime(struct nvbios_init *init)
{
	/* Delay for 'msec' milliseconds, only while executing. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 msec = nvbios_rd16(bios, init->offset + 1);

	trace("LTIME\t0x%04x\n", msec);
	init->offset += 3;

	if (!init_exec(init))
		return;
	mdelay(msec);
}
/**
* INIT_ZM_REG_SEQUENCE - opcode 0x58
*
*/
static void
init_zm_reg_sequence(struct nvbios_init *init)
{
	/* Write 'count' dwords to consecutive registers starting at 'base'. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 base = nvbios_rd32(bios, init->offset + 1);
	u8 count = nvbios_rd08(bios, init->offset + 5);
	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
	init->offset += 6;
	while (count--) {
		u32 data = nvbios_rd32(bios, init->offset);
		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
		init->offset += 4;
		init_wr32(init, base, data);
		base += 4;
	}
}
/**
* INIT_PLL_INDIRECT - opcode 0x59
*
*/
static void
init_pll_indirect(struct nvbios_init *init)
{
	/* Program a PLL from a frequency stored elsewhere in the VBIOS
	 * image at 'addr'; the stored value is in MHz, converted to kHz.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 reg = nvbios_rd32(bios, init->offset + 1);
	u16 addr = nvbios_rd16(bios, init->offset + 5);
	u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
	      reg, addr, freq);
	init->offset += 7;
	init_prog_pll(init, reg, freq);
}
/**
* INIT_ZM_REG_INDIRECT - opcode 0x5a
*
*/
static void
init_zm_reg_indirect(struct nvbios_init *init)
{
	/* Write register 'reg' with a dword fetched from elsewhere in
	 * the VBIOS image at offset 'addr'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 reg = nvbios_rd32(bios, init->offset + 1);
	u16 addr = nvbios_rd16(bios, init->offset + 5);
	u32 data = nvbios_rd32(bios, addr);
	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
	      reg, addr, data);
	init->offset += 7;
	/* Fix: write the target register, not the VBIOS offset 'addr'
	 * (the old code wrote to 'addr', contradicting the trace above).
	 */
	init_wr32(init, reg, data);
}
/**
* INIT_SUB_DIRECT - opcode 0x5b
*
*/
static void
init_sub_direct(struct nvbios_init *init)
{
	/* Execute a sub-script at VBIOS offset 'addr', then continue
	 * after this opcode.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 addr = nvbios_rd16(bios, init->offset + 1);
	u16 save;
	trace("SUB_DIRECT\t0x%04x\n", addr);
	if (init_exec(init)) {
		save = init->offset;
		init->offset = addr;
		/* NOTE(review): on parse failure we return without
		 * restoring ->offset, abandoning the outer script —
		 * presumably intentional; confirm.
		 */
		if (nvbios_exec(init)) {
			error("error parsing sub-table\n");
			return;
		}
		init->offset = save;
	}
	init->offset += 3;
}
/**
* INIT_JUMP - opcode 0x5c
*
*/
static void
init_jump(struct nvbios_init *init)
{
	/* Unconditional jump while executing; otherwise just skip the
	 * opcode's three bytes.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 target = nvbios_rd16(bios, init->offset + 1);

	trace("JUMP\t0x%04x\n", target);

	if (!init_exec(init)) {
		init->offset += 3;
		return;
	}
	init->offset = target;
}
/**
* INIT_I2C_IF - opcode 0x5e
*
*/
static void
init_i2c_if(struct nvbios_init *init)
{
	/* Condition opcode testing a masked I2C register read.  It is
	 * evaluated even when execution is currently disabled, hence the
	 * init_exec_force() bracketing around the I2C access.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2);
	u8 reg = nvbios_rd08(bios, init->offset + 3);
	u8 mask = nvbios_rd08(bios, init->offset + 4);
	u8 data = nvbios_rd08(bios, init->offset + 5);
	u8 value;
	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
	      index, addr, reg, mask, data);
	init->offset += 6;
	init_exec_force(init, true);
	value = init_rdi2cr(init, index, addr, reg);
	if ((value & mask) != data)
		init_exec_set(init, false);
	init_exec_force(init, false);
}
/**
* INIT_COPY_NV_REG - opcode 0x5f
*
*/
static void
init_copy_nv_reg(struct nvbios_init *init)
{
	/* Copy a shifted, masked and XOR'd field of register 'sreg' into
	 * register 'dreg', preserving dreg's bits outside 'dmask'.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 sreg = nvbios_rd32(bios, init->offset + 1);
	u8 shift = nvbios_rd08(bios, init->offset + 5);
	u32 smask = nvbios_rd32(bios, init->offset + 6);
	u32 sxor = nvbios_rd32(bios, init->offset + 10);
	u32 dreg = nvbios_rd32(bios, init->offset + 14);
	u32 dmask = nvbios_rd32(bios, init->offset + 18);
	u32 data;
	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
	      "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n",
	      dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>",
	      (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor);
	init->offset += 22;
	data = init_shift(init_rd32(init, sreg), shift);
	init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
}
/**
* INIT_ZM_INDEX_IO - opcode 0x62
*
*/
static void
init_zm_index_io(struct nvbios_init *init)
{
	/* Zero-mask (unconditional) write to an indexed VGA I/O register. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	u8 idx = nvbios_rd08(bios, init->offset + 3);
	u8 val = nvbios_rd08(bios, init->offset + 4);

	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, idx, val);
	init->offset += 5;

	init_wrvgai(init, port, idx, val);
}
/**
* INIT_COMPUTE_MEM - opcode 0x63
*
*/
static void
init_compute_mem(struct nvbios_init *init)
{
	/* Hand off to the devinit subdev's memory sizing/initialisation;
	 * forced to run regardless of the current execution state.
	 */
	struct nvkm_devinit *devinit = init->subdev->device->devinit;
	trace("COMPUTE_MEM\n");
	init->offset += 1;
	init_exec_force(init, true);
	if (init_exec(init))
		nvkm_devinit_meminit(devinit);
	init_exec_force(init, false);
}
/**
* INIT_RESET - opcode 0x65
*
*/
static void
init_reset(struct nvbios_init *init)
{
	/* Pulse register 'reg' with data1 then data2, with accesses at
	 * 0x00184c/0x001850 bracketing the reset.  NOTE(review): these
	 * look like PCI config space mirrors (romcs/bus behaviour) —
	 * confirm the exact semantics before touching the sequence.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 reg = nvbios_rd32(bios, init->offset + 1);
	u32 data1 = nvbios_rd32(bios, init->offset + 5);
	u32 data2 = nvbios_rd32(bios, init->offset + 9);
	u32 savepci19;
	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
	init->offset += 13;
	/* Forced: the reset runs even when execution is disabled. */
	init_exec_force(init, true);
	savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000);
	init_wr32(init, reg, data1);
	udelay(10);
	init_wr32(init, reg, data2);
	init_wr32(init, 0x00184c, savepci19);
	init_mask(init, 0x001850, 0x00000001, 0x00000000);
	init_exec_force(init, false);
}
/**
* INIT_CONFIGURE_MEM - opcode 0x66
*
*/
static u16
init_configure_mem_clk(struct nvbios_init *init)
{
	/* Locate the BMP memory-init table entry selected by the strap
	 * nibble cached in CR3C (entries are 66 bytes apart).
	 */
	u16 mdata = bmp_mem_init_table(init->subdev->device->bios);
	if (mdata)
		mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66;
	return mdata;
}
static void
init_configure_mem(struct nvbios_init *init)
{
	/* Legacy (BMP v2 and earlier) memory configuration: walk the
	 * SDR/DDR sequencer register list, writing values from the
	 * strap-selected memory-init entry.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 mdata, sdata;
	u32 addr, data;
	trace("CONFIGURE_MEM\n");
	init->offset += 1;
	/* Opcode is obsolete on newer BIOSes; treat it as INIT_DONE. */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);
	mdata = init_configure_mem_clk(init);
	/* Bit 0 of the entry selects DDR vs SDR sequence tables. */
	sdata = bmp_sdr_seq_table(bios);
	if (nvbios_rd08(bios, mdata) & 0x01)
		sdata = bmp_ddr_seq_table(bios);
	mdata += 6; /* skip to data */
	/* NOTE(review): 0x03c4[0x01] bit 5 presumably blanks the screen
	 * during reconfiguration — confirm; it is never cleared here.
	 */
	data = init_rdvgai(init, 0x03c4, 0x01);
	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
	/* Sequence table is terminated by 0xffffffff; a few addresses
	 * take a fixed command value instead of consuming table data.
	 */
	for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
		switch (addr) {
		case 0x10021c: /* CKE_NORMAL */
		case 0x1002d0: /* CMD_REFRESH */
		case 0x1002d4: /* CMD_PRECHARGE */
			data = 0x00000001;
			break;
		default:
			data = nvbios_rd32(bios, mdata);
			mdata += 4;
			/* 0xffffffff entries are placeholders; skip. */
			if (data == 0xffffffff)
				continue;
			break;
		}
		init_wr32(init, addr, data);
	}
	init_exec_force(init, false);
}
/**
* INIT_CONFIGURE_CLK - opcode 0x67
*
*/
static void
init_configure_clk(struct nvbios_init *init)
{
	/* Legacy (BMP v2 and earlier) clock setup: program NVPLL and
	 * MPLL from the strap-selected memory-init entry (values stored
	 * in 10kHz units; MPLL doubled for DDR).
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 mdata, clock;
	trace("CONFIGURE_CLK\n");
	init->offset += 1;
	/* Opcode is obsolete on newer BIOSes; treat it as INIT_DONE. */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);
	mdata = init_configure_mem_clk(init);
	/* NVPLL */
	clock = nvbios_rd16(bios, mdata + 4) * 10;
	init_prog_pll(init, 0x680500, clock);
	/* MPLL */
	clock = nvbios_rd16(bios, mdata + 2) * 10;
	if (nvbios_rd08(bios, mdata) & 0x01)
		clock *= 2;
	init_prog_pll(init, 0x680504, clock);
	init_exec_force(init, false);
}
/**
* INIT_CONFIGURE_PREINIT - opcode 0x68
*
*/
static void
init_configure_preinit(struct nvbios_init *init)
{
	/* Legacy (BMP v2 and earlier): swizzle the strap register bits
	 * and cache them in scratch register CR3C for the later
	 * CONFIGURE_MEM/CONFIGURE_CLK opcodes.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 strap;
	trace("CONFIGURE_PREINIT\n");
	init->offset += 1;
	/* Opcode is obsolete on newer BIOSes; treat it as INIT_DONE. */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);
	strap = init_rd32(init, 0x101000);
	strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6);
	init_wrvgai(init, 0x03d4, 0x3c, strap);
	init_exec_force(init, false);
}
/**
* INIT_IO - opcode 0x69
*
*/
static void
init_io(struct nvbios_init *init)
{
	/* Read-modify-write an I/O port.  Contains a hardcoded display
	 * wake-up sequence for NV50+ when the VGA enable port is poked.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	/* mask and data are single-byte fields: read them with rd08.
	 * (The old rd16 reads truncated to the same byte on assignment,
	 * but read one byte past each field.)
	 */
	u8 mask = nvbios_rd08(bios, init->offset + 3);
	u8 data = nvbios_rd08(bios, init->offset + 4);
	u8 value;
	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
	init->offset += 5;
	/* ummm.. yes.. should really figure out wtf this is and why it's
	 * needed some day.. it's almost certainly wrong, but, it also
	 * somehow makes things work...
	 */
	if (bios->subdev.device->card_type >= NV_50 &&
	    port == 0x03c3 && data == 0x01) {
		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
		init_mask(init, 0x614900, 0xf0800000, 0x00800000);
		init_mask(init, 0x000200, 0x40000000, 0x00000000);
		mdelay(10);
		init_mask(init, 0x00e18c, 0x00020000, 0x00000000);
		init_mask(init, 0x000200, 0x40000000, 0x40000000);
		init_wr32(init, 0x614100, 0x00800018);
		init_wr32(init, 0x614900, 0x00800018);
		mdelay(10);
		init_wr32(init, 0x614100, 0x10000018);
		init_wr32(init, 0x614900, 0x10000018);
	}
	value = init_rdport(init, port) & mask;
	init_wrport(init, port, data | value);
}
/**
* INIT_SUB - opcode 0x6b
*
*/
static void
init_sub(struct nvbios_init *init)
{
	/* Execute numbered init script 'index' (looked up via
	 * init_script()), then continue after this opcode.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u16 addr, save;
	trace("SUB\t0x%02x\n", index);
	addr = init_script(bios, index);
	if (addr && init_exec(init)) {
		save = init->offset;
		init->offset = addr;
		/* NOTE(review): on parse failure we return without
		 * restoring ->offset — same caveat as INIT_SUB_DIRECT.
		 */
		if (nvbios_exec(init)) {
			error("error parsing sub-table\n");
			return;
		}
		init->offset = save;
	}
	init->offset += 2;
}
/**
* INIT_RAM_CONDITION - opcode 0x6d
*
*/
static void
init_ram_condition(struct nvbios_init *init)
{
	/* Disable further execution unless the masked framebuffer status
	 * register (0x100000) matches the expected value.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 mask = nvbios_rd08(bios, init->offset + 1);
	u8 want = nvbios_rd08(bios, init->offset + 2);

	trace("RAM_CONDITION\t"
	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, want);
	init->offset += 3;

	if ((init_rd32(init, 0x100000) & mask) == want)
		return;
	init_exec_set(init, false);
}
/**
* INIT_NV_REG - opcode 0x6e
*
*/
static void
init_nv_reg(struct nvbios_init *init)
{
	/* Read-modify-write: keep the bits selected by 'keep', OR in 'set'. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 keep = nvbios_rd32(bios, init->offset + 5);
	u32 set = nvbios_rd32(bios, init->offset + 9);

	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", addr, keep, set);
	init->offset += 13;

	init_mask(init, addr, ~keep, set);
}
/**
* INIT_MACRO - opcode 0x6f
*
*/
static void
init_macro(struct nvbios_init *init)
{
	/* Execute macro-table entry 'macro'; entries are 8 bytes:
	 * a register address and the value to write.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 macro = nvbios_rd08(bios, init->offset + 1);
	u16 table;
	trace("MACRO\t0x%02x\n", macro);
	table = init_macro_table(init);
	if (table) {
		u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
		u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
		init_wr32(init, addr, data);
	}
	init->offset += 2;
}
/**
* INIT_RESUME - opcode 0x72
*
*/
static void
init_resume(struct nvbios_init *init)
{
	trace("RESUME\n");
	init->offset += 1;
	/* Re-enable execution after a failed condition opcode. */
	init_exec_set(init, true);
}
/**
* INIT_STRAP_CONDITION - opcode 0x73
*
*/
static void
init_strap_condition(struct nvbios_init *init)
{
	/* Disable further execution unless the masked strap register
	 * (0x101000) matches the expected value.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 mask = nvbios_rd32(bios, init->offset + 1);
	u32 want = nvbios_rd32(bios, init->offset + 5);

	trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, want);
	init->offset += 9;

	if ((init_rd32(init, 0x101000) & mask) == want)
		return;
	init_exec_set(init, false);
}
/**
* INIT_TIME - opcode 0x74
*
*/
static void
init_time(struct nvbios_init *init)
{
	/* Delay for 'usec' microseconds; long waits are rounded up to
	 * whole milliseconds.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 usec = nvbios_rd16(bios, init->offset + 1);

	trace("TIME\t0x%04x\n", usec);
	init->offset += 3;

	if (!init_exec(init))
		return;
	if (usec >= 1000)
		mdelay((usec + 900) / 1000);
	else
		udelay(usec);
}
/**
* INIT_CONDITION - opcode 0x75
*
*/
static void
init_condition(struct nvbios_init *init)
{
	/* Disable further execution unless condition-table entry 'cond'
	 * holds.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 cond = nvbios_rd08(bios, init->offset + 1);

	trace("CONDITION\t0x%02x\n", cond);
	init->offset += 2;

	if (init_condition_met(init, cond))
		return;
	init_exec_set(init, false);
}
/**
* INIT_IO_CONDITION - opcode 0x76
*
*/
static void
init_io_condition(struct nvbios_init *init)
{
	/* Disable further execution unless IO condition-table entry
	 * 'cond' holds.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 cond = nvbios_rd08(bios, init->offset + 1);

	trace("IO_CONDITION\t0x%02x\n", cond);
	init->offset += 2;

	if (init_io_condition_met(init, cond))
		return;
	init_exec_set(init, false);
}
/**
* INIT_ZM_REG16 - opcode 0x77
*
*/
static void
init_zm_reg16(struct nvbios_init *init)
{
	/* Zero-mask write of a 16-bit value (the register access itself
	 * is still 32-bit wide).
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u16 val = nvbios_rd16(bios, init->offset + 5);

	trace("ZM_REG\tR[0x%06x] = 0x%04x\n", addr, val);
	init->offset += 7;

	init_wr32(init, addr, val);
}
/**
* INIT_INDEX_IO - opcode 0x78
*
*/
static void
init_index_io(struct nvbios_init *init)
{
	/* Read-modify-write an indexed VGA I/O register. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u16 port = nvbios_rd16(bios, init->offset + 1);
	/* index is a single-byte field: read it with rd08.  (The old
	 * rd16 read truncated to the same byte on assignment, but read
	 * one byte past the field.)
	 */
	u8 index = nvbios_rd08(bios, init->offset + 3);
	u8 mask = nvbios_rd08(bios, init->offset + 4);
	u8 data = nvbios_rd08(bios, init->offset + 5);
	u8 value;
	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
	      port, index, mask, data);
	init->offset += 6;
	value = init_rdvgai(init, port, index) & mask;
	init_wrvgai(init, port, index, data | value);
}
/**
* INIT_PLL - opcode 0x79
*
*/
static void
init_pll(struct nvbios_init *init)
{
	/* Program a PLL; the frequency is stored in 10kHz units. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 khz = nvbios_rd16(bios, init->offset + 5) * 10;

	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", addr, khz);
	init->offset += 7;

	init_prog_pll(init, addr, khz);
}
/**
* INIT_ZM_REG - opcode 0x7a
*
*/
static void
init_zm_reg(struct nvbios_init *init)
{
	/* Zero-mask (unconditional) 32-bit register write. */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 data = nvbios_rd32(bios, init->offset + 5);
	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
	init->offset += 9;
	/* Never let a write to the master enable register (0x000200)
	 * clear bit 0.
	 */
	if (addr == 0x000200)
		data |= 0x00000001;
	init_wr32(init, addr, data);
}
/**
* INIT_RAM_RESTRICT_PLL - opcde 0x87
*
*/
static void
init_ram_restrict_pll(struct nvbios_init *init)
{
	/* Program PLL 'type' with the frequency (kHz) from the table
	 * entry matching the board's RAM configuration strap.  All
	 * entries are consumed so the opcode length stays correct.
	 */
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 type = nvbios_rd08(bios, init->offset + 1);
	u8 count = init_ram_restrict_group_count(init);
	u8 strap = init_ram_restrict(init);
	u8 cconf;
	trace("RAM_RESTRICT_PLL\t0x%02x\n", type);
	init->offset += 2;
	for (cconf = 0; cconf < count; cconf++) {
		u32 freq = nvbios_rd32(bios, init->offset);
		if (cconf == strap) {
			trace("%dkHz *\n", freq);
			init_prog_pll(init, type, freq);
		} else {
			trace("%dkHz\n", freq);
		}
		init->offset += 4;
	}
}
/**
 * INIT_RESET_BEGUN - opcode 0x8c
 *
 * No-op marker (start of reset sequence); just consumes the opcode byte.
 */
static void
init_reset_begun(struct nvbios_init *init)
{
	trace("RESET_BEGUN\n");
	init->offset += 1;
}
/**
 * INIT_RESET_END - opcode 0x8d
 *
 * No-op marker (end of reset sequence); just consumes the opcode byte.
 */
static void
init_reset_end(struct nvbios_init *init)
{
	trace("RESET_END\n");
	init->offset += 1;
}
/**
 * INIT_GPIO - opcode 0x8e
 *
 * Reset every GPIO function to its VBIOS-default state.
 */
static void
init_gpio(struct nvbios_init *init)
{
	struct nvkm_gpio *gpio = init->subdev->device->gpio;

	trace("GPIO\n");
	init->offset += 1;

	if (!init_exec(init))
		return;

	nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
}
/**
 * INIT_RAM_RESTRICT_ZM_REG_GROUP - opcode 0x8f
 *
 * For each of @num registers (stride @incr), writes the value selected
 * for the detected RAM configuration from a per-configuration list.
 */
static void
init_ram_restrict_zm_reg_group(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u8 incr = nvbios_rd08(bios, init->offset + 5);
	u8 num = nvbios_rd08(bios, init->offset + 6);
	u8 count = init_ram_restrict_group_count(init);	/* values per reg */
	u8 index = init_ram_restrict(init);		/* selected value */
	u8 i, j;
	trace("RAM_RESTRICT_ZM_REG_GROUP\t"
	      "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
	init->offset += 7;
	for (i = 0; i < num; i++) {
		trace("\tR[0x%06x] = {\n", addr);
		for (j = 0; j < count; j++) {
			u32 data = nvbios_rd32(bios, init->offset);
			if (j == index) {
				trace("\t\t0x%08x *\n", data);
				init_wr32(init, addr, data);
			} else {
				trace("\t\t0x%08x\n", data);
			}
			/* all entries consumed to keep the stream in sync */
			init->offset += 4;
		}
		trace("\t}\n");
		addr += incr;
	}
}
/**
 * INIT_COPY_ZM_REG - opcode 0x90
 *
 * Copy the current value of one register into another.
 */
static void
init_copy_zm_reg(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 src = nvbios_rd32(bios, init->offset + 1);
	u32 dst = nvbios_rd32(bios, init->offset + 5);
	u32 val;

	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dst, src);
	init->offset += 9;

	val = init_rd32(init, src);
	init_wr32(init, dst, val);
}
/**
 * INIT_ZM_REG_GROUP - opcode 0x91
 *
 * Write a list of 32-bit values, one after another, to a single register.
 */
static void
init_zm_reg_group(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 reg = nvbios_rd32(bios, init->offset + 1);
	u8 num = nvbios_rd08(bios, init->offset + 5);
	u8 i;

	trace("ZM_REG_GROUP\tR[0x%06x] =\n", reg);
	init->offset += 6;

	for (i = 0; i < num; i++) {
		u32 val = nvbios_rd32(bios, init->offset);

		trace("\t0x%08x\n", val);
		init_wr32(init, reg, val);
		init->offset += 4;
	}
}
/**
 * INIT_XLAT - opcode 0x96
 *
 * Reads the source register, shifts and masks out a field, translates
 * it through lookup table @index (init_xlat_), shifts the result into
 * place and merges it into the destination register under ~dmask.
 */
static void
init_xlat(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 saddr = nvbios_rd32(bios, init->offset + 1);
	u8 sshift = nvbios_rd08(bios, init->offset + 5);	/* bit7 set = left shift */
	u8 smask = nvbios_rd08(bios, init->offset + 6);
	u8 index = nvbios_rd08(bios, init->offset + 7);
	u32 daddr = nvbios_rd32(bios, init->offset + 8);
	u32 dmask = nvbios_rd32(bios, init->offset + 12);
	u8 shift = nvbios_rd08(bios, init->offset + 16);
	u32 data;
	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
	      "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n",
	      daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>",
	      (sshift & 0x80) ? (0x100 - sshift) : sshift, smask, shift);
	init->offset += 17;
	data = init_shift(init_rd32(init, saddr), sshift) & smask;
	data = init_xlat_(init, index, data) << shift;
	init_mask(init, daddr, ~dmask, data);
}
/**
 * INIT_ZM_MASK_ADD - opcode 0x97
 *
 * Add a constant to the bits of a register outside @mask, leaving the
 * masked bits unchanged.
 */
static void
init_zm_mask_add(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u32 mask = nvbios_rd32(bios, init->offset + 5);
	u32 add = nvbios_rd32(bios, init->offset + 9);
	u32 keep, sum;

	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
	init->offset += 13;

	sum = init_rd32(init, addr);
	keep = sum & mask;
	sum = (sum + add) & ~mask;
	init_wr32(init, addr, keep | sum);
}
/**
 * INIT_AUXCH - opcode 0x98
 *
 * Read-modify-write a sequence of DP AUX channel registers at @addr.
 */
static void
init_auxch(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u8 num = nvbios_rd08(bios, init->offset + 5);
	u8 i;

	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, num);
	init->offset += 6;

	for (i = 0; i < num; i++) {
		u8 mask = nvbios_rd08(bios, init->offset + 0);
		u8 data = nvbios_rd08(bios, init->offset + 1);
		u8 keep;

		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
		keep = init_rdauxr(init, addr) & mask;
		init_wrauxr(init, addr, keep | data);
		init->offset += 2;
	}
}
/**
 * INIT_ZM_AUXCH - opcode 0x99
 *
 * Write a sequence of bytes to a single DP AUX channel address.
 */
static void
init_zm_auxch(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u32 addr = nvbios_rd32(bios, init->offset + 1);
	u8 count = nvbios_rd08(bios, init->offset + 5);
	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
	init->offset += 6;
	while (count--) {
		u8 data = nvbios_rd08(bios, init->offset + 0);
		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
		init_wrauxr(init, addr, data);
		init->offset += 1;
	}
}
/**
 * INIT_I2C_LONG_IF - opcode 0x9a
 *
 * Reads one byte from a device behind a 16-bit i2c register address and
 * disables further opcode execution unless (value & mask) == data.
 */
static void
init_i2c_long_if(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	u8 index = nvbios_rd08(bios, init->offset + 1);
	u8 addr = nvbios_rd08(bios, init->offset + 2) >> 1;	/* 8-bit -> 7-bit address */
	u8 reglo = nvbios_rd08(bios, init->offset + 3);
	u8 reghi = nvbios_rd08(bios, init->offset + 4);
	u8 mask = nvbios_rd08(bios, init->offset + 5);
	u8 data = nvbios_rd08(bios, init->offset + 6);
	struct i2c_adapter *adap;
	trace("I2C_LONG_IF\t"
	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
	      index, addr, reglo, reghi, mask, data);
	init->offset += 7;
	adap = init_i2c(init, index);
	if (adap) {
		/* write the register address high byte first, then read one byte */
		u8 i[2] = { reghi, reglo };
		u8 o[1] = {};
		struct i2c_msg msg[] = {
			{ .addr = addr, .flags = 0, .len = 2, .buf = i },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o }
		};
		int ret;
		ret = i2c_transfer(adap, msg, 2);
		if (ret == 2 && ((o[0] & mask) == data))
			return;	/* condition met, keep executing */
	}
	init_exec_set(init, false);
}
/**
 * INIT_GPIO_NE - opcode 0xa9
 *
 * "GPIO, Not Excluded": resets every DCB GPIO function whose function
 * id does NOT appear in the opcode's exclusion list.
 */
static void
init_gpio_ne(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;
	struct nvkm_gpio *gpio = bios->subdev.device->gpio;
	struct dcb_gpio_func func;
	u8 count = nvbios_rd08(bios, init->offset + 1);
	u8 idx = 0, ver, len;
	u16 data, i;
	trace("GPIO_NE\t");
	init->offset += 2;
	/* dump the raw exclusion list */
	for (i = init->offset; i < init->offset + count; i++)
		cont("0x%02x ", nvbios_rd08(bios, i));
	cont("\n");
	while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
		if (func.func != DCB_GPIO_UNUSED) {
			/* linear scan; i lands past the end if not excluded */
			for (i = init->offset; i < init->offset + count; i++) {
				if (func.func == nvbios_rd08(bios, i))
					break;
			}
			trace("\tFUNC[0x%02x]", func.func);
			if (i == (init->offset + count)) {
				cont(" *");
				if (init_exec(init))
					nvkm_gpio_reset(gpio, func.func);
			}
			cont("\n");
		}
	}
	init->offset += count;
}
/* Jump table from VBIOS init-script opcode byte to its handler; holes
 * (NULL .exec) are opcodes this parser does not implement and cause
 * nvbios_exec() to abort with -EINVAL.
 */
static struct nvbios_init_opcode {
	void (*exec)(struct nvbios_init *);
} init_opcode[] = {
	[0x32] = { init_io_restrict_prog },
	[0x33] = { init_repeat },
	[0x34] = { init_io_restrict_pll },
	[0x36] = { init_end_repeat },
	[0x37] = { init_copy },
	[0x38] = { init_not },
	[0x39] = { init_io_flag_condition },
	[0x3a] = { init_generic_condition },
	[0x3b] = { init_io_mask_or },
	[0x3c] = { init_io_or },
	[0x47] = { init_andn_reg },
	[0x48] = { init_or_reg },
	[0x49] = { init_idx_addr_latched },
	[0x4a] = { init_io_restrict_pll2 },
	[0x4b] = { init_pll2 },
	[0x4c] = { init_i2c_byte },
	[0x4d] = { init_zm_i2c_byte },
	[0x4e] = { init_zm_i2c },
	[0x4f] = { init_tmds },
	[0x50] = { init_zm_tmds_group },
	[0x51] = { init_cr_idx_adr_latch },
	[0x52] = { init_cr },
	[0x53] = { init_zm_cr },
	[0x54] = { init_zm_cr_group },
	[0x56] = { init_condition_time },
	[0x57] = { init_ltime },
	[0x58] = { init_zm_reg_sequence },
	[0x59] = { init_pll_indirect },
	[0x5a] = { init_zm_reg_indirect },
	[0x5b] = { init_sub_direct },
	[0x5c] = { init_jump },
	[0x5e] = { init_i2c_if },
	[0x5f] = { init_copy_nv_reg },
	[0x62] = { init_zm_index_io },
	[0x63] = { init_compute_mem },
	[0x65] = { init_reset },
	[0x66] = { init_configure_mem },
	[0x67] = { init_configure_clk },
	[0x68] = { init_configure_preinit },
	[0x69] = { init_io },
	[0x6b] = { init_sub },
	[0x6d] = { init_ram_condition },
	[0x6e] = { init_nv_reg },
	[0x6f] = { init_macro },
	[0x71] = { init_done },
	[0x72] = { init_resume },
	[0x73] = { init_strap_condition },
	[0x74] = { init_time },
	[0x75] = { init_condition },
	[0x76] = { init_io_condition },
	[0x77] = { init_zm_reg16 },
	[0x78] = { init_index_io },
	[0x79] = { init_pll },
	[0x7a] = { init_zm_reg },
	[0x87] = { init_ram_restrict_pll },
	[0x8c] = { init_reset_begun },
	[0x8d] = { init_reset_end },
	[0x8e] = { init_gpio },
	[0x8f] = { init_ram_restrict_zm_reg_group },
	[0x90] = { init_copy_zm_reg },
	[0x91] = { init_zm_reg_group },
	[0x92] = { init_reserved },
	[0x96] = { init_xlat },
	[0x97] = { init_zm_mask_add },
	[0x98] = { init_auxch },
	[0x99] = { init_zm_auxch },
	[0x9a] = { init_i2c_long_if },
	[0xa9] = { init_gpio_ne },
	[0xaa] = { init_reserved },
};
/* Execute the init script at init->offset until an opcode clears the
 * offset (INIT_DONE) or an unimplemented opcode is hit.  Scripts may
 * recurse (INIT_SUB etc.); init->nested tracks the recursion depth.
 */
int
nvbios_exec(struct nvbios_init *init)
{
	struct nvkm_bios *bios = init->subdev->device->bios;

	init->nested++;
	while (init->offset) {
		u8 opcode = nvbios_rd08(bios, init->offset);
		if (opcode >= ARRAY_SIZE(init_opcode) ||
		    !init_opcode[opcode].exec) {
			error("unknown opcode 0x%02x\n", opcode);
			/* keep the nesting counter balanced on the error
			 * path, previously it leaked an increment here
			 */
			init->nested--;
			return -EINVAL;
		}

		init_opcode[opcode].exec(init);
	}
	init->nested--;
	return 0;
}
/* Locate and (optionally) execute every init script in the VBIOS.
 * Note: nvbios_init() is a macro; its final argument is a statement
 * applied to a local "init" descriptor before parsing begins.
 */
int
nvbios_post(struct nvkm_subdev *subdev, bool execute)
{
	struct nvkm_bios *bios = subdev->device->bios;
	int ret = 0;
	int i = -1;
	u16 data;
	if (execute)
		nvkm_debug(subdev, "running init tables\n");
	while (!ret && (data = (init_script(bios, ++i)))) {
		ret = nvbios_init(subdev, data,
			init.execute = execute ? 1 : 0;
		);
	}
	/* the vbios parser will run this right after the normal init
	 * tables, whereas the binary driver appears to run it later.
	 */
	if (!ret && (data = init_unknown_script(bios))) {
		ret = nvbios_init(subdev, data,
			init.execute = execute ? 1 : 0;
		);
	}
	return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
/* Locate the connector table: DCB 3.0+ (header >= 0x16) keeps a pointer
 * to it at DCB offset +0x14.  Fills version/header/count/stride on
 * success; returns 0 when absent.
 */
u32
nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
	u32 conn;

	if (!dcb || *ver < 0x30 || *hdr < 0x16)
		return 0x00000000;

	conn = nvbios_rd16(bios, dcb + 0x14);
	if (!conn)
		return 0x00000000;

	*ver = nvbios_rd08(bios, conn + 0);
	*hdr = nvbios_rd08(bios, conn + 1);
	*cnt = nvbios_rd08(bios, conn + 2);
	*len = nvbios_rd08(bios, conn + 3);
	return conn;
}
/* As nvbios_connTe(), but additionally validates the table version.
 * NOTE(review): @info is zeroed but no fields are populated for the
 * known versions — only presence/version is checked here.
 */
u32
nvbios_connTp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	      struct nvbios_connT *info)
{
	u32 data = nvbios_connTe(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	/* "!!data * *ver" folds a missing table into the default case */
	switch (!!data * *ver) {
	case 0x30:
	case 0x40:
		return data;
	default:
		break;
	}
	return 0x00000000;
}
/* Offset of connector table entry @idx, or 0 when out of range. */
u32
nvbios_connEe(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
	u8 hdr, cnt;
	u32 table = nvbios_connTe(bios, ver, &hdr, &cnt, len);

	if (!table || idx >= cnt)
		return 0x00000000;

	return table + hdr + idx * *len;
}
/* Parse connector entry @idx into @info.  The hpd/dp fields are
 * scattered over several bytes in v4.0 entries and are accumulated
 * piecewise below.
 */
u32
nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
	      struct nvbios_connE *info)
{
	u32 data = nvbios_connEe(bios, idx, ver, len);
	memset(info, 0x00, sizeof(*info));
	/* "!!data * *ver" folds a missing entry into the default case */
	switch (!!data * *ver) {
	case 0x30:
	case 0x40:
		info->type = nvbios_rd08(bios, data + 0x00);
		info->location = nvbios_rd08(bios, data + 0x01) & 0x0f;
		info->hpd = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
		info->dp = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
		/* short entries only carry the first two bytes */
		if (*len < 4)
			return data;
		info->hpd |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
		info->dp |= nvbios_rd08(bios, data + 0x02) & 0x0c;
		info->di = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
		info->hpd |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
		info->sr = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
		info->lcdid = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
		return data;
	default:
		break;
	}
	return 0x00000000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/image.h>
/* Translate an absolute VBIOS offset into the shadow buffer, following
 * the second-image redirection for offsets past image 0, and bounds-check
 * a @size byte access.  Returns false (after logging both the original
 * and translated offsets) when the access would run off the end.
 */
static bool
nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
{
	u32 p = *addr;	/* original offset, kept for the error message */
	if (*addr >= bios->image0_size && bios->imaged_addr) {
		*addr -= bios->image0_size;
		*addr += bios->imaged_addr;
	}
	if (unlikely(*addr + size > bios->size)) {
		nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
		return false;
	}
	return true;
}
/* Fetch one byte of VBIOS data; returns 0x00 for out-of-bounds reads. */
u8
nvbios_rd08(struct nvkm_bios *bios, u32 addr)
{
	if (unlikely(!nvbios_addr(bios, &addr, 1)))
		return 0x00;
	return bios->data[addr];
}
/* Fetch a little-endian 16-bit word of VBIOS data; 0 when out of bounds. */
u16
nvbios_rd16(struct nvkm_bios *bios, u32 addr)
{
	if (unlikely(!nvbios_addr(bios, &addr, 2)))
		return 0x0000;
	return get_unaligned_le16(&bios->data[addr]);
}
/* Fetch a little-endian 32-bit word of VBIOS data; 0 when out of bounds. */
u32
nvbios_rd32(struct nvkm_bios *bios, u32 addr)
{
	if (unlikely(!nvbios_addr(bios, &addr, 4)))
		return 0x00000000;
	return get_unaligned_le32(&bios->data[addr]);
}
/* Sum @size bytes modulo 256; a valid ROM image checksums to zero. */
u8
nvbios_checksum(const u8 *data, int size)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];
	return sum;
}
/* Naive substring search for @str (length @len) inside @data; returns
 * the offset of the first match, or 0 when not found (offset 0 is never
 * a valid hit for the signatures this is used with).
 */
u16
nvbios_findstr(const u8 *data, int size, const char *str, int len)
{
	int base;

	for (base = 0; base + len <= size; base++) {
		int k = 0;

		while (k < len && (char)data[base + k] == str[k])
			k++;
		if (k == len)
			return base;
	}
	return 0;
}
/* memcmp-style compare of @len bytes of VBIOS data against @str;
 * returns the difference of the first mismatching pair, else 0.
 */
int
nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
{
	u32 i;

	for (i = 0; i < len; i++) {
		unsigned char a = nvbios_rd08(bios, addr + i);
		unsigned char b = str[i];

		if (a != b)
			return a - b;
	}
	return 0;
}
/* Grow the shadow buffer to at least @length bytes, preserving existing
 * contents.  Returns 1 if the buffer was reallocated, 0 if it was
 * already large enough, -ENOMEM on allocation failure (original buffer
 * is kept intact in that case).
 */
int
nvbios_extend(struct nvkm_bios *bios, u32 length)
{
	if (bios->size < length) {
		u8 *prev = bios->data;

		/* kzalloc so the newly-exposed tail reads back as zeroes
		 * through nvbios_rd*() instead of uninitialised heap memory
		 */
		if (!(bios->data = kzalloc(length, GFP_KERNEL))) {
			bios->data = prev;
			return -ENOMEM;
		}

		memcpy(bios->data, prev, bios->size);
		bios->size = length;
		kfree(prev);
		return 1;
	}
	return 0;
}
/* Subdev destructor: free the shadowed ROM image; returns the object so
 * the core can kfree() it.
 */
static void *
nvkm_bios_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_bios *bios = nvkm_bios(subdev);
	kfree(bios->data);
	return bios;
}
/* Subdev ops: the BIOS subdev only needs a destructor. */
static const struct nvkm_subdev_func
nvkm_bios = {
	.dtor = nvkm_bios_dtor,
};
/* Create the BIOS subdev: shadow the ROM, locate the secondary image
 * (type 0xe0) used for pointer fixups, detect the BMP/BIT structure
 * type, and read out the VBIOS version number.
 */
int
nvkm_bios_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bios **pbios)
{
	struct nvkm_bios *bios;
	struct nvbios_image image;
	struct bit_entry bit_i;
	int ret, idx = 0;
	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_bios, device, type, inst, &bios->subdev);
	ret = nvbios_shadow(bios);
	if (ret)
		return ret;
	/* Some tables have weird pointers that need adjustment before
	 * they're dereferenced. I'm not entirely sure why...
	 */
	if (nvbios_image(bios, idx++, &image)) {
		bios->image0_size = image.size;
		while (nvbios_image(bios, idx++, &image)) {
			if (image.type == 0xe0) {
				bios->imaged_addr = image.base;
				break;
			}
		}
	}
	/* detect type of vbios we're dealing with */
	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\x7f""NV\0", 5);
	if (bios->bmp_offset) {
		nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
			   bmp_version(bios) >> 8,
			   bmp_version(bios) & 0xff);
	}
	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\xb8""BIT", 5);
	if (bios->bit_offset)
		nvkm_debug(&bios->subdev, "BIT signature found\n");
	/* determine the vbios version number */
	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
		bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
		bios->version.chip = nvbios_rd08(bios, bit_i.offset + 2);
		bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
		bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
		bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
	} else
	if (bmp_version(bios)) {
		/* BMP keeps the version bytes at fixed offsets instead */
		bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
		bios->version.chip = nvbios_rd08(bios, bios->bmp_offset + 12);
		bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
		bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
	}
	nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
		  bios->version.major, bios->version.chip,
		  bios->version.minor, bios->version.micro, bios->version.patch);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/i2c.h>
/* Locate the DCB i2c table.  DCB 3.0+ tables carry their own header
 * (version/header/count/stride read from the table itself); older DCBs
 * use an implicit fixed layout.  CCB-format tables (>= 4.2) are not
 * supported.  Returns the table offset or 0.
 */
u16
dcb_i2c_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 i2c = 0x0000;
	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
	if (dcb) {
		if (*ver >= 0x15)
			i2c = nvbios_rd16(bios, dcb + 2);
		if (*ver >= 0x30)
			i2c = nvbios_rd16(bios, dcb + 4);
	}

	if (i2c && *ver >= 0x42) {
		nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
		return 0x0000;
	}

	if (i2c && *ver >= 0x30) {
		*ver = nvbios_rd08(bios, i2c + 0);
		*hdr = nvbios_rd08(bios, i2c + 1);
		*cnt = nvbios_rd08(bios, i2c + 2);
		*len = nvbios_rd08(bios, i2c + 3);
	} else {
		/* DCB 1.5-2.2: implicit layout; *ver keeps the DCB version
		 * (the old "*ver = *ver;" no-op self-assignment is gone)
		 */
		*hdr = 0;
		*cnt = 16;
		*len = 4;
	}

	return i2c;
}
/* Offset of i2c table entry @idx, or 0 when the index is out of range. */
u16
dcb_i2c_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
	u8 hdr, cnt;
	u16 table = dcb_i2c_table(bios, ver, &hdr, &cnt, len);

	if (!table || idx >= cnt)
		return 0x0000;

	return table + hdr + idx * *len;
}
/* Decode i2c table entry @idx into @info, normalising across DCB
 * generations; falls back to the BMP fixed-offset register pairs on
 * pre-DCB VBIOS.  Returns 0 on success (including "unused" entries),
 * -ENOENT when no entry exists.
 */
int
dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	u8 ver, len;
	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
	if (ent) {
		if (ver >= 0x41) {
			u32 ent_value = nvbios_rd32(bios, ent);
			u8 i2c_port = (ent_value >> 0) & 0x1f;
			u8 dpaux_port = (ent_value >> 5) & 0x1f;
			/* value 0x1f means unused according to DCB 4.x spec */
			if (i2c_port == 0x1f && dpaux_port == 0x1f)
				info->type = DCB_I2C_UNUSED;
			else
				info->type = DCB_I2C_PMGR;
		} else
		if (ver >= 0x30) {
			info->type = nvbios_rd08(bios, ent + 0x03);
		} else {
			/* pre-3.0: only the low 3 bits encode the type */
			info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
			if (info->type == 0x07)
				info->type = DCB_I2C_UNUSED;
		}
		/* defaults; the per-type cases below fill in what applies */
		info->drive = DCB_I2C_UNUSED;
		info->sense = DCB_I2C_UNUSED;
		info->share = DCB_I2C_UNUSED;
		info->auxch = DCB_I2C_UNUSED;
		switch (info->type) {
		case DCB_I2C_NV04_BIT:
			info->drive = nvbios_rd08(bios, ent + 0);
			info->sense = nvbios_rd08(bios, ent + 1);
			return 0;
		case DCB_I2C_NV4E_BIT:
			info->drive = nvbios_rd08(bios, ent + 1);
			return 0;
		case DCB_I2C_NVIO_BIT:
			info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
			if (nvbios_rd08(bios, ent + 1) & 0x01)
				info->share = nvbios_rd08(bios, ent + 1) >> 1;
			return 0;
		case DCB_I2C_NVIO_AUX:
			info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
			if (nvbios_rd08(bios, ent + 1) & 0x01)
				info->share = info->auxch;
			return 0;
		case DCB_I2C_PMGR:
			info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
			if (info->drive == 0x1f)
				info->drive = DCB_I2C_UNUSED;
			info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
			if (info->auxch == 0x1f)
				info->auxch = DCB_I2C_UNUSED;
			info->share = info->auxch;
			return 0;
		case DCB_I2C_UNUSED:
			return 0;
		default:
			nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
			info->type = DCB_I2C_UNUSED;
			return 0;
		}
	}
	if (bios->bmp_offset && idx < 2) {
		/* BMP (from v4.0 has i2c info in the structure, it's in a
		 * fixed location on earlier VBIOS
		 */
		if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
			ent = 0x0048;
		else
			ent = 0x0036 + bios->bmp_offset;
		if (idx == 0) {
			/* zero means "use the default CRTC register pair" */
			info->drive = nvbios_rd08(bios, ent + 4);
			if (!info->drive) info->drive = 0x3f;
			info->sense = nvbios_rd08(bios, ent + 5);
			if (!info->sense) info->sense = 0x3e;
		} else
		if (idx == 1) {
			info->drive = nvbios_rd08(bios, ent + 6);
			if (!info->drive) info->drive = 0x37;
			info->sense = nvbios_rd08(bios, ent + 7);
			if (!info->sense) info->sense = 0x36;
		}
		info->type = DCB_I2C_NV04_BIT;
		info->share = DCB_I2C_UNUSED;
		return 0;
	}
	return -ENOENT;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/mxm.h>
/* Locate the BIT 'x' (MXM) table; returns its offset, or 0 when absent
 * or of an unrecognised version.
 */
u16
mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct bit_entry x;

	if (bit_entry(bios, 'x', &x)) {
		nvkm_debug(subdev, "BIT 'x' table not present\n");
		return 0x0000;
	}

	*ver = x.version;
	*hdr = x.length;

	if (*ver == 1 && *hdr >= 3)
		return x.offset;

	nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
	return 0x0000;
}
/* These map MXM v2.x digital connection values to the appropriate SOR/link,
 * hopefully they're correct for all boards within the same chipset...
 *
 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
 *
 * NOTE(review): each entry presumably packs SOR index and link in the two
 * nibbles — confirm against mxm_sor_map() consumers before relying on it.
 */
static u8 g84_sor_map[16] = {
	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static u8 g92_sor_map[16] = {
	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static u8 g94_sor_map[16] = {
	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
};
static u8 g98_sor_map[16] = {
	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* Translate an MXM digital-connection value to a SOR/link encoding,
 * preferring the VBIOS-provided map table (MXM v3.x, pointer at +4),
 * falling back to the hardcoded per-chipset tables above for v2.x.
 */
u8
mxm_sor_map(struct nvkm_bios *bios, u8 conn)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	u8 ver, hdr;
	u16 mxm = mxm_table(bios, &ver, &hdr);
	if (mxm && hdr >= 6) {
		u16 map = nvbios_rd16(bios, mxm + 4);
		if (map) {
			ver = nvbios_rd08(bios, map);
			if (ver == 0x10 || ver == 0x11) {
				/* header byte 3 = entry count, byte 1 = header size */
				if (conn < nvbios_rd08(bios, map + 3)) {
					map += nvbios_rd08(bios, map + 1);
					map += conn;
					return nvbios_rd08(bios, map);
				}
				return 0x00;
			}
			nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
		}
	}
	if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
		return g84_sor_map[conn];
	if (bios->version.chip == 0x92)
		return g92_sor_map[conn];
	if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
		return g94_sor_map[conn];
	if (bios->version.chip == 0x98)
		return g98_sor_map[conn];
	nvkm_warn(subdev, "missing sor map\n");
	return 0x00;
}
/* Translate an MXM DDC port to a DCB i2c index via the MXM v3 DDC map
 * table (pointer at +6) when present; otherwise use the MXM v2.x
 * identity encoding.
 */
u8
mxm_ddc_map(struct nvkm_bios *bios, u8 port)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	u8 ver, hdr;
	u16 mxm = mxm_table(bios, &ver, &hdr);

	if (mxm && hdr >= 8) {
		u16 map = nvbios_rd16(bios, mxm + 6);
		if (map) {
			ver = nvbios_rd08(bios, map);
			if (ver != 0x10) {
				nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
			} else {
				u8 entries = nvbios_rd08(bios, map + 3);

				if (port >= entries)
					return 0x00;
				map += nvbios_rd08(bios, map + 1) + port;
				return nvbios_rd08(bios, map);
			}
		}
	}

	/* v2.x: directly write port as dcb i2cidx */
	return (port << 4) | port;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/cstep.h>
/* Locate the C-state table via BIT 'P' v2 (pointer at +0x34).  On
 * success returns its offset and fills header size, coarse entry
 * count/stride (cnt/len) and fine-grained entry count/stride (xnr/xsz);
 * returns 0 otherwise.
 */
u32
nvbios_cstepTe(struct nvkm_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
	struct bit_entry bit_P;
	u32 cstep = 0;
	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2 && bit_P.length >= 0x38)
			cstep = nvbios_rd32(bios, bit_P.offset + 0x34);
		if (cstep) {
			*ver = nvbios_rd08(bios, cstep + 0);
			switch (*ver) {
			case 0x10:
				/* note: count at +3, stride at +2 (swapped order) */
				*hdr = nvbios_rd08(bios, cstep + 1);
				*cnt = nvbios_rd08(bios, cstep + 3);
				*len = nvbios_rd08(bios, cstep + 2);
				*xnr = nvbios_rd08(bios, cstep + 5);
				*xsz = nvbios_rd08(bios, cstep + 4);
				return cstep;
			default:
				break;
			}
		}
	}
	return 0;
}
/* Offset of coarse C-state entry @idx, or 0 when out of range.  On
 * success *hdr is overwritten with the entry stride.
 */
u32
nvbios_cstepEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
	u8 cnt, len, xnr, xsz;
	u32 table = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);

	if (!table || idx >= cnt)
		return 0;

	table += *hdr + idx * len;
	*hdr = len;
	return table;
}
/* Parse coarse C-state entry @idx into @info; returns the entry offset
 * or 0 (info is zeroed either way).
 */
u32
nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
	       struct nvbios_cstepE *info)
{
	u32 entry = nvbios_cstepEe(bios, idx, ver, hdr);

	memset(info, 0x00, sizeof(*info));
	if (!entry)
		return 0;

	info->pstate = (nvbios_rd16(bios, entry + 0x00) & 0x01e0) >> 5;
	info->index = nvbios_rd08(bios, entry + 0x03);
	return entry;
}
/* Find the first coarse C-state entry matching @pstate; returns its
 * offset (with @info filled), or 0 when none matches.
 */
u32
nvbios_cstepEm(struct nvkm_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
	       struct nvbios_cstepE *info)
{
	u32 idx = 0, entry;

	for (;;) {
		entry = nvbios_cstepEp(bios, idx++, ver, hdr, info);
		if (!entry || info->pstate == pstate)
			return entry;
	}
}
/* Offset of fine-grained C-state entry @idx (stored after all coarse
 * entries); *hdr is overwritten with the entry stride on success.
 */
u32
nvbios_cstepXe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
	u8 cnt, len, xnr, xsz;
	u32 table = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);

	if (!table || idx >= xnr)
		return 0;

	table += *hdr + cnt * len + idx * xsz;
	*hdr = xsz;
	return table;
}
/* Parse fine-grained C-state entry @idx into @info; returns the entry
 * offset or 0 (info is zeroed either way).
 */
u32
nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
	       struct nvbios_cstepX *info)
{
	u32 entry = nvbios_cstepXe(bios, idx, ver, hdr);

	memset(info, 0x00, sizeof(*info));
	if (!entry)
		return 0;

	info->freq = nvbios_rd16(bios, entry + 0x00) * 1000;
	info->unkn[0] = nvbios_rd08(bios, entry + 0x02);
	info->unkn[1] = nvbios_rd08(bios, entry + 0x03);
	info->voltage = nvbios_rd08(bios, entry + 0x04);
	return entry;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
#include <subdev/bios/xpio.h>
/* Locate the XPIO descriptor table hanging off the DCB GPIO table
 * (v4.0+, header >= 6 bytes, pointer at +0x04).
 * NOTE(review): ver/hdr/cnt/len are re-read from @data (the GPIO table
 * header) rather than from @xpio — looks suspicious; confirm against
 * the XPIO table layout before relying on these values.
 */
static u16
dcb_xpiod_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
	if (data && *ver >= 0x40 && *hdr >= 0x06) {
		u16 xpio = nvbios_rd16(bios, data + 0x04);
		if (xpio) {
			*ver = nvbios_rd08(bios, data + 0x00);
			*hdr = nvbios_rd08(bios, data + 0x01);
			*cnt = nvbios_rd08(bios, data + 0x02);
			*len = nvbios_rd08(bios, data + 0x03);
			return xpio;
		}
	}
	return 0x0000;
}
/* Locate XPIO table @idx from the XPIO descriptor table.
 * NOTE(review): as in dcb_xpiod_table(), the header fields are re-read
 * from @data (the descriptor table) rather than from @xpio — verify.
 */
u16
dcb_xpio_table(struct nvkm_bios *bios, u8 idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
	if (data && idx < *cnt) {
		u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
		if (xpio) {
			*ver = nvbios_rd08(bios, data + 0x00);
			*hdr = nvbios_rd08(bios, data + 0x01);
			*cnt = nvbios_rd08(bios, data + 0x02);
			*len = nvbios_rd08(bios, data + 0x03);
			return xpio;
		}
	}
	return 0x0000;
}
/* Parse XPIO entry @idx into @info.
 * NOTE(review): this always returns 0x0000 even on success, and the
 * read at +0x06 is only guarded by *len >= 6 (seven bytes are needed).
 * Both look like latent bugs, but callers may depend on the current
 * behaviour — confirm before changing.
 */
u16
dcb_xpio_parse(struct nvkm_bios *bios, u8 idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *info)
{
	u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
	if (data && *len >= 6) {
		info->type = nvbios_rd08(bios, data + 0x04);
		info->addr = nvbios_rd08(bios, data + 0x05);
		info->flags = nvbios_rd08(bios, data + 0x06);
	}
	return 0x0000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include <core/pci.h>
/* Per-instance state shared by the PCIROM and PLATFORM shadow sources. */
struct priv {
	struct pci_dev *pdev;	/* device whose expansion ROM is mapped */
	void __iomem *rom;	/* mapped ROM base (pci_map_rom or ioremap) */
	size_t size;		/* size of the mapping in bytes */
};
/* Copy 'length' bytes at 'offset' from the mapped ROM into the shadow
 * buffer (bios->data).  Returns the number of bytes copied, or 0 if the
 * requested range falls outside the mapping.
 */
static u32
pcirom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
	struct priv *priv = data;
	/* Split comparison avoids u32 wrap-around in "offset + length",
	 * which could otherwise let an out-of-range request pass the check.
	 */
	if (offset < priv->size && length <= priv->size - offset) {
		memcpy_fromio(bios->data + offset, priv->rom + offset, length);
		return length;
	}
	return 0;
}
/* Release resources acquired by pcirom_init(): unmap and disable the
 * expansion ROM, then free the private state.
 */
static void
pcirom_fini(void *data)
{
	struct priv *p = data;

	pci_unmap_rom(p->pdev, p->rom);
	pci_disable_rom(p->pdev);
	kfree(p);
}
/* Enable and map the device's PCI expansion ROM.  Returns a struct priv
 * on success, or an ERR_PTR on failure (-ENODEV when there is no PCI
 * device, -ENOMEM/-EFAULT for allocation/mapping failures).
 */
static void *
pcirom_init(struct nvkm_bios *bios, const char *name)
{
	struct nvkm_device *device = bios->subdev.device;
	struct priv *priv = NULL;
	struct pci_dev *pdev;
	int ret;

	if (!device->func->pci)
		return ERR_PTR(-ENODEV);
	pdev = device->func->pci(device)->pdev;

	ret = pci_enable_rom(pdev);
	if (ret)
		return ERR_PTR(ret);

	ret = -ENOMEM;
	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_disable;

	ret = -EFAULT;
	priv->rom = pci_map_rom(pdev, &priv->size);
	if (!priv->rom)
		goto out_free;

	priv->pdev = pdev;
	return priv;

out_free:
	kfree(priv);
out_disable:
	pci_disable_rom(pdev);
	return ERR_PTR(ret);
}
/* VBIOS shadow source backed by the PCI expansion ROM BAR. */
const struct nvbios_source
nvbios_pcirom = {
	.name = "PCIROM",
	.init = pcirom_init,
	.fini = pcirom_fini,
	.read = pcirom_read,
	.rw = true,
};
/* Map the platform-provided ROM image (pdev->rom/romlen) supplied by
 * firmware.  Returns a struct priv on success, or an ERR_PTR on failure.
 */
static void *
platform_init(struct nvkm_bios *bios, const char *name)
{
	struct nvkm_device *device = bios->subdev.device;
	struct pci_dev *pdev;
	struct priv *priv;
	int ret = -ENOMEM;

	if (!device->func->pci)
		return ERR_PTR(-ENODEV);
	pdev = device->func->pci(device)->pdev;

	/* Nothing to do if firmware didn't leave us a ROM image. */
	if (!pdev->rom || pdev->romlen == 0)
		return ERR_PTR(-ENODEV);

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->size = pdev->romlen;
		priv->rom = ioremap(pdev->rom, pdev->romlen);
		if (priv->rom)
			return priv;
		ret = -ENODEV;
		kfree(priv);
	}
	return ERR_PTR(ret);
}
/* Undo platform_init(): unmap the ROM and free the private state. */
static void
platform_fini(void *data)
{
	struct priv *p = data;

	iounmap(p->rom);
	kfree(p);
}
/* VBIOS shadow source backed by the firmware-provided platform ROM image. */
const struct nvbios_source
nvbios_platform = {
	.name = "PLATFORM",
	.init = platform_init,
	.fini = platform_fini,
	.read = pcirom_read,	/* same bounds-checked copy as PCIROM */
	.rw = true,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <subdev/bios.h>
#include <subdev/bios/image.h>
#include <subdev/bios/pcir.h>
#include <subdev/bios/npde.h>
/* Validate and describe the ROM image starting at image->base, filling in
 * image->size/type/last from the PCIR (and, for non-NVIDIA-extension
 * images, NPDE) structures.  Returns false if no valid image is present.
 */
static bool
nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvbios_pcirT pcir;
	struct nvbios_npdeT npde;
	u8  ver;
	u16 hdr;
	u32 data;
	/* Accept the standard PCI ROM signature plus two NVIDIA variants. */
	switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
	case 0xaa55:
	case 0xbb77:
	case 0x4e56: /* NV */
		break;
	default:
		nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
			   image->base, data);
		return false;
	}
	if (!(data = nvbios_pcirTp(bios, image->base, &ver, &hdr, &pcir)))
		return false;
	image->size = pcir.image_size;
	image->type = pcir.image_type;
	image->last = pcir.last;
	if (image->type != 0x70) {
		/* NPDE, when present, overrides size/last for this image;
		 * its absence is not an error.
		 */
		if (!(data = nvbios_npdeTp(bios, image->base, &npde)))
			return true;
		image->size = npde.image_size;
		image->last = npde.last;
	} else {
		image->last = true;
	}
	return true;
}
/* Walk the chain of ROM images and describe the idx'th one in 'image'.
 * bios->imaged_addr is temporarily cleared so reads go through untranslated,
 * and restored before returning.  Returns false when idx is out of range.
 */
bool
nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
	u32 imaged_addr = bios->imaged_addr;
	memset(image, 0x00, sizeof(*image));
	bios->imaged_addr = 0;
	do {
		/* First iteration: base stays 0; afterwards, advance past
		 * the previously parsed image.
		 */
		image->base += image->size;
		if (image->last || !nvbios_imagen(bios, image)) {
			bios->imaged_addr = imaged_addr;
			return false;
		}
	} while(idx--);
	bios->imaged_addr = imaged_addr;
	return true;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/disp.h>
/* Locate the display (output) script table via BIT entry 'U'.  Supports
 * table versions 0x20-0x22; fills ver/hdr/cnt/len/sub from the header and
 * returns the table pointer, or 0x0000 if absent/unsupported.
 */
u16
nvbios_disp_table(struct nvkm_bios *bios,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
{
	struct bit_entry U;
	if (!bit_entry(bios, 'U', &U)) {
		if (U.version == 1) {
			u16 data = nvbios_rd16(bios, U.offset);
			if (data) {
				*ver = nvbios_rd08(bios, data + 0x00);
				switch (*ver) {
				case 0x20:
				case 0x21:
				case 0x22:
					*hdr = nvbios_rd08(bios, data + 0x01);
					*len = nvbios_rd08(bios, data + 0x02);
					*cnt = nvbios_rd08(bios, data + 0x03);
					/* 'sub' is the size of the sub-entries
					 * hanging off each display entry. */
					*sub = nvbios_rd08(bios, data + 0x04);
					return data;
				default:
					break;
				}
			}
		}
	}
	return 0x0000;
}
/* Return a pointer to display table entry 'idx', or 0x0000 (with *ver
 * zeroed) when the table is missing or idx is out of range.
 */
u16
nvbios_disp_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub)
{
	u8 hdr, cnt;
	u16 tbl = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);

	if (!tbl || idx >= cnt) {
		*ver = 0x00;
		return 0x0000;
	}
	return tbl + hdr + (idx * *len);
}
/* Parse display table entry 'idx' into 'info' (the 16-bit data pointer at
 * offset 0).  Returns the entry pointer, or 0x0000 on failure.
 */
u16
nvbios_disp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub,
		  struct nvbios_disp *info)
{
	u16 entry = nvbios_disp_entry(bios, idx, ver, len, sub);

	if (!entry || *len < 2)
		return 0x0000;

	info->data = nvbios_rd16(bios, entry + 0);
	return entry;
}
/* Locate the output-script header for display entry 'idx'.
 *
 * NOTE(review): the out-params are repurposed here — 'hdr' is passed as
 * nvbios_disp_parse()'s 'sub' argument and so receives the sub-entry size,
 * while 'cnt'/'len' are overwritten to describe the ocfg sub-entries
 * (count byte at +0x05, fixed 6-byte stride).
 */
u16
nvbios_outp_entry(struct nvkm_bios *bios, u8 idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct nvbios_disp info;
	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
	if (data) {
		*cnt = nvbios_rd08(bios, info.data + 0x05);
		*len = 0x06;
		data = info.data;
	}
	return data;
}
/* Parse output-script header 'idx' into 'info': hash type/mask plus up to
 * three script pointers.  Requires a header of at least 0x0a bytes; returns
 * the header pointer, or 0x0000 on failure.
 */
u16
nvbios_outp_parse(struct nvkm_bios *bios, u8 idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *info)
{
	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
	if (data && *hdr >= 0x0a) {
		info->type      = nvbios_rd16(bios, data + 0x00);
		info->mask      = nvbios_rd32(bios, data + 0x02);
		if (*ver <= 0x20) /* match any link */
			info->mask |= 0x00c0;
		info->script[0] = nvbios_rd16(bios, data + 0x06);
		info->script[1] = nvbios_rd16(bios, data + 0x08);
		info->script[2] = 0x0000;
		/* Third script only exists in longer (>= 0x0c byte) headers. */
		if (*hdr >= 0x0c)
			info->script[2] = nvbios_rd16(bios, data + 0x0a);
		return data;
	}
	return 0x0000;
}
/* Scan output-script headers for one whose type matches and whose mask
 * covers all bits in 'mask'.  Returns the matching header, or 0x0000.
 */
u16
nvbios_outp_match(struct nvkm_bios *bios, u16 type, u16 mask,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *info)
{
	u16 data, idx = 0;
	/* The "|| *ver" keeps the scan going past entries that fail to
	 * parse, as long as a valid table version is still reported. */
	while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
		if (data && info->type == type) {
			if ((info->mask & mask) == mask)
				break;
		}
	}
	return data;
}
/* Return a pointer to output-config sub-entry 'idx' beneath header 'outp',
 * or 0x0000 when idx is out of range.  cnt/hdr/len must already describe
 * the sub-entries (see nvbios_outp_entry()).
 */
u16
nvbios_ocfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	if (idx >= *cnt)
		return 0x0000;
	return outp + *hdr + (idx * *len);
}
/* Parse output-config sub-entry 'idx' into 'info' (proto byte, flags word,
 * and the two clock-comparison table pointers).
 */
u16
nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
	u16 entry = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);

	if (entry) {
		info->proto     = nvbios_rd08(bios, entry + 0x00);
		info->flags     = nvbios_rd16(bios, entry + 0x01);
		info->clkcmp[0] = nvbios_rd16(bios, entry + 0x02);
		info->clkcmp[1] = nvbios_rd16(bios, entry + 0x04);
	}
	return entry;
}
/* Find the output-config entry matching 'proto' (0xff in the table acts
 * as a wildcard) and 'flags'.  Returns the entry pointer, or 0x0000.
 */
u16
nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
{
	u16 entry, idx = 0;

	for (;;) {
		entry = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt,
					  len, info);
		if (!entry)
			break;
		if ((info->proto == proto || info->proto == 0xff) &&
		    info->flags == flags)
			break;
	}
	return entry;
}
/* Walk a clock-comparison table (pairs of <max-freq-10kHz, script>) and
 * return the script for the first entry whose threshold is <= khz.
 */
u16
nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
{
	for (; cmp; cmp += 0x04) {
		if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
			return nvbios_rd16(bios, cmp + 0x02);
	}
	return 0x0000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0209.h>
/* Locate the M0209 table via BIT entry 'M' (v2, pointer at +0x09).
 * Supports version 0x10; fills header sizes (snr is hardwired to one
 * sub-entry per entry) and returns the table pointer, or 0.
 */
u32
nvbios_M0209Te(struct nvkm_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_M;
	u32 data = 0x00000000;
	if (!bit_entry(bios, 'M', &bit_M)) {
		if (bit_M.version == 2 && bit_M.length > 0x0c)
			data = nvbios_rd32(bios, bit_M.offset + 0x09);
		if (data) {
			*ver = nvbios_rd08(bios, data + 0x00);
			switch (*ver) {
			case 0x10:
				*hdr = nvbios_rd08(bios, data + 0x01);
				*len = nvbios_rd08(bios, data + 0x02);
				*ssz = nvbios_rd08(bios, data + 0x03);
				*snr = 1;
				*cnt = nvbios_rd08(bios, data + 0x04);
				return data;
			default:
				break;
			}
		}
	}
	return 0x00000000;
}
/* Return a pointer to M0209 entry 'idx'.  On success the out-params are
 * repurposed to describe the entry's own sub-entries: *hdr = entry size,
 * *cnt = sub-entry count, *len = sub-entry size.
 */
u32
nvbios_M0209Ee(struct nvkm_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8  snr, ssz;
	u32 data = nvbios_M0209Te(bios, ver, hdr, cnt, len, &snr, &ssz);
	if (data && idx < *cnt) {
		/* Entry stride is the entry itself plus its sub-entries. */
		data = data + *hdr + idx * (*len + (snr * ssz));
		*hdr = *len;
		*cnt = snr;
		*len = ssz;
		return data;
	}
	return 0x00000000;
}
/* Parse M0209 entry 'idx' into 'info'.  The "!!data * *ver" switch is a
 * compact way of saying "version *ver, but only if the entry was found".
 */
u32
nvbios_M0209Ep(struct nvkm_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_M0209E *info)
{
	u32 data = nvbios_M0209Ee(bios, idx, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!data * *ver) {
	case 0x10:
		info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
		info->bits   =  nvbios_rd08(bios, data + 0x00) & 0x3f;
		info->modulo =  nvbios_rd08(bios, data + 0x01);
		info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
		info->v02_07 =  nvbios_rd08(bios, data + 0x02) & 0x07;
		info->v03    =  nvbios_rd08(bios, data + 0x03);
		return data;
	default:
		break;
	}
	return 0x00000000;
}
/* Return a pointer to sub-entry 'idx' of M0209 entry 'ent'; on success
 * *hdr is replaced with the sub-entry size.  Returns 0 when out of range.
 */
u32
nvbios_M0209Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
{
	u8 cnt, len;
	u32 entry = nvbios_M0209Ee(bios, ent, ver, hdr, &cnt, &len);

	if (!entry || idx >= cnt)
		return 0x00000000;

	entry += *hdr + idx * len;
	*hdr = len;
	return entry;
}
/* Parse M0209 sub-entry (ent, idx): unpack a packed bitfield array of
 * 'bits'-wide values, repeating every 'modulo' elements, into info->data.
 */
u32
nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
	       struct nvbios_M0209S *info)
{
	struct nvbios_M0209E M0209E;
	u8  cnt, len;
	u32 data = nvbios_M0209Ep(bios, ent, ver, hdr, &cnt, &len, &M0209E);
	if (data) {
		/* Inner 'data' intentionally shadows the outer one: it now
		 * points at the sub-entry rather than the entry. */
		u32 i, data = nvbios_M0209Se(bios, ent, idx, ver, hdr);
		memset(info, 0x00, sizeof(*info));
		switch (!!data * *ver) {
		case 0x10:
			for (i = 0; i < ARRAY_SIZE(info->data); i++) {
				/* Bit position of element i within the
				 * packed stream; wraps every 'modulo'. */
				u32 bits = (i % M0209E.modulo) * M0209E.bits;
				u32 mask = (1ULL << M0209E.bits) - 1;
				u16  off = bits / 8;
				u8   mod = bits % 8;
				/* NOTE(review): assumes a field never spans
				 * past the 32 bits read at 'off' — holds for
				 * bits <= 25 after the byte-aligned shift;
				 * verify against real table contents. */
				info->data[i] = nvbios_rd32(bios, data + off);
				info->data[i] = info->data[i] >> mod;
				info->data[i] = info->data[i] & mask;
			}
			return data;
		default:
			break;
		}
	}
	return 0x00000000;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/M0203.h>
/* Locate the M0203 (memory type) table via BIT entry 'M' (v2, 16-bit
 * pointer at +0x03).  Supports version 0x10 only.
 */
u32
nvbios_M0203Te(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_M;
	u32 data = 0x00000000;
	if (!bit_entry(bios, 'M', &bit_M)) {
		if (bit_M.version == 2 && bit_M.length > 0x04)
			data = nvbios_rd16(bios, bit_M.offset + 0x03);
		if (data) {
			*ver = nvbios_rd08(bios, data + 0x00);
			switch (*ver) {
			case 0x10:
				*hdr = nvbios_rd08(bios, data + 0x01);
				*len = nvbios_rd08(bios, data + 0x02);
				*cnt = nvbios_rd08(bios, data + 0x03);
				return data;
			default:
				break;
			}
		}
	}
	return 0x00000000;
}
/* Parse the M0203 table header into 'info' (table type byte and the
 * 16-bit pointer that follows it).  Returns the table pointer (possibly 0).
 */
u32
nvbios_M0203Tp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	       struct nvbios_M0203T *info)
{
	u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	/* "!!data * *ver" selects version *ver only when the table exists. */
	switch (!!data * *ver) {
	case 0x10:
		info->type    = nvbios_rd08(bios, data + 0x04);
		info->pointer = nvbios_rd16(bios, data + 0x05);
		break;
	default:
		break;
	}
	return data;
}
/* Return a pointer to M0203 entry 'idx'; on success *hdr is replaced
 * with the entry length.  Returns 0 when out of range.
 */
u32
nvbios_M0203Ee(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr)
{
	u8 cnt, len;
	u32 tbl = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);

	if (!tbl || idx >= cnt)
		return 0x00000000;

	tbl += *hdr + idx * len;
	*hdr = len;
	return tbl;
}
/* Parse M0203 entry 'idx' into 'info': type/strap nibbles from byte 0,
 * group nibble from byte 1.  Returns the entry pointer, or 0.
 */
u32
nvbios_M0203Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
	       struct nvbios_M0203E *info)
{
	u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
	memset(info, 0x00, sizeof(*info));
	switch (!!data * *ver) {
	case 0x10:
		info->type  = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
		info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
		info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
		return data;
	default:
		break;
	}
	return 0x00000000;
}
/* Find the M0203 entry whose strap field matches 'ramcfg'.  Only RAMCFG-
 * type tables are supported; any other table type logs a warning and
 * returns 0.  Returns the matching entry pointer, or 0 if none matches.
 */
u32
nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
	       struct nvbios_M0203E *info)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvbios_M0203T M0203T;
	u8 cnt, len, idx = 0xff;	/* pre-increment starts scan at 0 */
	u32 data;
	if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
		nvkm_warn(subdev, "M0203T not found\n");
		return 0x00000000;
	}
	while ((data = nvbios_M0203Ep(bios, ++idx, ver, hdr, info))) {
		switch (M0203T.type) {
		case M0203T_TYPE_RAMCFG:
			if (info->strap != ramcfg)
				continue;
			return data;
		default:
			nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
			return 0x00000000;
		}
	}
	return data;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c |
/*
* Copyright 2015 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
/* Locate the ICCSENSE (power sensing) table via BIT entry 'P' (v2,
 * pointer at +0x28).  Supports table versions 0x10 and 0x20.
 */
static u32
nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
		      u8 *len)
{
	struct bit_entry bit_P;
	u32 iccsense;
	if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
	    bit_P.length < 0x2c)
		return 0;
	iccsense = nvbios_rd32(bios, bit_P.offset + 0x28);
	if (!iccsense)
		return 0;
	*ver = nvbios_rd08(bios, iccsense + 0);
	switch (*ver) {
	case 0x10:
	case 0x20:
		*hdr = nvbios_rd08(bios, iccsense + 1);
		*len = nvbios_rd08(bios, iccsense + 2);
		*cnt = nvbios_rd08(bios, iccsense + 3);
		return iccsense;
	default:
		break;
	}
	return 0;
}
/* Parse the ICCSENSE table into 'iccsense': one pwr_rail_t per entry,
 * including shunt-resistor values for known external sensor devices.
 * Allocates iccsense->rail (caller owns/frees it).  Returns 0 on success,
 * -EINVAL if the table is missing or of unknown version, -ENOMEM on OOM.
 */
int
nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	u8 ver, hdr, cnt, len, i;
	u32 table, entry;
	table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
	if (!table || !cnt)
		return -EINVAL;
	if (ver != 0x10 && ver != 0x20) {
		nvkm_error(subdev, "ICCSENSE version 0x%02x unknown\n", ver);
		return -EINVAL;
	}
	iccsense->nr_entry = cnt;
	iccsense->rail = kmalloc_array(cnt, sizeof(struct pwr_rail_t),
				       GFP_KERNEL);
	if (!iccsense->rail)
		return -ENOMEM;
	for (i = 0; i < cnt; ++i) {
		struct nvbios_extdev_func extdev;
		struct pwr_rail_t *rail = &iccsense->rail[i];
		u8 res_start = 0;	/* entry offset of first resistor pair */
		int r;
		entry = table + hdr + i * len;
		switch(ver) {
		case 0x10:
			/* v1.0: 0xf8 in byte 1 marks a "mode 1" rail. */
			if ((nvbios_rd08(bios, entry + 0x1) & 0xf8) == 0xf8)
				rail->mode = 1;
			else
				rail->mode = 0;
			rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
			res_start = 0x3;
			break;
		case 0x20:
			rail->mode = nvbios_rd08(bios, entry);
			rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
			res_start = 0x5;
			break;
		}
		/* Entry is skipped (resistors left unparsed) when the
		 * referenced external device can't be resolved. */
		if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
			continue;
		switch (extdev.type) {
		case NVBIOS_EXTDEV_INA209:
		case NVBIOS_EXTDEV_INA219:
			rail->resistor_count = 1;
			break;
		case NVBIOS_EXTDEV_INA3221:
			rail->resistor_count = 3;
			break;
		default:
			rail->resistor_count = 0;
			break;
		}
		/* Each resistor: <milliohms, flags> pair; bit 0x40 of the
		 * flags byte means "disabled". */
		for (r = 0; r < rail->resistor_count; ++r) {
			rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
			rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40);
		}
		rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2);
	}
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/vmap.h>
/* Locate the voltage-map table via BIT entry 'P' (v2, pointer at +0x20).
 * Supports versions 0x10 and 0x20.
 */
u32
nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u32 vmap = 0;
	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2) {
			vmap = nvbios_rd32(bios, bit_P.offset + 0x20);
			if (vmap) {
				*ver = nvbios_rd08(bios, vmap + 0);
				switch (*ver) {
				case 0x10:
				case 0x20:
					*hdr = nvbios_rd08(bios, vmap + 1);
					/* Note: cnt is byte 3, len is byte 2. */
					*cnt = nvbios_rd08(bios, vmap + 3);
					*len = nvbios_rd08(bios, vmap + 2);
					return vmap;
				default:
					break;
				}
			}
		}
	}
	return 0;
}
/* Parse the voltage-map table header into 'info'.  v1.0 tables carry no
 * max fields, so all three default to 0xff (no limit).
 */
u32
nvbios_vmap_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		  struct nvbios_vmap *info)
{
	u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!vmap * *ver) {
	case 0x10:
		info->max0 = 0xff;
		info->max1 = 0xff;
		info->max2 = 0xff;
		break;
	case 0x20:
		info->max0 = nvbios_rd08(bios, vmap + 0x7);
		info->max1 = nvbios_rd08(bios, vmap + 0x8);
		/* max2 only exists in longer v2.0 headers. */
		if (*len >= 0xc)
			info->max2 = nvbios_rd08(bios, vmap + 0xc);
		else
			info->max2 = 0xff;
		break;
	}
	return vmap;
}
/* Return a pointer to voltage-map entry 'idx', or 0 when the table is
 * missing or idx is out of range.
 */
u32
nvbios_vmap_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
	u8 hdr, cnt;
	u32 tbl = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);

	if (!tbl || idx >= cnt)
		return 0;
	return tbl + hdr + (idx * *len);
}
/* Parse voltage-map entry 'idx' into 'info'.  v1.0 entries have no
 * mode/link and only three coefficients; v2.0 entries carry six.
 */
u32
nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
			struct nvbios_vmap_entry *info)
{
	u32 vmap = nvbios_vmap_entry(bios, idx, ver, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!vmap * *ver) {
	case 0x10:
		info->link   = 0xff;	/* no link field in v1.0 */
		info->min    = nvbios_rd32(bios, vmap + 0x00);
		info->max    = nvbios_rd32(bios, vmap + 0x04);
		info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
		info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
		info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
		break;
	case 0x20:
		info->mode   = nvbios_rd08(bios, vmap + 0x00);
		info->link   = nvbios_rd08(bios, vmap + 0x01);
		info->min    = nvbios_rd32(bios, vmap + 0x02);
		info->max    = nvbios_rd32(bios, vmap + 0x06);
		info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
		info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
		info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
		info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
		info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
		info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
		break;
	}
	return vmap;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
/* Locate and validate the Display Configuration Block (DCB).  The 16-bit
 * pointer lives at BIOS offset 0x36; the header layout depends on the DCB
 * version, which is checked against known signatures.  Fills ver/hdr/cnt/len
 * and returns the table pointer, or 0x0000 when absent/invalid/unusable.
 */
u16
dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	u16 dcb = 0x0000;
	if (device->card_type > NV_04)
		dcb = nvbios_rd16(bios, 0x36);
	if (!dcb) {
		nvkm_warn(subdev, "DCB table not found\n");
		return dcb;
	}
	*ver = nvbios_rd08(bios, dcb);
	if (*ver >= 0x42) {
		nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
		return 0x0000;
	} else
	if (*ver >= 0x30) {
		/* v3.0+: explicit header with 0x4edcbdcb signature at +6. */
		if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
			*hdr = nvbios_rd08(bios, dcb + 1);
			*cnt = nvbios_rd08(bios, dcb + 2);
			*len = nvbios_rd08(bios, dcb + 3);
			return dcb;
		}
	} else
	if (*ver >= 0x20) {
		/* v2.x: signature at +4; entry count is inferred from the
		 * distance to the i2c table that follows the entries. */
		if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
			u16 i2c = nvbios_rd16(bios, dcb + 2);
			*hdr = 8;
			*cnt = (i2c - dcb) / 8;
			*len = 8;
			return dcb;
		}
	} else
	if (*ver >= 0x15) {
		/* v1.5: identified by the "DEV_REC" marker preceding it. */
		if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
			u16 i2c = nvbios_rd16(bios, dcb + 2);
			*hdr = 4;
			*cnt = (i2c - dcb) / 10;
			*len = 10;
			return dcb;
		}
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
		 * always has the same single (crt) entry, even when tv-out
		 * present, so the conclusion is this version cannot really
		 * be used.
		 *
		 * v1.2 tables (some NV6/10, and NV15+) normally have the
		 * same 5 entries, which are not specific to the card and so
		 * no use.
		 *
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c
		 * table pointer, so use the indices parsed in
		 * parse_bmp_structure.
		 *
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		nvkm_debug(subdev, "DCB contains no useful data\n");
		return 0x0000;
	}
	nvkm_warn(subdev, "DCB header validation failed\n");
	return 0x0000;
}
/* Return a pointer to DCB output entry 'idx', or 0x0000 when the table is
 * missing or idx is out of range.
 */
u16
dcb_outp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
{
	u8 hdr, cnt;
	u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);

	if (!dcb || idx >= cnt)
		return 0x0000;
	return dcb + hdr + (idx * *len);
}
/* Build the "type" hash used to match outputs: extdev | location | type. */
static inline u16
dcb_outp_hasht(struct dcb_output *outp)
{
	u16 hash = outp->type;

	hash |= outp->location << 4;
	hash |= outp->extdev << 8;
	return hash;
}
/* Build the "mask" hash used to match outputs: heads | link | or. */
static inline u16
dcb_outp_hashm(struct dcb_output *outp)
{
	u16 hash = outp->or;

	hash |= outp->link << 6;
	hash |= outp->heads << 8;
	return hash;
}
/* Parse DCB output entry 'idx' into 'outp'.  Only v2.0+ connector words
 * are decoded here; v4.0+ entries additionally carry a config word with
 * DP link rate/width and SOR link/extdev fields.  Returns the entry
 * pointer, or 0x0000 when the entry is absent or pre-v2.0.
 */
u16
dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
	       struct dcb_output *outp)
{
	u16 dcb = dcb_outp(bios, idx, ver, len);
	memset(outp, 0x00, sizeof(*outp));
	if (dcb) {
		if (*ver >= 0x20) {
			/* Connector word: packed bitfields, low-to-high:
			 * type, i2c index, heads, connector, bus, location,
			 * or. */
			u32 conn = nvbios_rd32(bios, dcb + 0x00);
			outp->or        = (conn & 0x0f000000) >> 24;
			outp->location  = (conn & 0x00300000) >> 20;
			outp->bus       = (conn & 0x000f0000) >> 16;
			outp->connector = (conn & 0x0000f000) >> 12;
			outp->heads     = (conn & 0x00000f00) >> 8;
			outp->i2c_index = (conn & 0x000000f0) >> 4;
			outp->type      = (conn & 0x0000000f);
			outp->link      = 0;
		} else {
			dcb = 0x0000;	/* pre-v2.0 entries not parsed */
		}
		if (*ver >= 0x40) {
			u32 conf = nvbios_rd32(bios, dcb + 0x04);
			switch (outp->type) {
			case DCB_OUTPUT_DP:
				/* Max DP link rate, encoded in 270MHz units. */
				switch (conf & 0x00e00000) {
				case 0x00000000: /* 1.62 */
					outp->dpconf.link_bw = 0x06;
					break;
				case 0x00200000: /* 2.7 */
					outp->dpconf.link_bw = 0x0a;
					break;
				case 0x00400000: /* 5.4 */
					outp->dpconf.link_bw = 0x14;
					break;
				case 0x00600000: /* 8.1 */
				default:
					outp->dpconf.link_bw = 0x1e;
					break;
				}
				/* Lane count; 0xf means "all four". */
				switch ((conf & 0x0f000000) >> 24) {
				case 0xf:
				case 0x4:
					outp->dpconf.link_nr = 4;
					break;
				case 0x3:
				case 0x2:
					outp->dpconf.link_nr = 2;
					break;
				case 0x1:
				default:
					outp->dpconf.link_nr = 1;
					break;
				}
				fallthrough;
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
				outp->link = (conf & 0x00000030) >> 4;
				outp->sorconf.link = outp->link; /*XXX*/
				outp->extdev = 0x00;
				/* External devices only exist off-chip. */
				if (outp->location != 0)
					outp->extdev = (conf & 0x0000ff00) >> 8;
				break;
			default:
				break;
			}
		}
		outp->hasht = dcb_outp_hasht(outp);
		outp->hashm = dcb_outp_hashm(outp);
	}
	return dcb;
}
/* Scan DCB outputs for one whose type matches (low byte of the type hash)
 * and whose mask hash covers every bit in 'mask'.  Returns the matching
 * entry, or 0x0000 when the scan runs off the end of the table.
 */
u16
dcb_outp_match(struct nvkm_bios *bios, u16 type, u16 mask,
	       u8 *ver, u8 *len, struct dcb_output *outp)
{
	u16 dcb, idx = 0;
	while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
		if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
			if ((dcb_outp_hashm(outp) & mask) == mask)
				break;
		}
	}
	return dcb;
}
/* Invoke 'exec' for every usable DCB output entry, stopping early on known
 * end-of-table markers or if the callback returns non-zero (which is then
 * propagated).  Returns 0 when the whole table was walked.
 */
int
dcb_outp_foreach(struct nvkm_bios *bios, void *data,
		 int (*exec)(struct nvkm_bios *, void *, int, u16))
{
	int ret, idx = -1;
	u8  ver, len;
	u16 outp;
	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
		if (nvbios_rd32(bios, outp) == 0x00000000)
			break; /* seen on an NV11 with DCB v1.5 */
		if (nvbios_rd32(bios, outp) == 0xffffffff)
			break; /* seen on an NV17 with DCB v2.0 */
		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
			continue;
		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
			break;
		ret = exec(bios, data, idx, outp);
		if (ret)
			return ret;
	}
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
/* Look up the BIT (BIOS Information Table) entry with the given id
 * character, filling 'bit' on success.  Returns 0 on success, -ENOENT if
 * no entry with that id exists, -EINVAL if the BIT table itself was never
 * located (bios->bit_offset unset).
 */
int
bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
{
	if (likely(bios->bit_offset)) {
		u8  entries = nvbios_rd08(bios, bios->bit_offset + 10);
		u32 entry   = bios->bit_offset + 12;
		while (entries--) {
			if (nvbios_rd08(bios, entry + 0) == id) {
				bit->id      = nvbios_rd08(bios, entry + 0);
				bit->version = nvbios_rd08(bios, entry + 1);
				bit->length  = nvbios_rd16(bios, entry + 2);
				bit->offset  = nvbios_rd16(bios, entry + 4);
				return 0;
			}
			/* Entry stride lives in the BIT header at +9. */
			entry += nvbios_rd08(bios, bios->bit_offset + 9);
		}
		return -ENOENT;
	}
	return -EINVAL;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/fan.h>
/* Locate the fan-management table via BIT entry 'P' (v2, pointer at
 * +0x58).  Supports version 0x10 only.
 */
static u32
nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u32 fan = 0;
	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2 && bit_P.length >= 0x5c)
			fan = nvbios_rd32(bios, bit_P.offset + 0x58);
		if (fan) {
			*ver = nvbios_rd08(bios, fan + 0);
			switch (*ver) {
			case 0x10:
				*hdr = nvbios_rd08(bios, fan + 1);
				*len = nvbios_rd08(bios, fan + 2);
				*cnt = nvbios_rd08(bios, fan + 3);
				return fan;
			default:
				break;
			}
		}
	}
	return 0;
}
/* Return the offset of fan table entry 'idx', or 0 if the table is
 * missing or the index is out of range.
 */
static u32
nvbios_fan_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
		 u8 *cnt, u8 *len)
{
	u32 table = nvbios_fan_table(bios, ver, hdr, cnt, len);

	if (!table || idx >= *cnt)
		return 0;

	return table + *hdr + (idx * (*len));
}
/* Parse the first fan table entry into *fan.
 *
 * Decodes the fan control type (toggle/PWM), linear duty range and PWM
 * frequency.  Returns the entry offset, or 0 if no fan table exists.
 */
u32
nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
	u8 ver, hdr, cnt, len;
	u32 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);

	if (!data)
		return 0;

	switch (nvbios_rd08(bios, data + 0x00)) {
	case 0:
		fan->type = NVBIOS_THERM_FAN_TOGGLE;
		break;
	case 1:
	case 2:
		/* TODO: Understand the difference between the two! */
		fan->type = NVBIOS_THERM_FAN_PWM;
		break;
	default:
		fan->type = NVBIOS_THERM_FAN_UNK;
		break;
	}

	fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
	fan->min_duty = nvbios_rd08(bios, data + 0x02);
	fan->max_duty = nvbios_rd08(bios, data + 0x03);
	fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;

	return data;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/timing.h>
/* Locate the memory timing table via BIT 'P' (v1: pointer at offset 4,
 * v2: pointer at offset 8).
 *
 * Fills *ver/*hdr/*cnt/*len with the table layout, and *snr/*ssz with
 * the per-entry sub-entry count and size (zero for v1.0 tables, which
 * have no sub-entries).  Returns the table offset, or 0 if the table is
 * absent or its version is unsupported.
 */
u32
nvbios_timingTe(struct nvkm_bios *bios,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_P;
	u32 timing = 0;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 1)
			timing = nvbios_rd32(bios, bit_P.offset + 4);
		else
		if (bit_P.version == 2)
			timing = nvbios_rd32(bios, bit_P.offset + 8);

		if (timing) {
			*ver = nvbios_rd08(bios, timing + 0);
			switch (*ver) {
			case 0x10:
				*hdr = nvbios_rd08(bios, timing + 1);
				*cnt = nvbios_rd08(bios, timing + 2);
				*len = nvbios_rd08(bios, timing + 3);
				*snr = 0;
				*ssz = 0;
				return timing;
			case 0x20:
				/* note: header bytes are laid out differently
				 * from v1.0 (cnt at +5, len at +2) */
				*hdr = nvbios_rd08(bios, timing + 1);
				*cnt = nvbios_rd08(bios, timing + 5);
				*len = nvbios_rd08(bios, timing + 2);
				*snr = nvbios_rd08(bios, timing + 4);
				*ssz = nvbios_rd08(bios, timing + 3);
				return timing;
			default:
				break;
			}
		}
	}

	return 0;
}
/* Return the offset of timing table entry 'idx'.
 *
 * On success, *hdr/*cnt/*len are re-purposed to describe the entry's own
 * layout (size / sub-entry count / sub-entry size) so callers can walk
 * the sub-entries.  Returns 0 when the table is missing or 'idx' is out
 * of range.
 */
u32
nvbios_timingEe(struct nvkm_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8 snr, ssz;
	u32 table = nvbios_timingTe(bios, ver, hdr, cnt, len, &snr, &ssz);

	if (!table || idx >= *cnt)
		return 0;

	table += *hdr + idx * (*len + (snr * ssz));
	*hdr = *len;
	*cnt = snr;
	*len = ssz;
	return table;
}
/* Parse memory timing entry 'idx' into the common ramcfg structure *p.
 *
 * Supports table versions 1.0 and 2.0; returns the entry offset, or 0
 * if the entry is absent or the version unsupported.
 */
u32
nvbios_timingEp(struct nvkm_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
	u32 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
	p->timing_ver = *ver;
	p->timing_hdr = *hdr;
	/* '!!data * *ver' folds the no-entry case into the default arm */
	switch (!!data * *ver) {
	case 0x10:
		p->timing_10_WR = nvbios_rd08(bios, data + 0x00);
		p->timing_10_WTR = nvbios_rd08(bios, data + 0x01);
		p->timing_10_CL = nvbios_rd08(bios, data + 0x02);
		p->timing_10_RC = nvbios_rd08(bios, data + 0x03);
		p->timing_10_RFC = nvbios_rd08(bios, data + 0x05);
		p->timing_10_RAS = nvbios_rd08(bios, data + 0x07);
		p->timing_10_RP = nvbios_rd08(bios, data + 0x09);
		p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
		p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
		p->timing_10_RRD = nvbios_rd08(bios, data + 0x0c);
		p->timing_10_13 = nvbios_rd08(bios, data + 0x0d);
		p->timing_10_ODT = nvbios_rd08(bios, data + 0x0e) & 0x07;
		/* NOTE(review): reads the same bits as timing_10_ODT above --
		 * verify the intended mask/shift for RON */
		if (p->ramcfg_ver >= 0x10)
			p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
		/* defaults for optional trailing fields, overridden below
		 * depending on how large this table's entries are */
		p->timing_10_24 = 0xff;
		p->timing_10_21 = 0;
		p->timing_10_20 = 0;
		p->timing_10_CWL = 0;
		p->timing_10_18 = 0;
		p->timing_10_16 = 0;
		/* each case falls through so a longer header picks up all
		 * the fields a shorter one would have */
		switch (min_t(u8, *hdr, 25)) {
		case 25:
			p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
			fallthrough;
		case 24:
		case 23:
		case 22:
			p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
			fallthrough;
		case 21:
			p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
			fallthrough;
		case 20:
			p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
			fallthrough;
		case 19:
			p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
			fallthrough;
		case 18:
		case 17:
			p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
		}
		break;
	case 0x20:
		/* v2.0: eleven raw 32-bit timing words plus packed bitfields */
		p->timing[0] = nvbios_rd32(bios, data + 0x00);
		p->timing[1] = nvbios_rd32(bios, data + 0x04);
		p->timing[2] = nvbios_rd32(bios, data + 0x08);
		p->timing[3] = nvbios_rd32(bios, data + 0x0c);
		p->timing[4] = nvbios_rd32(bios, data + 0x10);
		p->timing[5] = nvbios_rd32(bios, data + 0x14);
		p->timing[6] = nvbios_rd32(bios, data + 0x18);
		p->timing[7] = nvbios_rd32(bios, data + 0x1c);
		p->timing[8] = nvbios_rd32(bios, data + 0x20);
		p->timing[9] = nvbios_rd32(bios, data + 0x24);
		p->timing[10] = nvbios_rd32(bios, data + 0x28);
		p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
		p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
		p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
		p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
		temp = nvbios_rd16(bios, data + 0x2c);
		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
		p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
		p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
		temp = nvbios_rd16(bios, data + 0x31);
		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
		p->timing_20_31_0800 = (temp & 0x0800) >> 11;
		p->timing_20_31_7000 = (temp & 0x7000) >> 12;
		p->timing_20_31_8000 = (temp & 0x8000) >> 15;
		break;
	default:
		data = 0;
		break;
	}
	return data;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include <subdev/pci.h>
/* Shadow 'length' bytes of the BIOS image starting at 'offset' by reading
 * 32-bit words from the PROM aperture at MMIO 0x300000.  Returns the
 * number of bytes copied, or 0 if the request exceeds the 1MiB aperture.
 */
static u32
nvbios_prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
{
	struct nvkm_device *device = data;
	u32 i;

	if (offset + length > 0x00100000)
		return 0;

	for (i = offset; i < offset + length; i += 4)
		*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);

	return length;
}
/* Finish PROM access: re-enable the PCI ROM shadow that init() disabled. */
static void
nvbios_prom_fini(void *data)
{
	struct nvkm_device *device = data;
	nvkm_pci_rom_shadow(device->pci, true);
}
/* Prepare PROM access: disable PCI ROM shadowing so reads through the
 * 0x300000 aperture hit the actual PROM.
 *
 * Returns the nvkm_device as the opaque handle passed to read()/fini(),
 * or ERR_PTR(-ENODEV) on NV40-family chipsets >= 0x4c, which are not
 * supported by this method.
 */
static void *
nvbios_prom_init(struct nvkm_bios *bios, const char *name)
{
	struct nvkm_device *device = bios->subdev.device;
	/* presumably these chipsets have no usable PROM -- TODO confirm */
	if (device->card_type == NV_40 && device->chipset >= 0x4c)
		return ERR_PTR(-ENODEV);
	nvkm_pci_rom_shadow(device->pci, false);
	return device;
}
/* BIOS shadow source backed by the PROM aperture; read-only, with
 * init()/fini() toggling PCI ROM shadowing around the access. */
const struct nvbios_source
nvbios_prom = {
	.name = "PROM",
	.init = nvbios_prom_init,
	.fini = nvbios_prom_fini,
	.read = nvbios_prom_read,
	.rw = false,
};
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c |
/*
* Copyright 2016 Karol Herbst
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/vpstate.h>
/* Return the offset of the vpstate table (BIT 'P' v2, pointer at offset
 * 0x38), or 0 if it cannot be located.
 */
static u32
nvbios_vpstate_offset(struct nvkm_bios *b)
{
	struct bit_entry bit_P;

	if (bit_entry(b, 'P', &bit_P))
		return 0x0000;
	if (bit_P.version != 2 || bit_P.length < 0x3c)
		return 0x0000;

	return nvbios_rd32(b, bit_P.offset + 0x38);
}
/* Parse the vpstate table header into *h.
 *
 * Returns 0 on success, -EINVAL on a NULL argument or unsupported table
 * version, -ENODEV when no table is present.
 */
int
nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
{
	if (!h)
		return -EINVAL;

	h->offset = nvbios_vpstate_offset(b);
	if (!h->offset)
		return -ENODEV;
	h->version = nvbios_rd08(b, h->offset);
	switch (h->version) {
	case 0x10:
		h->hlen = nvbios_rd08(b, h->offset + 0x1);
		h->elen = nvbios_rd08(b, h->offset + 0x2);
		h->slen = nvbios_rd08(b, h->offset + 0x3);
		h->scount = nvbios_rd08(b, h->offset + 0x4);
		h->ecount = nvbios_rd08(b, h->offset + 0x5);

		h->base_id = nvbios_rd08(b, h->offset + 0x0f);
		/* boost/tdp ids only exist in longer headers; 0xff = absent */
		if (h->hlen > 0x10)
			h->boost_id = nvbios_rd08(b, h->offset + 0x10);
		else
			h->boost_id = 0xff;
		if (h->hlen > 0x11)
			h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
		else
			h->tdp_id = 0xff;
		return 0;
	default:
		return -EINVAL;
	}
}
/* Read vpstate entry 'idx' from the table described by *h into *e.
 *
 * Entries follow the header; each consists of h->elen fixed bytes plus
 * h->scount sub-entries of h->slen bytes.
 *
 * Returns 0 on success, -EINVAL on a NULL argument or an out-of-range
 * index.
 */
int
nvbios_vpstate_entry(struct nvkm_bios *b, struct nvbios_vpstate_header *h,
		     u8 idx, struct nvbios_vpstate_entry *e)
{
	u32 offset;

	/* valid indices are 0..ecount-1; the previous '>' comparison
	 * allowed reading one entry past the end of the table */
	if (!e || !h || idx >= h->ecount)
		return -EINVAL;

	offset = h->offset + h->hlen + idx * (h->elen + (h->slen * h->scount));
	e->pstate    = nvbios_rd08(b, offset);
	e->clock_mhz = nvbios_rd16(b, offset + 0x5);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/dp.h>
/* Locate the DisplayPort table via BIT 'd' v1.
 *
 * Fills *ver/*hdr/*cnt/*len with the table layout and returns its
 * offset, or 0x0000 if absent or of an unsupported version.
 */
u16
nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry d;

	if (!bit_entry(bios, 'd', &d)) {
		if (d.version == 1 && d.length >= 2) {
			u16 data = nvbios_rd16(bios, d.offset);
			if (data) {
				*ver = nvbios_rd08(bios, data + 0x00);
				switch (*ver) {
				case 0x20:
				case 0x21:
				case 0x30:
				case 0x40:
				case 0x41:
				case 0x42:
					*hdr = nvbios_rd08(bios, data + 0x01);
					*len = nvbios_rd08(bios, data + 0x02);
					*cnt = nvbios_rd08(bios, data + 0x03);
					return data;
				default:
					break;
				}
			}
		}
	}

	return 0x0000;
}
/* Return the offset of DP output entry 'idx' from the DP table.
 *
 * On success, *hdr/*cnt/*len are re-purposed to describe the entry's
 * own layout for the version in use.  Returns 0x0000 (and zeroes *ver)
 * when the table is missing, the index is out of range, or the entry
 * pointer is NULL.
 */
static u16
nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
	if (data && idx < *cnt) {
		u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
		/* '*ver * !!outp' folds a NULL entry into the default arm */
		switch (*ver * !!outp) {
		case 0x20:
		case 0x21:
		case 0x30:
			*hdr = nvbios_rd08(bios, data + 0x04);
			*len = nvbios_rd08(bios, data + 0x05);
			*cnt = nvbios_rd08(bios, outp + 0x04);
			break;
		case 0x40:
		case 0x41:
		case 0x42:
			/* v4.x entries have no sub-entry list of their own */
			*hdr = nvbios_rd08(bios, data + 0x04);
			*cnt = 0;
			*len = 0;
			break;
		default:
			break;
		}
		return outp;
	}
	*ver = 0x00;
	return 0x0000;
}
/* Parse DP output entry 'idx' into *info (type, mask, flags, link
 * training scripts, link-rate comparison table pointer).
 *
 * *info is zeroed first; returns the entry offset, or 0x0000 when the
 * entry is absent or its version unsupported.
 */
u16
nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		   struct nvbios_dpout *info)
{
	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	if (data && *ver) {
		info->type = nvbios_rd16(bios, data + 0x00);
		info->mask = nvbios_rd16(bios, data + 0x02);
		switch (*ver) {
		case 0x20:
			info->mask |= 0x00c0; /* match any link */
			fallthrough;
		case 0x21:
		case 0x30:
			info->flags     = nvbios_rd08(bios, data + 0x05);
			info->script[0] = nvbios_rd16(bios, data + 0x06);
			info->script[1] = nvbios_rd16(bios, data + 0x08);
			/* trailing fields exist only in longer entries */
			if (*len >= 0x0c)
				info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
			if (*len >= 0x0f) {
				info->script[2] = nvbios_rd16(bios, data + 0x0c);
				info->script[3] = nvbios_rd16(bios, data + 0x0e);
			}
			if (*len >= 0x11)
				info->script[4] = nvbios_rd16(bios, data + 0x10);
			break;
		case 0x40:
		case 0x41:
		case 0x42:
			info->flags     = nvbios_rd08(bios, data + 0x04);
			info->script[0] = nvbios_rd16(bios, data + 0x05);
			info->script[1] = nvbios_rd16(bios, data + 0x07);
			info->lnkcmp    = nvbios_rd16(bios, data + 0x09);
			info->script[2] = nvbios_rd16(bios, data + 0x0b);
			info->script[3] = nvbios_rd16(bios, data + 0x0d);
			info->script[4] = nvbios_rd16(bios, data + 0x0f);
			break;
		default:
			data = 0x0000;
			break;
		}
	}
	return data;
}
/* Find the DP output entry whose type equals 'type' and whose mask
 * contains all bits of 'mask'.  Returns its offset and fills *info, or
 * returns 0x0000 when no entry matches.
 */
u16
nvbios_dpout_match(struct nvkm_bios *bios, u16 type, u16 mask,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		   struct nvbios_dpout *info)
{
	u16 data, idx = 0;

	for (;;) {
		data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info);
		if (!data && !*ver)
			break;
		if (data && info->type == type && (info->mask & mask) == mask)
			break;
	}

	return data;
}
/* Return the offset of drive-configuration sub-entry 'idx'.
 *
 * For table versions >= 4.0 the sub-entries live after the output
 * entries in the main DP table, so re-derive the base/layout from it;
 * older versions index relative to the output entry 'outp'.  Returns
 * 0x0000 when the index is out of range.
 */
static u16
nvbios_dpcfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	if (*ver >= 0x40) {
		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
		*hdr += *len * *cnt;
		*len = nvbios_rd08(bios, outp + 0x06);
		*cnt = nvbios_rd08(bios, outp + 0x07) *
		       nvbios_rd08(bios, outp + 0x05);
	}

	if (idx >= *cnt)
		return 0x0000;

	return outp + *hdr + (idx * *len);
}
/* Parse drive-configuration sub-entry 'idx' into *info (pre-emphasis,
 * drive current, TX power-up values; layout differs per table version).
 *
 * *info is zeroed first; returns the sub-entry offset, or 0x0000 when
 * absent or of an unsupported version.
 */
u16
nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		   struct nvbios_dpcfg *info)
{
	u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	if (data) {
		switch (*ver) {
		case 0x20:
		case 0x21:
			info->dc    = nvbios_rd08(bios, data + 0x02);
			info->pe    = nvbios_rd08(bios, data + 0x03);
			info->tx_pu = nvbios_rd08(bios, data + 0x04);
			break;
		case 0x30:
		case 0x40:
		case 0x41:
			info->pc    = nvbios_rd08(bios, data + 0x00);
			info->dc    = nvbios_rd08(bios, data + 0x01);
			info->pe    = nvbios_rd08(bios, data + 0x02);
			info->tx_pu = nvbios_rd08(bios, data + 0x03);
			break;
		case 0x42:
			/* v4.2 drops the pc byte */
			info->dc    = nvbios_rd08(bios, data + 0x00);
			info->pe    = nvbios_rd08(bios, data + 0x01);
			info->tx_pu = nvbios_rd08(bios, data + 0x02);
			break;
		default:
			data = 0x0000;
			break;
		}
	}
	return data;
}
/* Find the drive-configuration sub-entry matching pre-cursor 'pc',
 * voltage swing 'vs' and pre-emphasis 'pe'.
 *
 * For v3.0+ tables the index is computed directly from (pc, vs, pe);
 * older tables are searched linearly for a (vs, pe) match.  Returns
 * the parsed entry via nvbios_dpcfg_parse().
 */
u16
nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		   struct nvbios_dpcfg *info)
{
	u8 idx = 0xff;
	u16 data;

	if (*ver >= 0x30) {
		/* 10 entries per pc group; vsoff maps vs to the group-local
		 * base so that (vs, pe) pairs pack contiguously */
		static const u8 vsoff[] = { 0, 4, 7, 9 };
		idx = (pc * 10) + vsoff[vs] + pe;
		/* byte 0x11 of the output entry selects an entry-group */
		if (*ver >= 0x40 && *ver <= 0x41 && *hdr >= 0x12)
			idx += nvbios_rd08(bios, outp + 0x11) * 40;
		else
		if (*ver >= 0x42)
			idx += nvbios_rd08(bios, outp + 0x11) * 10;
	} else {
		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
						  ver, hdr, cnt, len))) {
			if (nvbios_rd08(bios, data + 0x00) == vs &&
			    nvbios_rd08(bios, data + 0x01) == pe)
				break;
		}
	}

	return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/rammap.h>
/* Locate the RAM mapping table via BIT 'P' v2 (pointer at offset 4).
 *
 * Fills *ver/*hdr/*cnt/*len with the table layout, and *snr/*ssz with
 * the per-entry sub-entry count and size.  Returns the table offset, or
 * 0x0000 if absent or of an unsupported version.
 */
u32
nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
		u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_P;
	u32 rammap = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2)
			rammap = nvbios_rd32(bios, bit_P.offset + 4);

		if (rammap) {
			*ver = nvbios_rd08(bios, rammap + 0);
			switch (*ver) {
			case 0x10:
			case 0x11:
				*hdr = nvbios_rd08(bios, rammap + 1);
				*cnt = nvbios_rd08(bios, rammap + 5);
				*len = nvbios_rd08(bios, rammap + 2);
				*snr = nvbios_rd08(bios, rammap + 4);
				*ssz = nvbios_rd08(bios, rammap + 3);
				return rammap;
			default:
				break;
			}
		}
	}

	return 0x0000;
}
/* Return the offset of rammap entry 'idx'.
 *
 * On success, *hdr/*cnt/*len are re-purposed to describe the entry's own
 * layout (size / sub-entry count / sub-entry size).  Returns 0x0000 when
 * the table is missing or 'idx' is out of range.
 */
u32
nvbios_rammapEe(struct nvkm_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8 snr, ssz;
	u32 table = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);

	if (!table || idx >= *cnt)
		return 0x0000;

	table += *hdr + (idx * (*len + (snr * ssz)));
	*hdr = *len;
	*cnt = snr;
	*len = ssz;
	return table;
}
/* Pretend a performance mode is also a rammap entry, helps coalesce
 * entries later on.  Zeroes *p and extracts the few rammap-relevant bits
 * from the perf entry at 'data'; returns 'data' unchanged.
 */
u32
nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
			  struct nvbios_ramcfg *p)
{
	u8 byte16 = nvbios_rd08(bios, data + 0x16);
	u8 byte17 = nvbios_rd08(bios, data + 0x17);

	memset(p, 0x00, sizeof(*p));

	p->rammap_00_16_20 = (byte16 & 0x20) >> 5;
	p->rammap_00_16_40 = (byte16 & 0x40) >> 6;
	p->rammap_00_17_02 = (byte17 & 0x02) >> 1;

	return data;
}
/* Parse rammap entry 'idx' into *p (frequency range plus per-version
 * feature bits).  *p is zeroed first apart from the version/header
 * fields; returns the entry offset, or 0 when absent/unsupported.
 */
u32
nvbios_rammapEp(struct nvkm_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
	u32 data = nvbios_rammapEe(bios, idx, ver, hdr, cnt, len), temp;
	memset(p, 0x00, sizeof(*p));
	p->rammap_ver = *ver;
	p->rammap_hdr = *hdr;
	/* '!!data * *ver' folds the no-entry case into the default arm */
	switch (!!data * *ver) {
	case 0x10:
		p->rammap_min      = nvbios_rd16(bios, data + 0x00);
		p->rammap_max      = nvbios_rd16(bios, data + 0x02);
		p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
		p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
		break;
	case 0x11:
		p->rammap_min      = nvbios_rd16(bios, data + 0x00);
		p->rammap_max      = nvbios_rd16(bios, data + 0x02);
		p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
		p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
		p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
		/* bytes 0x09..0x0c read as one little-endian dword; field
		 * names encode byte/bit positions within it */
		temp = nvbios_rd32(bios, data + 0x09);
		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
		p->rammap_11_0a_0800 = (temp & 0x00080000) >> 19;
		p->rammap_11_0b_01f0 = (temp & 0x01f00000) >> 20;
		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
		p->rammap_11_0d    = nvbios_rd08(bios, data + 0x0d);
		p->rammap_11_0e    = nvbios_rd08(bios, data + 0x0e);
		p->rammap_11_0f    = nvbios_rd08(bios, data + 0x0f);
		p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
		break;
	default:
		data = 0;
		break;
	}
	return data;
}
/* Find the rammap entry whose [rammap_min, rammap_max] frequency range
 * contains 'mhz'.  Returns the entry offset (with *info parsed), or 0
 * when no entry matches.
 */
u32
nvbios_rammapEm(struct nvkm_bios *bios, u16 mhz,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *info)
{
	u32 data;
	int idx = 0;

	do {
		data = nvbios_rammapEp(bios, idx++, ver, hdr, cnt, len, info);
	} while (data &&
		 !(mhz >= info->rammap_min && mhz <= info->rammap_max));

	return data;
}
/* Return the offset of rammap sub-entry 'idx' within the entry at
 * 'data' (layout described by ever/ehdr/ecnt/elen), setting *ver/*hdr
 * for the caller.  Returns 0 when 'idx' is out of range.
 */
u32
nvbios_rammapSe(struct nvkm_bios *bios, u32 data,
		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx, u8 *ver, u8 *hdr)
{
	if (idx >= ecnt)
		return 0;

	*ver = ever;
	*hdr = elen;
	return data + ehdr + (idx * elen);
}
/* Decode ramcfg strap data embedded in a performance table entry
 * (pre-rammap VBIOSes) into *p.  'size' is the per-strap stride within
 * the perf entry; entries shorter than 11 bytes are rejected.
 *
 * Returns the strap's offset, or 0 when 'size' is too small.
 */
u32
nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
			  struct nvbios_ramcfg *p)
{
	data += (idx * size);

	if (size < 11)
		return 0x00000000;

	p->ramcfg_ver = 0;
	p->ramcfg_timing   = nvbios_rd08(bios, data + 0x01);
	p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
	p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
	p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
	p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
	/* bit 0x10 lives at bit position 4; the previous '>> 3' (a
	 * copy-paste of the line above) produced 0 or 2, which collapses
	 * to 0 in a single-bit field */
	p->ramcfg_RON      = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 4;
	p->ramcfg_FBVDDQ   = (nvbios_rd08(bios, data + 0x03) & 0x80) >> 7;
	p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
	p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
	p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
	p->ramcfg_00_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
	p->ramcfg_00_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
	p->ramcfg_00_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
	p->ramcfg_00_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
	p->ramcfg_00_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
	p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
	p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
	return data;
}
/* Parse rammap sub-entry 'idx' into *p for table versions 1.0/1.1.
 *
 * Field names encode (version, byte offset, bit mask) of the source
 * bits.  Returns the sub-entry offset, or 0 when absent/unsupported.
 */
u32
nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
{
	data = nvbios_rammapSe(bios, data, ever, ehdr, ecnt, elen, idx, ver, hdr);
	p->ramcfg_ver = *ver;
	p->ramcfg_hdr = *hdr;
	/* '!!data * *ver' folds the no-entry case into the default arm */
	switch (!!data * *ver) {
	case 0x10:
		p->ramcfg_timing   = nvbios_rd08(bios, data + 0x01);
		p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
		p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
		p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
		p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
		p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
		p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
		p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
		p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
		p->ramcfg_FBVDDQ   = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
		p->ramcfg_10_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
		p->ramcfg_10_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
		p->ramcfg_10_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
		p->ramcfg_10_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
		p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
		p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
		break;
	case 0x11:
		p->ramcfg_timing   = nvbios_rd08(bios, data + 0x00);
		p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
		p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
		p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
		p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
		p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
		p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
		p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
		p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
		p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
		p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
		p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
		p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
		p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
		p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
		p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
		p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
		p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
		p->ramcfg_11_04    = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
		p->ramcfg_11_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
		p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
		p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
		p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
		p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
		p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
		p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
		p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
		p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
		p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
		p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
		p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
		p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
		p->ramcfg_11_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
		break;
	default:
		data = 0;
		break;
	}
	return data;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c |
/*
* Copyright 2005-2006 Erik Waling
* Copyright 2006 Stephane Marchesin
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/pll.h>
#include <subdev/vga.h>
/* Maps a PLL type (PLL_*) to the register controlling it, for chipsets
 * whose PLL-limits table (versions < 0x30) doesn't carry register
 * addresses itself.  Tables are terminated by a zeroed sentinel. */
struct pll_mapping {
	u8 type;
	u32 reg;
};

/* NV04 through NV3x */
static struct pll_mapping
nv04_pll_mapping[] = {
	{ PLL_CORE , 0x680500 },
	{ PLL_MEMORY, 0x680504 },
	{ PLL_VPLL0 , 0x680508 },
	{ PLL_VPLL1 , 0x680520 },
	{}
};

/* NV4x: core/memory PLLs moved into PCLOCK space */
static struct pll_mapping
nv40_pll_mapping[] = {
	{ PLL_CORE , 0x004000 },
	{ PLL_MEMORY, 0x004020 },
	{ PLL_VPLL0 , 0x680508 },
	{ PLL_VPLL1 , 0x680520 },
	{}
};

/* NV50 itself */
static struct pll_mapping
nv50_pll_mapping[] = {
	{ PLL_CORE , 0x004028 },
	{ PLL_SHADER, 0x004020 },
	{ PLL_UNK03 , 0x004000 },
	{ PLL_MEMORY, 0x004008 },
	{ PLL_UNK40 , 0x00e810 },
	{ PLL_UNK41 , 0x00e818 },
	{ PLL_UNK42 , 0x00e824 },
	{ PLL_VPLL0 , 0x614100 },
	{ PLL_VPLL1 , 0x614900 },
	{}
};

/* G84 and most other NV50-family chipsets */
static struct pll_mapping
g84_pll_mapping[] = {
	{ PLL_CORE , 0x004028 },
	{ PLL_SHADER, 0x004020 },
	{ PLL_MEMORY, 0x004008 },
	{ PLL_VDEC , 0x004030 },
	{ PLL_UNK41 , 0x00e818 },
	{ PLL_VPLL0 , 0x614100 },
	{ PLL_VPLL1 , 0x614900 },
	{}
};
/* Locate the PLL limits table, preferring BIT 'C' (v1: 16-bit pointer at
 * offset 8, v2: 32-bit pointer at offset 0) and falling back to the BMP
 * pointer at offset 142 for BMP >= 5.36.
 *
 * Fills *ver/*hdr/*cnt/*len (fixed layout assumed for the BMP case) and
 * returns the table offset; *ver is zeroed when nothing is found.
 */
static u32
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_C;
	u32 data = 0x0000;

	if (!bit_entry(bios, 'C', &bit_C)) {
		if (bit_C.version == 1 && bit_C.length >= 10)
			data = nvbios_rd16(bios, bit_C.offset + 8);
		if (bit_C.version == 2 && bit_C.length >= 4)
			data = nvbios_rd32(bios, bit_C.offset + 0);
		if (data) {
			*ver = nvbios_rd08(bios, data + 0);
			*hdr = nvbios_rd08(bios, data + 1);
			*len = nvbios_rd08(bios, data + 2);
			*cnt = nvbios_rd08(bios, data + 3);
			return data;
		}
	}

	if (bmp_version(bios) >= 0x0524) {
		data = nvbios_rd16(bios, bios->bmp_offset + 142);
		if (data) {
			*ver = nvbios_rd08(bios, data + 0);
			*hdr = 1;
			*cnt = 1;
			*len = 0x18;
			return data;
		}
	}

	*ver = 0x00;
	return data;
}
/* Select the hardcoded type->register PLL mapping for this chipset, or
 * NULL for chipsets whose limits table carries register addresses
 * itself (versions >= 0x30).
 */
static struct pll_mapping *
pll_map(struct nvkm_bios *bios)
{
	struct nvkm_device *device = bios->subdev.device;
	switch (device->card_type) {
	case NV_04:
	case NV_10:
	case NV_11:
	case NV_20:
	case NV_30:
		return nv04_pll_mapping;
	case NV_40:
		return nv40_pll_mapping;
	case NV_50:
		if (device->chipset == 0x50)
			return nv50_pll_mapping;
		else
		/* g84 map covers most of the family except later chips
		 * (>= 0xa3, minus the 0xaa/0xac IGPs) */
		if (device->chipset < 0xa3 ||
		    device->chipset == 0xaa ||
		    device->chipset == 0xac)
			return g84_pll_mapping;
		fallthrough;
	default:
		return NULL;
	}
}
/* Find the PLL limits entry for the PLL controlled by register 'reg',
 * storing the PLL type in *type.
 *
 * Table versions >= 0x30 embed the register in each entry (at +3);
 * older versions are resolved through the chipset's pll_map() and the
 * entry matched by register (falling back to the first entry when the
 * table holds no match).  Returns the entry offset or 0x0000.
 */
static u32
pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
	struct pll_mapping *map;
	u8 hdr, cnt;
	u32 data;

	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
	if (data && *ver >= 0x30) {
		data += hdr;
		while (cnt--) {
			if (nvbios_rd32(bios, data + 3) == reg) {
				*type = nvbios_rd08(bios, data + 0);
				return data;
			}
			data += *len;
		}
		return 0x0000;
	}

	map = pll_map(bios);
	while (map && map->reg) {
		if (map->reg == reg && *ver >= 0x20) {
			/* remember the first entry in case none matches */
			u32 addr = (data += hdr);
			*type = map->type;
			while (cnt--) {
				if (nvbios_rd32(bios, data) == map->reg)
					return data;
				data += *len;
			}
			return addr;
		} else
		if (map->reg == reg) {
			*type = map->type;
			return data + 1;
		}
		map++;
	}

	return 0x0000;
}
/* Find the PLL limits entry for the given PLL 'type', storing its
 * control register in *reg.
 *
 * Mirror image of pll_map_reg(): v0x30+ tables are matched by the type
 * byte (register read from the entry for versions < 0x50, zero
 * otherwise); older versions go through the chipset's pll_map().
 * Returns the entry offset or 0x0000.
 */
static u32
pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
	struct pll_mapping *map;
	u8 hdr, cnt;
	u32 data;

	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
	if (data && *ver >= 0x30) {
		data += hdr;
		while (cnt--) {
			if (nvbios_rd08(bios, data + 0) == type) {
				if (*ver < 0x50)
					*reg = nvbios_rd32(bios, data + 3);
				else
					*reg = 0;
				return data;
			}
			data += *len;
		}
		return 0x0000;
	}

	map = pll_map(bios);
	while (map && map->reg) {
		if (map->type == type && *ver >= 0x20) {
			/* remember the first entry in case none matches */
			u32 addr = (data += hdr);
			*reg = map->reg;
			while (cnt--) {
				if (nvbios_rd32(bios, data) == map->reg)
					return data;
				data += *len;
			}
			return addr;
		} else
		if (map->type == type) {
			*reg = map->reg;
			return data + 1;
		}
		map++;
	}

	return 0x0000;
}
}
/*
 * nvbios_pll_parse - decode VBIOS PLL limits for a PLL type or register.
 *
 * @type: either a PLL_* type identifier (<= PLL_MAX) or a raw register
 *        address (> PLL_MAX), in which case the type is looked up.
 * @info: output structure; zeroed and then populated from the table.
 *
 * Dispatches on the limits-table version returned by pll_map_reg()/
 * pll_map_type() and decodes the per-version entry layout.  When the
 * table provides no reference clock, falls back to the crystal frequency
 * (with an nv51-specific override read from 0x680524/CR27).  When no
 * VCO1 limits were found at all (pre-limit-table BIOS, or the empty
 * tables seen on nv18), BMP-era defaults are synthesised instead.
 *
 * Fix vs. previous revision: the pll_map_type() call had a mis-encoded
 * second argument ("®" instead of "&reg"), which did not compile.
 *
 * Returns 0 on success, -ENOENT when the table has no matching entry,
 * -EINVAL on an unknown table version.
 */
int
nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	u8 ver, len;
	u32 reg = type;
	u32 data;

	if (type > PLL_MAX) {
		reg  = type;
		data = pll_map_reg(bios, reg, &type, &ver, &len);
	} else {
		data = pll_map_type(bios, type, &reg, &ver, &len);
	}

	if (ver && !data)
		return -ENOENT;

	memset(info, 0, sizeof(*info));
	info->type = type;
	info->reg = reg;

	switch (ver) {
	case 0x00:
		break;
	case 0x10:
	case 0x11:
		/* 32-bit frequency fields, already in kHz */
		info->vco1.min_freq = nvbios_rd32(bios, data + 0);
		info->vco1.max_freq = nvbios_rd32(bios, data + 4);
		info->vco2.min_freq = nvbios_rd32(bios, data + 8);
		info->vco2.max_freq = nvbios_rd32(bios, data + 12);
		info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
		info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
		info->vco1.max_inputfreq = INT_MAX;
		info->vco2.max_inputfreq = INT_MAX;

		info->max_p = 0x7;
		info->max_p_usable = 0x6;

		/* these values taken from nv30/31/36 */
		switch (bios->version.chip) {
		case 0x36:
			info->vco1.min_n = 0x5;
			break;
		default:
			info->vco1.min_n = 0x1;
			break;
		}
		info->vco1.max_n = 0xff;
		info->vco1.min_m = 0x1;
		info->vco1.max_m = 0xd;

		/*
		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
		 * table version (apart from nv35)), N2 is compared to
		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
		 * save a comparison
		 */
		info->vco2.min_n = 0x4;
		switch (bios->version.chip) {
		case 0x30:
		case 0x35:
			info->vco2.max_n = 0x1f;
			break;
		default:
			info->vco2.max_n = 0x28;
			break;
		}
		info->vco2.min_m = 0x1;
		info->vco2.max_m = 0x4;
		break;
	case 0x20:
	case 0x21:
		/* 16-bit frequency fields stored in MHz, scaled to kHz */
		info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
		info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
		info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
		info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
		info->vco1.min_n = nvbios_rd08(bios, data + 20);
		info->vco1.max_n = nvbios_rd08(bios, data + 21);
		info->vco1.min_m = nvbios_rd08(bios, data + 22);
		info->vco1.max_m = nvbios_rd08(bios, data + 23);
		info->vco2.min_n = nvbios_rd08(bios, data + 24);
		info->vco2.max_n = nvbios_rd08(bios, data + 25);
		info->vco2.min_m = nvbios_rd08(bios, data + 26);
		info->vco2.max_m = nvbios_rd08(bios, data + 27);
		info->max_p = nvbios_rd08(bios, data + 29);
		info->max_p_usable = info->max_p;
		if (bios->version.chip < 0x60)
			info->max_p_usable = 0x6;
		info->bias_p = nvbios_rd08(bios, data + 30);
		/* refclk only present in longer entries */
		if (len > 0x22)
			info->refclk = nvbios_rd32(bios, data + 31);
		break;
	case 0x30:
		/* entry byte 1 holds a pointer to the actual record */
		data = nvbios_rd16(bios, data + 1);

		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
		info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
		info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
		info->vco1.min_n = nvbios_rd08(bios, data + 16);
		info->vco1.max_n = nvbios_rd08(bios, data + 17);
		info->vco1.min_m = nvbios_rd08(bios, data + 18);
		info->vco1.max_m = nvbios_rd08(bios, data + 19);
		info->vco2.min_n = nvbios_rd08(bios, data + 20);
		info->vco2.max_n = nvbios_rd08(bios, data + 21);
		info->vco2.min_m = nvbios_rd08(bios, data + 22);
		info->vco2.max_m = nvbios_rd08(bios, data + 23);
		info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
		info->bias_p = nvbios_rd08(bios, data + 27);
		info->refclk = nvbios_rd32(bios, data + 28);
		break;
	case 0x40:
		/* refclk lives in the entry itself; limits behind a pointer */
		info->refclk = nvbios_rd16(bios, data + 9) * 1000;
		data = nvbios_rd16(bios, data + 1);

		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
		info->vco1.min_m = nvbios_rd08(bios, data + 8);
		info->vco1.max_m = nvbios_rd08(bios, data + 9);
		info->vco1.min_n = nvbios_rd08(bios, data + 10);
		info->vco1.max_n = nvbios_rd08(bios, data + 11);
		info->min_p = nvbios_rd08(bios, data + 12);
		info->max_p = nvbios_rd08(bios, data + 13);
		break;
	case 0x50:
		info->refclk = nvbios_rd16(bios, data + 1) * 1000;
		/* info->refclk_alt = nvbios_rd16(bios, data + 3) * 1000; */
		info->vco1.min_freq = nvbios_rd16(bios, data + 5) * 1000;
		info->vco1.max_freq = nvbios_rd16(bios, data + 7) * 1000;
		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 9) * 1000;
		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 11) * 1000;
		info->vco1.min_m = nvbios_rd08(bios, data + 13);
		info->vco1.max_m = nvbios_rd08(bios, data + 14);
		info->vco1.min_n = nvbios_rd08(bios, data + 15);
		info->vco1.max_n = nvbios_rd08(bios, data + 16);
		info->min_p = nvbios_rd08(bios, data + 17);
		info->max_p = nvbios_rd08(bios, data + 18);
		break;
	default:
		nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
		return -EINVAL;
	}

	if (!info->refclk) {
		info->refclk = device->crystal;
		/* nv51: some PLLs may be fed from an alternate reference,
		 * selected via 0x680524; the rate depends on CR27 */
		if (bios->version.chip == 0x51) {
			u32 sel_clk = nvkm_rd32(device, 0x680524);
			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
			    (info->reg == 0x680520 && sel_clk & 0x80)) {
				if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
					info->refclk = 200000;
				else
					info->refclk = 25000;
			}
		}
	}

	/*
	 * By now any valid limit table ought to have set a max frequency for
	 * vco1, so if it's zero it's either a pre limit table bios, or one
	 * with an empty limit table (seen on nv18)
	 */
	if (!info->vco1.max_freq) {
		info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
		info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
		if (bmp_version(bios) < 0x0506) {
			info->vco1.max_freq = 256000;
			info->vco1.min_freq = 128000;
		}

		info->vco1.min_inputfreq = 0;
		info->vco1.max_inputfreq = INT_MAX;
		info->vco1.min_n = 0x1;
		info->vco1.max_n = 0xff;
		info->vco1.min_m = 0x1;

		if (device->crystal == 13500) {
			/* nv05 does this, nv11 doesn't, nv10 unknown */
			if (bios->version.chip < 0x11)
				info->vco1.min_m = 0x7;
			info->vco1.max_m = 0xd;
		} else {
			if (bios->version.chip < 0x11)
				info->vco1.min_m = 0x8;
			info->vco1.max_m = 0xe;
		}

		if (bios->version.chip <  0x17 ||
		    bios->version.chip == 0x1a ||
		    bios->version.chip == 0x20)
			info->max_p = 4;
		else
			info->max_p = 5;
		info->max_p_usable = info->max_p;
	}

	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c |
/*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/therm.h>
/*
 * therm_table - locate the VBIOS thermal table via the BIT 'P' entry.
 *
 * The table pointer lives at offset 12 (BIT P v1) or 16 (v2).  Fills the
 * header fields (*ver, *hdr, *len, *cnt) and returns the offset of the
 * first entry (table base plus header size), or 0 when no table exists.
 */
static u32
therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
{
	struct bit_entry bit_P;
	u32 therm = 0;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 1)
			therm = nvbios_rd32(bios, bit_P.offset + 12);
		else if (bit_P.version == 2)
			therm = nvbios_rd32(bios, bit_P.offset + 16);
		else
			nvkm_error(&bios->subdev,
				   "unknown offset for thermal in BIT P %d\n",
				   bit_P.version);
	}

	/* exit now if we haven't found the thermal table */
	if (!therm)
		return 0;

	*ver = nvbios_rd08(bios, therm + 0);
	*hdr = nvbios_rd08(bios, therm + 1);
	*len = nvbios_rd08(bios, therm + 2);
	*cnt = nvbios_rd08(bios, therm + 3);
	return therm + nvbios_rd08(bios, therm + 1);
}
/*
 * nvbios_therm_entry - offset of thermal-table entry @idx, or 0 if the
 * table is absent or @idx is out of range.  *ver/*len come from the
 * table header.
 */
static u32
nvbios_therm_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
	u8 hdr, cnt;
	u32 base = therm_table(bios, ver, &hdr, len, &cnt);

	if (!base || idx >= cnt)
		return 0;
	return base + idx * *len;
}
/*
 * nvbios_therm_sensor_parse - decode sensor calibration and thresholds.
 *
 * Walks the thermal table's tagged entries.  Entry id 0x0 switches the
 * active threshold "section" (only section 0, the core domain, is
 * handled; ambient sections abort the walk).  Entry 0x01 opens a new
 * sensor section; calibration values (0x10-0x13) and the offset are only
 * taken from the first one.  Threshold entries pack temperature in bits
 * 4..11 and hysteresis in bits 0..3.
 *
 * Returns 0 (also when the table is missing — *sensor is then left as
 * passed in), or -EINVAL for unsupported domains.
 */
int
nvbios_therm_sensor_parse(struct nvkm_bios *bios,
			  enum nvbios_therm_domain domain,
			  struct nvbios_therm_sensor *sensor)
{
	s8 thrs_section, sensor_section, offset;
	u8 ver, len, i;
	u32 entry;

	/* we only support the core domain for now */
	if (domain != NVBIOS_THERM_DOMAIN_CORE)
		return -EINVAL;

	/* Read the entries from the table */
	thrs_section = 0;
	sensor_section = -1;
	i = 0;
	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
		s16 value = nvbios_rd16(bios, entry + 1);

		switch (nvbios_rd08(bios, entry + 0)) {
		case 0x0:
			thrs_section = value;
			if (value > 0)
				return 0; /* we do not try to support ambient */
			break;
		case 0x01:
			sensor_section++;
			if (sensor_section == 0) {
				/* offset is stored in half-degrees */
				offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
				sensor->offset_constant = offset;
			}
			break;

		case 0x04:
			if (thrs_section == 0) {
				sensor->thrs_critical.temp = (value & 0xff0) >> 4;
				sensor->thrs_critical.hysteresis = value & 0xf;
			}
			break;

		case 0x07:
			if (thrs_section == 0) {
				sensor->thrs_down_clock.temp = (value & 0xff0) >> 4;
				sensor->thrs_down_clock.hysteresis = value & 0xf;
			}
			break;

		case 0x08:
			if (thrs_section == 0) {
				sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4;
				sensor->thrs_fan_boost.hysteresis = value & 0xf;
			}
			break;

		case 0x10:
			if (sensor_section == 0)
				sensor->offset_num = value;
			break;

		case 0x11:
			if (sensor_section == 0)
				sensor->offset_den = value;
			break;

		case 0x12:
			if (sensor_section == 0)
				sensor->slope_mult = value;
			break;

		case 0x13:
			if (sensor_section == 0)
				sensor->slope_div = value;
			break;
		case 0x32:
			if (thrs_section == 0) {
				sensor->thrs_shutdown.temp = (value & 0xff0) >> 4;
				sensor->thrs_shutdown.hysteresis = value & 0xf;
			}
			break;
		}
	}

	return 0;
}
/*
 * nvbios_therm_fan_parse - decode fan management data from the thermal
 * table.
 *
 * Walks the tagged entries, filling duty limits (0x22), trip points
 * (0x24/0x25), PWM frequency (0x26), timing (0x3b/0x3c) and the linear
 * fan range (0x46).  The fan mode is lowered monotonically (LINEAR <
 * TRIP < OTHER); Fermi and later always end up in linear mode.
 *
 * Fixes vs. previous revision:
 *  - duty_lut is now static const, so it lives in .rodata instead of
 *    being rebuilt on the stack on every call;
 *  - a 0x25 (trip duty override) entry appearing before any 0x24 entry
 *    previously wrote through &fan->trip[-1]; it is now skipped.
 *
 * Always returns 0.
 */
int
nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
{
	struct nvbios_therm_trip_point *cur_trip = NULL;
	u8 ver, len, i;
	u32 entry;

	/* maps the 4-bit duty field of a 0x24 entry to a duty percentage */
	static const uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
					    75, 0, 85, 0, 100, 0, 100, 0 };

	i = 0;
	fan->nr_fan_trip = 0;
	fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
		s16 value = nvbios_rd16(bios, entry + 1);

		switch (nvbios_rd08(bios, entry + 0)) {
		case 0x22:
			fan->min_duty = value & 0xff;
			fan->max_duty = (value & 0xff00) >> 8;
			break;
		case 0x24:
			fan->nr_fan_trip++;
			if (fan->fan_mode > NVBIOS_THERM_FAN_TRIP)
				fan->fan_mode = NVBIOS_THERM_FAN_TRIP;
			cur_trip = &fan->trip[fan->nr_fan_trip - 1];
			cur_trip->hysteresis = value & 0xf;
			cur_trip->temp = (value & 0xff0) >> 4;
			cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
			break;
		case 0x25:
			/* duty override for the most recent trip point;
			 * ignore it if no 0x24 entry preceded it (would
			 * otherwise write out of bounds at trip[-1]) */
			if (!fan->nr_fan_trip)
				break;
			cur_trip = &fan->trip[fan->nr_fan_trip - 1];
			cur_trip->fan_duty = value;
			break;
		case 0x26:
			if (!fan->pwm_freq)
				fan->pwm_freq = value;
			break;
		case 0x3b:
			fan->bump_period = value;
			break;
		case 0x3c:
			fan->slow_down_period = value;
			break;
		case 0x46:
			if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
				fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
			fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
			fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
			break;
		}
	}

	/* starting from fermi, fan management is always linear */
	if (bios->subdev.device->card_type >= NV_C0 &&
	    fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
		fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
	}

	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include <subdev/timer.h>
/* Poll 0x001710 until bits 0x3 clear (BAR1 idle), with a 2ms timeout. */
void
gm107_bar_bar1_wait(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x001710) & 0x00000003))
			break;
	);
}
/* Poll 0x001710 until bits 0xc clear (BAR2 idle), with a 2ms timeout. */
static void
gm107_bar_bar2_wait(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x001710) & 0x0000000c))
			break;
	);
}
/* GM107: reuses the GF100 implementation except for the wait hooks,
 * which poll 0x001710 status bits instead of issuing flushes. */
static const struct nvkm_bar_func
gm107_bar_func = {
	.dtor = gf100_bar_dtor,
	.oneinit = gf100_bar_oneinit,
	.bar1.init = gf100_bar_bar1_init,
	.bar1.fini = gf100_bar_bar1_fini,
	.bar1.wait = gm107_bar_bar1_wait,
	.bar1.vmm = gf100_bar_bar1_vmm,
	.bar2.init = gf100_bar_bar2_init,
	.bar2.fini = gf100_bar_bar2_fini,
	.bar2.wait = gm107_bar_bar2_wait,
	.bar2.vmm = gf100_bar_bar2_vmm,
	.flush = g84_bar_flush,
};
/* Constructor entry point: instantiate a GF100-style BAR with GM107 hooks. */
int
gm107_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bar **pbar)
{
	return gf100_bar_new_(&gm107_bar_func, device, type, inst, pbar);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
#include <core/memory.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
struct nvkm_vmm *
gf100_bar_bar1_vmm(struct nvkm_bar *base)
{
return gf100_bar(base)->bar[1].vmm;
}
/* Wait for BAR1 by flushing; done twice for reasons unknown (see comment). */
void
gf100_bar_bar1_wait(struct nvkm_bar *base)
{
	/* NFI why it's twice. */
	nvkm_bar_flush(base);
	nvkm_bar_flush(base);
}
/* Disable BAR1 by clearing the enable bit (bit 31) of 0x001704. */
void
gf100_bar_bar1_fini(struct nvkm_bar *bar)
{
	nvkm_mask(bar->subdev.device, 0x001704, 0x80000000, 0x00000000);
}
/* Enable BAR1: program 0x001704 with the BAR1 instance-block address
 * (shifted to a 4KiB page index) plus the enable bit. */
void
gf100_bar_bar1_init(struct nvkm_bar *base)
{
	struct gf100_bar *bar = gf100_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	u32 inst_page = nvkm_memory_addr(bar->bar[1].inst) >> 12;

	nvkm_wr32(device, 0x001704, 0x80000000 | inst_page);
}
struct nvkm_vmm *
gf100_bar_bar2_vmm(struct nvkm_bar *base)
{
return gf100_bar(base)->bar[0].vmm;
}
/* Disable BAR2 by clearing the enable bit (bit 31) of 0x001714. */
void
gf100_bar_bar2_fini(struct nvkm_bar *bar)
{
	nvkm_mask(bar->subdev.device, 0x001714, 0x80000000, 0x00000000);
}
/* Enable BAR2: program 0x001714 with the BAR2 instance-block page index
 * plus the enable bit; bit 30 additionally selects halved BAR2 mode
 * when the NvBar2Halve option was set. */
void
gf100_bar_bar2_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gf100_bar *bar = gf100_bar(base);
	u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
	if (bar->bar2_halve)
		addr |= 0x40000000;
	nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
/*
 * gf100_bar_oneinit_bar - one-time setup for a single BAR aperture.
 *
 * Allocates a 4KiB instance block, creates a VMM spanning the PCI BAR
 * (halved for BAR2 when requested), bootstraps the BAR2 page tables
 * (BAR2 must be self-hosting), and joins the VMM to the instance block.
 *
 * @bar_nr: PCI resource index (1 = BAR1, 3 = BAR2).
 * Returns 0 on success or a negative errno.
 */
static int
gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
		      struct lock_class_key *key, int bar_nr)
{
	struct nvkm_device *device = bar->base.subdev.device;
	resource_size_t bar_len;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
			      &bar_vm->inst);
	if (ret)
		return ret;

	bar_len = device->func->resource_size(device, bar_nr);
	if (!bar_len)
		return -ENOMEM;
	if (bar_nr == 3 && bar->bar2_halve)
		bar_len >>= 1;

	ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
			   (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
	if (ret)
		return ret;

	atomic_inc(&bar_vm->vmm->engref[NVKM_SUBDEV_BAR]);
	bar_vm->vmm->debug = bar->base.subdev.debug;

	/*
	 * Bootstrap page table lookup.
	 */
	if (bar_nr == 3) {
		ret = nvkm_vmm_boot(bar_vm->vmm);
		if (ret)
			return ret;
	}

	return nvkm_vmm_join(bar_vm->vmm, bar_vm->inst);
}
/*
 * One-time BAR setup: BAR2 is brought up first (and immediately
 * initialised) so that instance memory access works, then BAR1.
 * BAR2 is optional — only set up when the implementation provides
 * a bar2.init hook.
 */
int
gf100_bar_oneinit(struct nvkm_bar *base)
{
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar2_lock;
	struct gf100_bar *bar = gf100_bar(base);
	int ret;

	/* BAR2 */
	if (bar->base.func->bar2.init) {
		ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
		if (ret)
			return ret;

		bar->base.subdev.oneinit = true;
		nvkm_bar_bar2_init(bar->base.subdev.device);
	}

	/* BAR1 */
	ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
	if (ret)
		return ret;

	return 0;
}
/* Destructor: part each VMM from its instance block before dropping the
 * references — BAR1 (bar[1]) first, then BAR2 (bar[0]). */
void *
gf100_bar_dtor(struct nvkm_bar *base)
{
	struct gf100_bar *bar = gf100_bar(base);

	nvkm_vmm_part(bar->bar[1].vmm, bar->bar[1].inst);
	nvkm_vmm_unref(&bar->bar[1].vmm);
	nvkm_memory_unref(&bar->bar[1].inst);

	nvkm_vmm_part(bar->bar[0].vmm, bar->bar[0].inst);
	nvkm_vmm_unref(&bar->bar[0].vmm);
	nvkm_memory_unref(&bar->bar[0].inst);
	return bar;
}
int
gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
{
struct gf100_bar *bar;
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
nvkm_bar_ctor(func, device, type, inst, &bar->base);
bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
*pbar = &bar->base;
return 0;
}
/* GF100 BAR implementation; both waits use the double-flush bar1 wait,
 * and flushing itself goes through the G84 0x070000 interface. */
static const struct nvkm_bar_func
gf100_bar_func = {
	.dtor = gf100_bar_dtor,
	.oneinit = gf100_bar_oneinit,
	.bar1.init = gf100_bar_bar1_init,
	.bar1.fini = gf100_bar_bar1_fini,
	.bar1.wait = gf100_bar_bar1_wait,
	.bar1.vmm = gf100_bar_bar1_vmm,
	.bar2.init = gf100_bar_bar2_init,
	.bar2.fini = gf100_bar_bar2_fini,
	.bar2.wait = gf100_bar_bar1_wait,
	.bar2.vmm = gf100_bar_bar2_vmm,
	.flush = g84_bar_flush,
};
/* Constructor entry point for the plain GF100 BAR implementation. */
int
gf100_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bar **pbar)
{
	return gf100_bar_new_(&gf100_bar_func, device, type, inst, pbar);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
#include <core/memory.h>
#include <subdev/timer.h>
/* Poll 0xb80f50 until bits 0xc clear (BAR2 idle), with a 2ms timeout. */
static void
tu102_bar_bar2_wait(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0xb80f50) & 0x0000000c))
			break;
	);
}
/* Disable BAR2 by clearing the enable bit (bit 31) of 0xb80f48. */
static void
tu102_bar_bar2_fini(struct nvkm_bar *bar)
{
	nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
}
/* Enable BAR2: program 0xb80f48 with the BAR2 instance-block page index
 * plus the enable bit; bit 30 selects halved BAR2 mode when requested. */
static void
tu102_bar_bar2_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gf100_bar *bar = gf100_bar(base);
	u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
	if (bar->bar2_halve)
		addr |= 0x40000000;
	nvkm_wr32(device, 0xb80f48, 0x80000000 | addr);
}
/* Poll 0xb80f50 until bits 0x3 clear (BAR1 idle), with a 2ms timeout. */
static void
tu102_bar_bar1_wait(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0xb80f50) & 0x00000003))
			break;
	);
}
/* Disable BAR1 by clearing the enable bit (bit 31) of 0xb80f40. */
static void
tu102_bar_bar1_fini(struct nvkm_bar *bar)
{
	nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
}
/* Enable BAR1: program 0xb80f40 with the BAR1 instance-block page index
 * plus the enable bit. */
static void
tu102_bar_bar1_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gf100_bar *bar = gf100_bar(base);
	const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
	nvkm_wr32(device, 0xb80f40, 0x80000000 | addr);
}
/* TU102: GF100 behaviour with the BAR control/status registers moved to
 * the 0xb80fxx range. */
static const struct nvkm_bar_func
tu102_bar = {
	.dtor = gf100_bar_dtor,
	.oneinit = gf100_bar_oneinit,
	.bar1.init = tu102_bar_bar1_init,
	.bar1.fini = tu102_bar_bar1_fini,
	.bar1.wait = tu102_bar_bar1_wait,
	.bar1.vmm = gf100_bar_bar1_vmm,
	.bar2.init = tu102_bar_bar2_init,
	.bar2.fini = tu102_bar_bar2_fini,
	.bar2.wait = tu102_bar_bar2_wait,
	.bar2.vmm = gf100_bar_bar2_vmm,
	.flush = g84_bar_flush,
};
/* Constructor entry point for the TU102 BAR implementation. */
int
tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bar **pbar)
{
	return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
/* Flush via 0x00330c: set bit 0 to trigger, poll (2ms timeout) until the
 * busy bit (bit 1) clears.  Serialised by the shared bar lock. */
static void
nv50_bar_flush(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->base.lock, flags);
	nvkm_wr32(device, 0x00330c, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->base.lock, flags);
}
struct nvkm_vmm *
nv50_bar_bar1_vmm(struct nvkm_bar *base)
{
return nv50_bar(base)->bar1_vmm;
}
/* BAR1 wait is implemented as a single flush on NV50. */
void
nv50_bar_bar1_wait(struct nvkm_bar *base)
{
	nvkm_bar_flush(base);
}
/* Disable BAR1 by zeroing its DMA object pointer at 0x001708. */
void
nv50_bar_bar1_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
}
/* Enable BAR1: point 0x001708 at the BAR1 gpuobj (offset in 16-byte
 * units) with the valid bit (bit 31) set. */
void
nv50_bar_bar1_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
}
struct nvkm_vmm *
nv50_bar_bar2_vmm(struct nvkm_bar *base)
{
return nv50_bar(base)->bar2_vmm;
}
/* Disable BAR2 by zeroing its DMA object pointer at 0x00170c. */
void
nv50_bar_bar2_fini(struct nvkm_bar *bar)
{
	nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
}
/* Enable BAR2: program 0x001704 with the channel memory address (written
 * twice — first without, then with bit 30 set), then point 0x00170c at
 * the BAR2 gpuobj with the valid bit set.  Write order matters here. */
void
nv50_bar_bar2_init(struct nvkm_bar *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct nv50_bar *bar = nv50_bar(base);
	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
}
void
nv50_bar_init(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
struct nvkm_device *device = bar->base.subdev.device;
int i;
for (i = 0; i < 8; i++)
nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
}
/*
 * nv50_bar_oneinit - one-time setup of BAR1/BAR2 apertures on NV50.
 *
 * Builds the channel memory block (bar->mem), padding and page
 * directory, then for each BAR creates a VMM spanning the PCI resource,
 * joins it to the channel, and writes a 24-byte DMA object describing
 * the aperture.  BAR2 is set up first (and its page tables bootstrapped)
 * so instance memory can be accessed through it while BAR1 is built.
 *
 * Note the `limit-- - start` idiom: the VMM gets the exclusive size
 * while the DMA object below stores the now-inclusive limit.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	struct nvkm_device *device = bar->base.subdev.device;
	static struct lock_class_key bar1_lock;
	static struct lock_class_key bar2_lock;
	u64 start, limit, size;
	int ret;

	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
			      &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
	if (ret)
		return ret;

	/* BAR2 */
	start = 0x0100000000ULL;
	size = device->func->resource_size(device, 3);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar2_lock, "bar2", &bar->bar2_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar2_vmm->debug = bar->base.subdev.debug;

	/* BAR2 must be self-hosting: bootstrap its page tables */
	ret = nvkm_vmm_boot(bar->bar2_vmm);
	if (ret)
		return ret;

	ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
	if (ret)
		return ret;

	/* 24-byte DMA object describing the BAR2 aperture */
	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
	if (ret)
		return ret;

	nvkm_kmap(bar->bar2);
	nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar2, 0x10, 0x00000000);
	nvkm_wo32(bar->bar2, 0x14, 0x00000000);
	nvkm_done(bar->bar2);

	bar->base.subdev.oneinit = true;
	nvkm_bar_bar2_init(device);

	/* BAR1 */
	start = 0x0000000000ULL;
	size = device->func->resource_size(device, 1);
	if (!size)
		return -ENOMEM;
	limit = start + size;

	ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
			   &bar1_lock, "bar1", &bar->bar1_vmm);
	if (ret)
		return ret;

	atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
	bar->bar1_vmm->debug = bar->base.subdev.debug;

	ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
	if (ret)
		return ret;

	/* 24-byte DMA object describing the BAR1 aperture */
	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
	if (ret)
		return ret;

	nvkm_kmap(bar->bar1);
	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				   upper_32_bits(start));
	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
	nvkm_done(bar->bar1);
	return 0;
}
/* Destructor: only tear down if oneinit got far enough to allocate
 * bar->mem; part each VMM from the channel before unreferencing it. */
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
	struct nv50_bar *bar = nv50_bar(base);
	if (bar->mem) {
		nvkm_gpuobj_del(&bar->bar1);
		nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar1_vmm);
		nvkm_gpuobj_del(&bar->bar2);
		nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
		nvkm_vmm_unref(&bar->bar2_vmm);
		nvkm_gpuobj_del(&bar->pgd);
		nvkm_gpuobj_del(&bar->pad);
		nvkm_gpuobj_del(&bar->mem);
	}
	return bar;
}
/* Common NV50-family constructor: allocate the wrapper, initialise the
 * base subdev, and record the page-directory offset for oneinit. */
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, u32 pgd_addr, struct nvkm_bar **pbar)
{
	struct nv50_bar *bar;

	bar = kzalloc(sizeof(*bar), GFP_KERNEL);
	if (bar == NULL)
		return -ENOMEM;
	nvkm_bar_ctor(func, device, type, inst, &bar->base);
	bar->pgd_addr = pgd_addr;
	*pbar = &bar->base;
	return 0;
}
/* NV50 BAR implementation; both waits are flush-based, and flushing
 * uses the NV50-specific 0x00330c interface. */
static const struct nvkm_bar_func
nv50_bar_func = {
	.dtor = nv50_bar_dtor,
	.oneinit = nv50_bar_oneinit,
	.init = nv50_bar_init,
	.bar1.init = nv50_bar_bar1_init,
	.bar1.fini = nv50_bar_bar1_fini,
	.bar1.wait = nv50_bar_bar1_wait,
	.bar1.vmm = nv50_bar_bar1_vmm,
	.bar2.init = nv50_bar_bar2_init,
	.bar2.fini = nv50_bar_bar2_fini,
	.bar2.wait = nv50_bar_bar1_wait,
	.bar2.vmm = nv50_bar_bar2_vmm,
	.flush = nv50_bar_flush,
};
/* Constructor entry point for NV50; 0x1400 is the pgd_addr passed to
 * nv50_bar_new_ (used as the pad gpuobj size in oneinit). */
int
nv50_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_bar **pbar)
{
	return nv50_bar_new_(&nv50_bar_func, device, type, inst, 0x1400, pbar);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Dispatch a write-flush to the implementation, if one is provided;
 * safe to call with a NULL bar. */
void
nvkm_bar_flush(struct nvkm_bar *bar)
{
	if (!bar || !bar->func->flush)
		return;
	bar->func->flush(bar);
}
/* Return the BAR1 VMM.  Unlike the BAR2 accessor below, this assumes
 * device->bar is present — callers only use it once BAR1 exists. */
struct nvkm_vmm *
nvkm_bar_bar1_vmm(struct nvkm_device *device)
{
	return device->bar->func->bar1.vmm(device->bar);
}
/* Reprogram BAR1 and wait for the hardware to settle; no-op without a
 * BAR subdev. */
void
nvkm_bar_bar1_reset(struct nvkm_device *device)
{
	struct nvkm_bar *bar = device->bar;

	if (!bar)
		return;
	bar->func->bar1.init(bar);
	bar->func->bar1.wait(bar);
}
/* Return the BAR2 VMM, or NULL when BAR2 is not (yet) usable. */
struct nvkm_vmm *
nvkm_bar_bar2_vmm(struct nvkm_device *device)
{
	/* Denies access to BAR2 when it's not initialised, used by INSTMEM
	 * to know when object access needs to go through the BAR0 window.
	 */
	struct nvkm_bar *bar = device->bar;
	if (bar && bar->bar2)
		return bar->func->bar2.vmm(bar);
	return NULL;
}
/* Reprogram BAR2 and wait for it, but only if it is currently active. */
void
nvkm_bar_bar2_reset(struct nvkm_device *device)
{
	struct nvkm_bar *bar = device->bar;
	if (bar && bar->bar2) {
		bar->func->bar2.init(bar);
		bar->func->bar2.wait(bar);
	}
}
/* Shut down BAR2 and mark it inactive so bar2_vmm() returns NULL. */
void
nvkm_bar_bar2_fini(struct nvkm_device *device)
{
	struct nvkm_bar *bar = device->bar;
	if (bar && bar->bar2) {
		bar->func->bar2.fini(bar);
		bar->bar2 = false;
	}
}
/* Bring up BAR2 and mark it active.  Guarded on: subdev oneinit having
 * run, BAR2 not already active, and the implementation supporting it. */
void
nvkm_bar_bar2_init(struct nvkm_device *device)
{
	struct nvkm_bar *bar = device->bar;
	if (bar && bar->subdev.oneinit && !bar->bar2 && bar->func->bar2.init) {
		bar->func->bar2.init(bar);
		bar->func->bar2.wait(bar);
		bar->bar2 = true;
	}
}
/* Subdev fini hook: shut down BAR1 if the implementation provides a
 * fini (BAR2 teardown happens separately via nvkm_bar_bar2_fini()). */
static int
nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_bar *bar = nvkm_bar(subdev);
	if (bar->func->bar1.fini)
		bar->func->bar1.fini(bar);
	return 0;
}
/* Subdev init hook: bring up BAR1 (init + wait), then run the optional
 * implementation-wide init. */
static int
nvkm_bar_init(struct nvkm_subdev *subdev)
{
	struct nvkm_bar *bar = nvkm_bar(subdev);
	bar->func->bar1.init(bar);
	bar->func->bar1.wait(bar);
	if (bar->func->init)
		bar->func->init(bar);
	return 0;
}
static int
nvkm_bar_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
return bar->func->oneinit(bar);
}
/* Subdev destructor: take BAR2 down first, then let the implementation
 * free its state. */
static void *
nvkm_bar_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_bar *bar = nvkm_bar(subdev);
	nvkm_bar_bar2_fini(subdev->device);
	return bar->func->dtor(bar);
}
/* Generic subdev hooks for the BAR subsystem. */
static const struct nvkm_subdev_func
nvkm_bar = {
	.dtor = nvkm_bar_dtor,
	.oneinit = nvkm_bar_oneinit,
	.init = nvkm_bar_init,
	.fini = nvkm_bar_fini,
};
/* Base-class constructor: wire up the subdev, store the implementation
 * vtable, and initialise the flush lock. */
void
nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_bar *bar)
{
	nvkm_subdev_ctor(&nvkm_bar, device, type, inst, &bar->subdev);
	bar->func = func;
	spin_lock_init(&bar->lock);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "nv50.h"
#include <subdev/timer.h>
/* Flush pending BAR writes: kick register 0x070000 and poll (up to 2ms)
 * for the busy bit (bit 1) to clear.  The spinlock serialises concurrent
 * flush requests against the single flush register.
 */
void
g84_bar_flush(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nvkm_wr32(device, 0x070000, 0x00000001);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
			break;
	);
	spin_unlock_irqrestore(&bar->lock, flags);
}
/* G84: reuses the NV50 BAR implementation, adding a dedicated flush.
 * NOTE(review): bar2.wait points at the bar1 wait hook — presumably the
 * same idle mechanism covers both BARs on this chipset; confirm upstream.
 */
static const struct nvkm_bar_func
g84_bar_func = {
	.dtor = nv50_bar_dtor,
	.oneinit = nv50_bar_oneinit,
	.init = nv50_bar_init,
	.bar1.init = nv50_bar_bar1_init,
	.bar1.fini = nv50_bar_bar1_fini,
	.bar1.wait = nv50_bar_bar1_wait,
	.bar1.vmm = nv50_bar_bar1_vmm,
	.bar2.init = nv50_bar_bar2_init,
	.bar2.fini = nv50_bar_bar2_fini,
	.bar2.wait = nv50_bar_bar1_wait,
	.bar2.vmm = nv50_bar_bar2_vmm,
	.flush = g84_bar_flush,
};
/* Construct the G84 BAR subdev via the NV50 backend (0x200 is the
 * chipset-specific argument passed through to nv50_bar_new_).
 */
int
g84_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	    struct nvkm_bar **pbar)
{
	return nv50_bar_new_(&g84_bar_func, device, type, inst, 0x200, pbar);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
/* GK20A (Tegra iGPU): GF100 backend with BAR1 only — no BAR2 hooks are
 * installed, so the generic BAR2 paths stay inactive for this chipset.
 */
static const struct nvkm_bar_func
gk20a_bar_func = {
	.dtor = gf100_bar_dtor,
	.oneinit = gf100_bar_oneinit,
	.bar1.init = gf100_bar_bar1_init,
	.bar1.wait = gf100_bar_bar1_wait,
	.bar1.vmm = gf100_bar_bar1_vmm,
	.flush = g84_bar_flush,
};
/* Construct the GK20A BAR subdev and request an uncached ioremap of
 * the BAR aperture (iomap_uncached is consumed by the mapping code).
 */
int
gk20a_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bar **pbar)
{
	int ret;

	ret = gf100_bar_new_(&gk20a_bar_func, device, type, inst, pbar);
	if (ret)
		return ret;

	(*pbar)->iomap_uncached = true;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gf100.h"
/* GM20B (Tegra iGPU): GF100 backend with the GM107 BAR1 wait; BAR1
 * only — no BAR2 hooks, so the generic BAR2 paths stay inactive.
 */
static const struct nvkm_bar_func
gm20b_bar_func = {
	.dtor = gf100_bar_dtor,
	.oneinit = gf100_bar_oneinit,
	.bar1.init = gf100_bar_bar1_init,
	.bar1.wait = gm107_bar_bar1_wait,
	.bar1.vmm = gf100_bar_bar1_vmm,
	.flush = g84_bar_flush,
};
/* Construct the GM20B BAR subdev and request an uncached ioremap of
 * the BAR aperture (iomap_uncached is consumed by the mapping code).
 */
int
gm20b_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_bar **pbar)
{
	int ret;

	ret = gf100_bar_new_(&gm20b_bar_func, device, type, inst, pbar);
	if (ret)
		return ret;

	(*pbar)->iomap_uncached = true;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <subdev/vfn.h>
#include <engine/fifo.h>
#include <nvif/class.h>
/* Interrupt handler for a replayable-fault buffer: forward the event to
 * listeners (SVM) via the fault subdev's event source.
 */
static irqreturn_t
tu102_fault_buffer_notify(struct nvkm_inth *inth)
{
	struct nvkm_fault_buffer *buffer = container_of(inth, typeof(*buffer), inth);
	nvkm_event_ntfy(&buffer->fault->event, buffer->id, NVKM_FAULT_BUFFER_EVENT_PENDING);
	return IRQ_HANDLED;
}
/* Enable/disable delivery of this buffer's interrupt vector. */
static void
tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	if (enable)
		nvkm_inth_allow(&buffer->inth);
	else
		nvkm_inth_block(&buffer->inth);
}
/* Disable the fault buffer by clearing its enable bit (bit 31 of the
 * per-buffer control register at 0xb83010 + id * 0x20).
 */
static void
tu102_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x20;
	nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
}
/* Program the buffer's BAR2 address into the hardware and enable it.
 * Order matters: the address registers must be written while the enable
 * bit (bit 31) is clear; it is set last to activate the buffer.
 */
static void
tu102_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x20;
	nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
	nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x80000000);
}
/* Read the buffer's entry count (low 20 bits of the control register)
 * and record the MMIO offsets of its GET/PUT pointers.
 */
static void
tu102_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x20;
	nvkm_mask(device, 0xb83010 + foff, 0x40000000, 0x40000000);
	buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff;
	buffer->get = 0xb83008 + foff;
	buffer->put = 0xb8300c + foff;
}
/* Non-buffered ("info") fault interrupt: decode the fault descriptor
 * from the 0xb830xx registers, hand it to FIFO for recovery/reporting,
 * and ack the interrupt (bit 31 of 0xb83094).
 */
static irqreturn_t
tu102_fault_info_fault(struct nvkm_inth *inth)
{
	struct nvkm_fault *fault = container_of(inth, typeof(*fault), info_fault);
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_data info;
	const u32 addrlo = nvkm_rd32(device, 0xb83080);
	const u32 addrhi = nvkm_rd32(device, 0xb83084);
	const u32 info0 = nvkm_rd32(device, 0xb83088);
	const u32 insthi = nvkm_rd32(device, 0xb8308c);
	const u32 info1 = nvkm_rd32(device, 0xb83090);
	/* info0 packs engine ID (low byte) and instance-address bits. */
	info.addr = ((u64)addrhi << 32) | addrlo;
	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
	info.time = 0;
	info.engine = (info0 & 0x000000ff);
	info.valid = (info1 & 0x80000000) >> 31;
	info.gpc = (info1 & 0x1f000000) >> 24;
	info.hub = (info1 & 0x00100000) >> 20;
	info.access = (info1 & 0x000f0000) >> 16;
	info.client = (info1 & 0x00007f00) >> 8;
	info.reason = (info1 & 0x0000001f);
	nvkm_fifo_fault(device->fifo, &info);
	nvkm_wr32(device, 0xb83094, 0x80000000);
	return IRQ_HANDLED;
}
/* Shut down fault handling: stop new replayable-fault notifications,
 * drain any queued processing work, disable buffer 0, then block the
 * info-fault interrupt.
 */
static void
tu102_fault_fini(struct nvkm_fault *fault)
{
	nvkm_event_ntfy_block(&fault->nrpfb);
	flush_work(&fault->nrpfb_work);
	if (fault->buffer[0])
		fault->func->buffer.fini(fault->buffer[0]);
	nvkm_inth_block(&fault->info_fault);
}
/* Start fault handling: reverse order of tu102_fault_fini(). */
static void
tu102_fault_init(struct nvkm_fault *fault)
{
	nvkm_inth_allow(&fault->info_fault);
	fault->func->buffer.init(fault->buffer[0]);
	nvkm_event_ntfy_allow(&fault->nrpfb);
}
/* One-time setup: register interrupt handlers with the VFN interrupt
 * controller.  Vector numbers are discovered from 0x100ee0 (info fault)
 * and 0x100ee4 + i*4 (per-buffer), then GV100 common setup runs.
 */
static int
tu102_fault_oneinit(struct nvkm_fault *fault)
{
	struct nvkm_device *device = fault->subdev.device;
	struct nvkm_intr *intr = &device->vfn->intr;
	int ret, i;
	ret = nvkm_inth_add(intr, nvkm_rd32(device, 0x100ee0) & 0x0000ffff,
			    NVKM_INTR_PRIO_NORMAL, &fault->subdev, tu102_fault_info_fault,
			    &fault->info_fault);
	if (ret)
		return ret;
	for (i = 0; i < fault->buffer_nr; i++) {
		/* NOTE(review): buffer vector read uses the high halfword
		 * (>> 16), unlike the info vector's low halfword — confirm
		 * against register documentation. */
		ret = nvkm_inth_add(intr, nvkm_rd32(device, 0x100ee4 + (i * 4)) >> 16,
				    NVKM_INTR_PRIO_NORMAL, &fault->subdev,
				    tu102_fault_buffer_notify, &fault->buffer[i]->inth);
		if (ret)
			return ret;
	}
	return gv100_fault_oneinit(fault);
}
/* TU102: two 32-byte-entry fault buffers; buffer 1 is exposed to
 * userspace as VOLTA_FAULT_BUFFER_A (user.rp == 1).
 */
static const struct nvkm_fault_func
tu102_fault = {
	.oneinit = tu102_fault_oneinit,
	.init = tu102_fault_init,
	.fini = tu102_fault_fini,
	.buffer.nr = 2,
	.buffer.entry_size = 32,
	.buffer.info = tu102_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = tu102_fault_buffer_init,
	.buffer.fini = tu102_fault_buffer_fini,
	.buffer.intr = tu102_fault_buffer_intr,
	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};
/* Construct the TU102 fault subdev and attach the worker that drains
 * the non-replayable fault buffer (shared with GV100).
 */
int
tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	int ret;

	ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
	if (ret == 0)
		INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);
	return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <core/event.h>
#include <subdev/mmu.h>
#include <nvif/clb069.h>
#include <nvif/unpack.h>
/* Userspace event subscription: hook the caller up to this buffer's
 * PENDING notifications.  A NULL uevent is a probe-only call; non-empty
 * argument structs are rejected (only the zero-sized 'vn' form exists).
 */
static int
nvkm_ufault_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	union nvif_clb069_event_args *args = argv;
	if (!uevent)
		return 0;
	if (argc != sizeof(args->vn))
		return -ENOSYS;
	return nvkm_uevent_add(uevent, &buffer->fault->event, buffer->id,
			       NVKM_FAULT_BUFFER_EVENT_PENDING, NULL);
}
/* Describe the buffer's mmap window: an IO mapping inside PCI BAR
 * resource 3 at the buffer's offset, sized by its backing memory.
 */
static int
nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
		enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	struct nvkm_device *device = buffer->fault->subdev.device;
	*type = NVKM_OBJECT_MAP_IO;
	*addr = device->func->resource_addr(device, 3) + buffer->addr;
	*size = nvkm_memory_size(buffer->mem);
	return 0;
}
/* Object fini: disable the underlying hardware fault buffer. */
static int
nvkm_ufault_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	buffer->fault->func->buffer.fini(buffer);
	return 0;
}
/* Object init: enable the underlying hardware fault buffer. */
static int
nvkm_ufault_init(struct nvkm_object *object)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	buffer->fault->func->buffer.init(buffer);
	return 0;
}
/* Object dtor: nothing to free — the buffer is owned by the fault
 * subdev, not by this userspace handle; returning NULL tells the core
 * not to kfree() anything.
 */
static void *
nvkm_ufault_dtor(struct nvkm_object *object)
{
	return NULL;
}
/* Object interface for the userspace-visible fault buffer. */
static const struct nvkm_object_func
nvkm_ufault = {
	.dtor = nvkm_ufault_dtor,
	.init = nvkm_ufault_init,
	.fini = nvkm_ufault_fini,
	.map = nvkm_ufault_map,
	.uevent = nvkm_ufault_uevent,
};
/* Create the userspace handle for the replayable fault buffer selected
 * by func->user.rp, filling the v0 args with its geometry (entry count
 * and GET/PUT register offsets) before wiring up the object.
 */
int
nvkm_ufault_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		void *argv, u32 argc, struct nvkm_object **pobject)
{
	union {
		struct nvif_clb069_v0 v0;
	} *args = argv;
	struct nvkm_fault *fault = device->fault;
	struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];
	int ret = -ENOSYS;
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		args->v0.entries = buffer->entries;
		args->v0.get = buffer->get;
		args->v0.put = buffer->put;
	} else
		return ret;
	nvkm_object_ctor(&nvkm_ufault, oclass, &buffer->object);
	*pobject = &buffer->object;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
/* Event-source fini: mask the buffer's interrupt when the last
 * listener for this buffer index goes away.
 */
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], false);
}
/* Event-source init: unmask the buffer's interrupt when the first
 * listener for this buffer index appears.
 */
static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], true);
}
/* Event interface: interrupts follow listener registration. */
static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};
/* Subdev interrupt hook: forward to the chipset handler. */
static void
nvkm_fault_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	return fault->func->intr(fault);
}
/* Subdev fini hook: run the optional chipset teardown. */
static int
nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->fini)
		fault->func->fini(fault);
	return 0;
}
/* Subdev init hook: run the optional chipset bring-up. */
static int
nvkm_fault_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->init)
		fault->func->init(fault);
	return 0;
}
/* Allocate and map one fault buffer: query its geometry from the
 * chipset, back it with instance memory sized entries * entry_size,
 * and pin it where the hardware expects to DMA (usually BAR2).
 * Partial allocations are left for nvkm_fault_dtor() to clean up.
 */
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;
	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;
	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;
	/* Pin fault buffer in BAR2. */
	buffer->addr = fault->func->buffer.pin(buffer);
	if (buffer->addr == ~0ULL)
		return -EFAULT;
	return 0;
}
/* Subdev one-time init: create every buffer the chipset declares
 * (capped at the fixed array size), register the event source with one
 * index per buffer, then run the optional chipset oneinit.
 */
static int
nvkm_fault_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int ret, i;
	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
		if (i < fault->func->buffer.nr) {
			ret = nvkm_fault_oneinit_buffer(fault, i);
			if (ret)
				return ret;
			fault->buffer_nr = i + 1;
		}
	}
	ret = nvkm_event_init(&nvkm_fault_ntfy, subdev, 1, fault->buffer_nr, &fault->event);
	if (ret)
		return ret;
	if (fault->func->oneinit)
		ret = fault->func->oneinit(fault);
	return ret;
}
/* Subdev destructor: detach the non-replayable-fault listener, tear
 * down the event source, and release every buffer's backing memory.
 * Returns the fault object for the core to kfree().
 */
static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int i;
	nvkm_event_ntfy_del(&fault->nrpfb);
	nvkm_event_fini(&fault->event);
	for (i = 0; i < fault->buffer_nr; i++) {
		if (fault->buffer[i]) {
			nvkm_memory_unref(&fault->buffer[i]->mem);
			kfree(fault->buffer[i]);
		}
	}
	return fault;
}
/* Generic subdev interface for FAULT; dispatches to chipset hooks. */
static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};
/* Common constructor used by chipset-specific fault implementations:
 * allocates the object, wires up the subdev base, and exposes the
 * userspace buffer class declared by the chipset hook table.
 */
int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault)
{
	struct nvkm_fault *fault;
	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev);
	fault->func = func;
	fault->user.ctor = nvkm_ufault_new;
	fault->user.base = func->user.base;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c |
/*
* Copyright (c) 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <nvif/class.h>
/* GP10B (Tegra): no BAR2 — the fault buffer is accessed by its direct
 * memory address instead of a BAR2 mapping.
 */
u64
gp10b_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
{
	return nvkm_memory_addr(buffer->mem);
}
/* GP10B: GP100 behaviour with a single 32-byte-entry buffer, pinned by
 * direct address rather than through BAR2.
 */
static const struct nvkm_fault_func
gp10b_fault = {
	.intr = gp100_fault_intr,
	.buffer.nr = 1,
	.buffer.entry_size = 32,
	.buffer.info = gp100_fault_buffer_info,
	.buffer.pin = gp10b_fault_buffer_pin,
	.buffer.init = gp100_fault_buffer_init,
	.buffer.fini = gp100_fault_buffer_fini,
	.buffer.intr = gp100_fault_buffer_intr,
	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
};
/* Construct the GP10B fault subdev. */
int
gp10b_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gp10b_fault, device, type, inst, pfault);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>
#include <nvif/class.h>
/* Workqueue handler that drains buffer 0 (non-replayable faults):
 * walk entries from GET to PUT, decode each 32-byte record, and hand
 * it to FIFO.  GET is advanced (and written back to hardware) before
 * each record is processed; the ring wraps at buffer->entries.
 */
void
gv100_fault_buffer_process(struct work_struct *work)
{
	struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);
	struct nvkm_fault_buffer *buffer = fault->buffer[0];
	struct nvkm_device *device = fault->subdev.device;
	struct nvkm_memory *mem = buffer->mem;
	u32 get = nvkm_rd32(device, buffer->get);
	u32 put = nvkm_rd32(device, buffer->put);
	if (put == get)
		return;
	nvkm_kmap(mem);
	while (get != put) {
		const u32 base = get * buffer->fault->func->buffer.entry_size;
		const u32 instlo = nvkm_ro32(mem, base + 0x00);
		const u32 insthi = nvkm_ro32(mem, base + 0x04);
		const u32 addrlo = nvkm_ro32(mem, base + 0x08);
		const u32 addrhi = nvkm_ro32(mem, base + 0x0c);
		const u32 timelo = nvkm_ro32(mem, base + 0x10);
		const u32 timehi = nvkm_ro32(mem, base + 0x14);
		const u32 info0 = nvkm_ro32(mem, base + 0x18);
		const u32 info1 = nvkm_ro32(mem, base + 0x1c);
		struct nvkm_fault_data info;
		if (++get == buffer->entries)
			get = 0;
		nvkm_wr32(device, buffer->get, get);
		info.addr = ((u64)addrhi << 32) | addrlo;
		info.inst = ((u64)insthi << 32) | instlo;
		info.time = ((u64)timehi << 32) | timelo;
		info.engine = (info0 & 0x000000ff);
		info.valid = (info1 & 0x80000000) >> 31;
		info.gpc = (info1 & 0x1f000000) >> 24;
		info.hub = (info1 & 0x00100000) >> 20;
		info.access = (info1 & 0x000f0000) >> 16;
		info.client = (info1 & 0x00007f00) >> 8;
		info.reason = (info1 & 0x0000001f);
		nvkm_fifo_fault(device->fifo, &info);
	}
	nvkm_done(mem);
}
/* Enable/disable a buffer's interrupt: buffer 0 uses bit 29, buffer 1
 * bit 27; 0x100a2c sets (enables) bits, 0x100a34 clears them.
 */
static void
gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
	if (enable)
		nvkm_mask(device, 0x100a2c, intr, intr);
	else
		nvkm_mask(device, 0x100a34, intr, intr);
}
/* Disable the fault buffer by clearing its enable bit (bit 31 of the
 * per-buffer control register at 0x100e34 + id * 0x14).
 */
static void
gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
}
/* Program the buffer's BAR2 address and enable it.  Order matters: the
 * address registers are written while the enable bit (bit 31) is
 * clear; it is set last to activate the buffer.
 */
static void
gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;
	nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
}
/* Read the buffer's entry count (low 20 bits of 0x100e34 + id * 0x14)
 * and record the MMIO offsets of its GET/PUT pointers.
 */
static void
gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;
	nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
	buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
	buffer->get = 0x100e2c + foff;
	buffer->put = 0x100e30 + foff;
}
/* Listener for buffer-0 PENDING events: defer draining to the
 * workqueue (gv100_fault_buffer_process) and keep the subscription.
 */
static int
gv100_fault_ntfy_nrpfb(struct nvkm_event_ntfy *ntfy, u32 bits)
{
	struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);
	schedule_work(&fault->nrpfb_work);
	return NVKM_EVENT_KEEP;
}
/* Non-buffered fault interrupt: decode the descriptor from the
 * 0x100e4c..0x100e5c registers and hand it to FIFO.  Same field layout
 * as the TU102 path, different register block.
 */
static void
gv100_fault_intr_fault(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_data info;
	const u32 addrlo = nvkm_rd32(device, 0x100e4c);
	const u32 addrhi = nvkm_rd32(device, 0x100e50);
	const u32 info0 = nvkm_rd32(device, 0x100e54);
	const u32 insthi = nvkm_rd32(device, 0x100e58);
	const u32 info1 = nvkm_rd32(device, 0x100e5c);
	info.addr = ((u64)addrhi << 32) | addrlo;
	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
	info.time = 0;
	info.engine = (info0 & 0x000000ff);
	info.valid = (info1 & 0x80000000) >> 31;
	info.gpc = (info1 & 0x1f000000) >> 24;
	info.hub = (info1 & 0x00100000) >> 20;
	info.access = (info1 & 0x000f0000) >> 16;
	info.client = (info1 & 0x00007f00) >> 8;
	info.reason = (info1 & 0x0000001f);
	nvkm_fifo_fault(device->fifo, &info);
}
/* Top-level fault interrupt dispatch (status register 0x100a20):
 * bit 31 = non-buffered fault (acked via 0x100e60), bit 29 = buffer 0,
 * bit 27 = buffer 1.  Any bits still set afterwards are only logged.
 */
static void
gv100_fault_intr(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x100a20);
	if (stat & 0x80000000) {
		gv100_fault_intr_fault(fault);
		nvkm_wr32(device, 0x100e60, 0x80000000);
		stat &= ~0x80000000;
	}
	if (stat & 0x20000000) {
		if (fault->buffer[0]) {
			nvkm_event_ntfy(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING);
			stat &= ~0x20000000;
		}
	}
	if (stat & 0x08000000) {
		if (fault->buffer[1]) {
			nvkm_event_ntfy(&fault->event, 1, NVKM_FAULT_BUFFER_EVENT_PENDING);
			stat &= ~0x08000000;
		}
	}
	if (stat) {
		nvkm_debug(subdev, "intr %08x\n", stat);
	}
}
/* Shut down fault handling: stop buffer-0 notifications, drain queued
 * work, disable the buffer, then mask the top-level fault interrupt
 * (bit 31 of 0x100a34).
 */
static void
gv100_fault_fini(struct nvkm_fault *fault)
{
	nvkm_event_ntfy_block(&fault->nrpfb);
	flush_work(&fault->nrpfb_work);
	if (fault->buffer[0])
		fault->func->buffer.fini(fault->buffer[0]);
	nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
}
/* Start fault handling: reverse order of gv100_fault_fini() —
 * unmask the top-level interrupt (bit 31 of 0x100a2c), enable
 * buffer 0, allow notifications.
 */
static void
gv100_fault_init(struct nvkm_fault *fault)
{
	nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
	fault->func->buffer.init(fault->buffer[0]);
	nvkm_event_ntfy_allow(&fault->nrpfb);
}
/* One-time setup (shared with TU102): subscribe the workqueue-based
 * drainer to buffer 0's PENDING events.
 */
int
gv100_fault_oneinit(struct nvkm_fault *fault)
{
	nvkm_event_ntfy_add(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING, true,
			    gv100_fault_ntfy_nrpfb, &fault->nrpfb);
	return 0;
}
/* GV100: two 32-byte-entry fault buffers; buffer 1 (replayable) is
 * exposed to userspace as VOLTA_FAULT_BUFFER_A (user.rp == 1).
 */
static const struct nvkm_fault_func
gv100_fault = {
	.oneinit = gv100_fault_oneinit,
	.init = gv100_fault_init,
	.fini = gv100_fault_fini,
	.intr = gv100_fault_intr,
	.buffer.nr = 2,
	.buffer.entry_size = 32,
	.buffer.info = gv100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gv100_fault_buffer_init,
	.buffer.fini = gv100_fault_buffer_fini,
	.buffer.intr = gv100_fault_buffer_intr,
	/*TODO: Figure out how to expose non-replayable fault buffer, which,
	 * for some reason, is where recoverable CE faults appear...
	 *
	 * It's a bit tricky, as both NVKM and SVM will need access to
	 * the non-replayable fault buffer.
	 */
	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};
/* Construct the GV100 fault subdev and attach the worker that drains
 * the non-replayable fault buffer.
 */
int
gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	int ret;

	ret = nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
	if (ret == 0)
		INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);
	return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/mc.h>
#include <nvif/class.h>
/* GP100 has no dedicated per-buffer interrupt control; gate the whole
 * FAULT subdev interrupt at the MC instead.
 */
void
gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable);
}
/* Disable the fault buffer (enable bit 0 of 0x002a70). */
void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000000);
}
/* Program the buffer's BAR2 address (0x002a74/0x002a70) and set the
 * enable bit last to activate the buffer.
 */
void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
/* Pin the buffer into BAR2 and return its BAR2 address. */
u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *buffer)
{
	return nvkm_memory_bar2(buffer->mem);
}
/* Read the buffer's entry count from 0x002a78 and record the MMIO
 * offsets of its GET/PUT pointers (GP100 has a single buffer).
 */
void
gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
	buffer->get = 0x002a7c;
	buffer->put = 0x002a80;
}
/* Fault interrupt: signal PENDING on the single buffer (id 0). */
void
gp100_fault_intr(struct nvkm_fault *fault)
{
	nvkm_event_ntfy(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING);
}
/* GP100: single 32-byte-entry fault buffer, exposed to userspace as
 * MAXWELL_FAULT_BUFFER_A (user.rp == 0).
 */
static const struct nvkm_fault_func
gp100_fault = {
	.intr = gp100_fault_intr,
	.buffer.nr = 1,
	.buffer.entry_size = 32,
	.buffer.info = gp100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gp100_fault_buffer_init,
	.buffer.fini = gp100_fault_buffer_fini,
	.buffer.intr = gp100_fault_buffer_intr,
	.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
};
/* Construct the GP100 fault subdev. */
int
gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Martin Peres
*/
#include "priv.h"
/* NV40-family chips come with one of two internal-sensor register layouts;
 * INVALID_STYLE marks chipsets with no supported sensor. */
enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
/* Classify this chipset's internal temperature-sensor register layout.
 * Returns INVALID_STYLE for chipsets without a supported sensor. */
static enum nv40_sensor_style
nv40_sensor_style(struct nvkm_therm *therm)
{
        switch (therm->subdev.device->chipset) {
        case 0x43:
        case 0x44:
        case 0x47:
        case 0x4a:
                return OLD_STYLE;
        case 0x46:
        case 0x49:
        case 0x4b:
        case 0x4c:
        case 0x4e:
        case 0x63:
        case 0x67:
        case 0x68:
                return NEW_STYLE;
        default:
                return INVALID_STYLE;
        }
}
/* Enable the internal ADC sensor and return a first raw reading, or
 * -ENODEV when the chipset has no supported sensor. */
static int
nv40_sensor_setup(struct nvkm_therm *therm)
{
        struct nvkm_device *device = therm->subdev.device;
        enum nv40_sensor_style style = nv40_sensor_style(therm);

        /* enable ADC readout and disable the ALARM threshold */
        if (style == NEW_STYLE) {
                nvkm_mask(device, 0x15b8, 0x80000000, 0);
                nvkm_wr32(device, 0x15b0, 0x80003fff);
                mdelay(20); /* wait for the temperature to stabilize */
                return nvkm_rd32(device, 0x15b4) & 0x3fff; /* 14-bit raw value */
        } else if (style == OLD_STYLE) {
                nvkm_wr32(device, 0x15b0, 0xff);
                mdelay(20); /* wait for the temperature to stabilize */
                return nvkm_rd32(device, 0x15b4) & 0xff; /* 8-bit raw value */
        } else
                return -ENODEV;
}
/* Read the raw internal sensor and convert it to degrees Celsius using the
 * slope/offset calibration parsed from the VBIOS.  Returns -ENODEV when the
 * sensor or its calibration data is unavailable; never returns negative
 * temperatures (clamped to 0 so negatives are reserved for errors). */
static int
nv40_temp_get(struct nvkm_therm *therm)
{
        struct nvkm_device *device = therm->subdev.device;
        struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
        enum nv40_sensor_style style = nv40_sensor_style(therm);
        int core_temp;

        if (style == NEW_STYLE) {
                nvkm_wr32(device, 0x15b0, 0x80003fff);
                core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
        } else if (style == OLD_STYLE) {
                nvkm_wr32(device, 0x15b0, 0xff);
                core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
        } else
                return -ENODEV;

        /* if the slope or the offset is unset, do not use the sensor */
        if (!sensor->slope_div || !sensor->slope_mult ||
            !sensor->offset_num || !sensor->offset_den)
                return -ENODEV;

        /* raw ADC value -> degrees C via VBIOS-provided linear calibration */
        core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
        core_temp = core_temp + sensor->offset_num / sensor->offset_den;
        core_temp = core_temp + sensor->offset_constant - 8;

        /* reserve negative temperatures for errors */
        if (core_temp < 0)
                core_temp = 0;

        return core_temp;
}
/* Enable/disable PWM fan control on the given GPIO line (only lines 2
 * and 9 have PWM controllers on NV40). */
static int
nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = enable ? 0x80000000 : 0x00000000;

        switch (line) {
        case 2:
                nvkm_mask(device, 0x0010f0, 0x80000000, mask);
                return 0;
        case 9:
                nvkm_mask(device, 0x0015f4, 0x80000000, mask);
                return 0;
        default:
                nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
                return -ENODEV;
        }
}
/* Read back the current PWM divider and duty cycle for a fan GPIO line.
 * Returns -EINVAL when PWM is not enabled on that line, -ENODEV for an
 * unknown line. */
static int
nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_device *device = subdev->device;

        if (line == 2) {
                u32 reg = nvkm_rd32(device, 0x0010f0);
                if (reg & 0x80000000) { /* PWM enabled? */
                        /* divider and duty are packed into one register */
                        *duty = (reg & 0x7fff0000) >> 16;
                        *divs = (reg & 0x00007fff);
                        return 0;
                }
        } else
        if (line == 9) {
                u32 reg = nvkm_rd32(device, 0x0015f4);
                if (reg & 0x80000000) { /* PWM enabled? */
                        *divs = nvkm_rd32(device, 0x0015f8);
                        *duty = (reg & 0x7fffffff);
                        return 0;
                }
        } else {
                nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
                return -ENODEV;
        }

        return -EINVAL;
}
/* Program the PWM divider and duty cycle for a fan GPIO line (only
 * lines 2 and 9 are supported). */
static int
nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_device *device = subdev->device;

        switch (line) {
        case 2:
                /* duty in the high half, divider in the low half */
                nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
                break;
        case 9:
                nvkm_wr32(device, 0x0015f8, divs);
                nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
                break;
        default:
                nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
                return -ENODEV;
        }

        return 0;
}
/* THERM interrupt handler: no real processing is implemented yet, so
 * just acknowledge the IRQs and log the raw status. */
void
nv40_therm_intr(struct nvkm_therm *therm)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_device *device = subdev->device;
        uint32_t stat = nvkm_rd32(device, 0x1100);

        /* processing: TODO */

        /* ack all IRQs */
        nvkm_wr32(device, 0x1100, 0x70000);

        nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
}
/* Subdev init: bring the internal temperature sensor online. */
static void
nv40_therm_init(struct nvkm_therm *therm)
{
        nv40_sensor_setup(therm);
}
/* NV40 therm implementation: PWM fan control on GPIO lines 2/9, internal
 * ADC temperature sensor, software-polled thermal alarms. */
static const struct nvkm_therm_func
nv40_therm = {
        .init = nv40_therm_init,
        .intr = nv40_therm_intr,
        .pwm_ctrl = nv40_fan_pwm_ctrl,
        .pwm_get = nv40_fan_pwm_get,
        .pwm_set = nv40_fan_pwm_set,
        .temp_get = nv40_temp_get,
        .program_alarms = nvkm_therm_program_alarms_polling,
};
/* Constructor entry point: instantiate the NV40 therm subdev. */
int
nv40_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_therm **ptherm)
{
        return nvkm_therm_new_(&nv40_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
/* PWM enable/disable is not needed on GM107; report success. */
static int
gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
        /* nothing to do, it seems hardwired */
        return 0;
}
/* Read the current PWM divider/duty from the GM107 fan registers
 * (13-bit values). */
static int
gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
        struct nvkm_device *device = therm->subdev.device;
        *divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
        *duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
        return 0;
}
/* Program the PWM divider/duty; bit 31 of the duty register latches the
 * new value (note: programmed via different registers than pwm_get reads). */
static int
gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
        struct nvkm_device *device = therm->subdev.device;
        nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
        nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
        return 0;
}
/* PWM source clock in Hz: the crystal frequency (stored in kHz). */
static int
gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
        return therm->subdev.device->crystal * 1000;
}
/* GM107 therm implementation: new PWM registers, otherwise reuses the
 * gf119/g84/gt215 helpers. */
static const struct nvkm_therm_func
gm107_therm = {
        .init = gf119_therm_init,
        .fini = g84_therm_fini,
        .pwm_ctrl = gm107_fan_pwm_ctrl,
        .pwm_get = gm107_fan_pwm_get,
        .pwm_set = gm107_fan_pwm_set,
        .pwm_clock = gm107_fan_pwm_clock,
        .temp_get = g84_temp_get,
        .fan_sense = gt215_therm_fan_sense,
        .program_alarms = nvkm_therm_program_alarms_polling,
};
/* Constructor entry point: instantiate the GM107 therm subdev. */
int
gm107_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_therm **ptherm)
{
        return nvkm_therm_new_(&gm107_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul
*/
#include <core/device.h>
#include "priv.h"
#include "gk104.h"
/* Enable engine-level clockgating: program the per-engine gating
 * parameters, then flip each present engine's clock from RUN to AUTO.
 * Engines not present on this device are skipped. */
void
gk104_clkgate_enable(struct nvkm_therm *base)
{
        struct gk104_therm *therm = gk104_therm(base);
        struct nvkm_device *dev = therm->base.subdev.device;
        const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
        int i;

        /* Program ENG_MANT, ENG_FILTER */
        for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
        }

        /* magic */
        nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
        nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);

        /* Enable clockgating (ENG_CLK = RUN->AUTO) */
        for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
        }
}
/* Disable engine-level clockgating on all present engines (reverse of
 * gk104_clkgate_enable). */
void
gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
{
        struct gk104_therm *therm = gk104_therm(base);
        struct nvkm_device *dev = therm->base.subdev.device;
        const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
        int i;

        /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
        for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) {
                if (!nvkm_device_subdev(dev, order[i].type, order[i].inst))
                        continue;

                nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
        }
}
/* Per-engine clockgate register offsets (relative to 0x20200); the list
 * is terminated by an NVKM_SUBDEV_NR sentinel entry. */
const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
        { NVKM_ENGINE_GR, 0, 0x00 },
        { NVKM_ENGINE_MSPDEC, 0, 0x04 },
        { NVKM_ENGINE_MSPPP, 0, 0x08 },
        { NVKM_ENGINE_MSVLD, 0, 0x0c },
        { NVKM_ENGINE_CE, 0, 0x10 },
        { NVKM_ENGINE_CE, 1, 0x14 },
        { NVKM_ENGINE_MSENC, 0, 0x18 },
        { NVKM_ENGINE_CE, 2, 0x1c },
        { NVKM_SUBDEV_NR },
};
/* Idle-filter values written to 0x020288/0x02028c by clkgate_enable. */
const struct gf100_idle_filter gk104_idle_filter = {
        .fecs = 0x00001000,
        .hubmmu = 0x00001000,
};
/* GK104 therm implementation: gf119 PWM/init helpers plus engine-level
 * clockgating support. */
static const struct nvkm_therm_func
gk104_therm_func = {
        .init = gf119_therm_init,
        .fini = g84_therm_fini,
        .pwm_ctrl = gf119_fan_pwm_ctrl,
        .pwm_get = gf119_fan_pwm_get,
        .pwm_set = gf119_fan_pwm_set,
        .pwm_clock = gf119_fan_pwm_clock,
        .temp_get = g84_temp_get,
        .fan_sense = gt215_therm_fan_sense,
        .program_alarms = nvkm_therm_program_alarms_polling,
        .clkgate_init = gf100_clkgate_init,
        .clkgate_enable = gk104_clkgate_enable,
        .clkgate_fini = gk104_clkgate_fini,
};
/* Allocate and construct a gk104_therm, recording the clockgating engine
 * order and idle-filter parameters for later use.  Returns 0 on success,
 * -ENOMEM on allocation failure. */
static int
gk104_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
                 enum nvkm_subdev_type type, int inst,
                 const struct gk104_clkgate_engine_info *clkgate_order,
                 const struct gf100_idle_filter *idle_filter,
                 struct nvkm_therm **ptherm)
{
        struct gk104_therm *therm;

        therm = kzalloc(sizeof(*therm), GFP_KERNEL);
        if (!therm)
                return -ENOMEM;

        nvkm_therm_ctor(&therm->base, device, type, inst, func);
        therm->clkgate_order = clkgate_order;
        therm->idle_filter = idle_filter;
        *ptherm = &therm->base;
        return 0;
}
/* Constructor entry point: instantiate the GK104 therm subdev with the
 * standard GK104 clockgating tables. */
int
gk104_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
        return gk104_therm_new_(&gk104_therm_func, device, type, inst,
                                gk104_clkgate_engine_info, &gk104_idle_filter,
                                ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c |
/*
* Copyright 2012 The Nouveau community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
static void
nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
{
therm->bios_sensor.offset_constant = 0;
therm->bios_sensor.thrs_fan_boost.temp = 90;
therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
therm->bios_sensor.thrs_down_clock.temp = 95;
therm->bios_sensor.thrs_down_clock.hysteresis = 3;
therm->bios_sensor.thrs_critical.temp = 105;
therm->bios_sensor.thrs_critical.hysteresis = 5;
therm->bios_sensor.thrs_shutdown.temp = 135;
therm->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
}
/* Sanitize VBIOS-provided thresholds so alarms can't oscillate. */
static void
nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
{
        struct nvbios_therm_sensor *s = &therm->bios_sensor;

        /* enforce a minimum hysteresis on thresholds */
        s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
        s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
        s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
        s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
}
/* Record the current state (above/below) of a threshold.
 * Must be called with alarm_program_lock taken! */
void
nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *therm,
                                      enum nvkm_therm_thrs thrs,
                                      enum nvkm_therm_thrs_state st)
{
        therm->sensor.alarm_state[thrs] = st;
}
/* Return the last recorded state (above/below) of a threshold.
 * Must be called with alarm_program_lock taken! */
enum nvkm_therm_thrs_state
nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
                                      enum nvkm_therm_thrs thrs)
{
        return therm->sensor.alarm_state[thrs];
}
/* Deferred emergency shutdown; the work item was kmalloc'd by the event
 * handler and is freed here once the poweroff has been requested. */
static void
nv_poweroff_work(struct work_struct *work)
{
        orderly_poweroff(true);
        kfree(work);
}
/*
 * React to a threshold crossing: log it, then apply the emergency action
 * associated with the threshold (fan boost, downclock, pause, or an
 * orderly shutdown scheduled via a self-freeing work item).
 */
void
nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
                        enum nvkm_therm_thrs_direction dir)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        bool active;
        static const char * const thresholds[] = {
                "fanboost", "downclock", "critical", "shutdown"
        };
        int temperature = therm->func->temp_get(therm);

        /* guard against out-of-range indexing into thresholds[] */
        if (thrs < 0 || thrs > 3)
                return;

        if (dir == NVKM_THERM_THRS_FALLING)
                nvkm_info(subdev,
                          "temperature (%i C) went below the '%s' threshold\n",
                          temperature, thresholds[thrs]);
        else
                nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
                          temperature, thresholds[thrs]);

        active = (dir == NVKM_THERM_THRS_RISING);
        switch (thrs) {
        case NVKM_THERM_THRS_FANBOOST:
                if (active) {
                        /* max out the fan, then return control to auto mode */
                        nvkm_therm_fan_set(therm, true, 100);
                        nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
                }
                break;
        case NVKM_THERM_THRS_DOWNCLOCK:
                if (therm->emergency.downclock)
                        therm->emergency.downclock(therm, active);
                break;
        case NVKM_THERM_THRS_CRITICAL:
                if (therm->emergency.pause)
                        therm->emergency.pause(therm, active);
                break;
        case NVKM_THERM_THRS_SHUTDOWN:
                if (active) {
                        /* GFP_ATOMIC: may be called from atomic context;
                         * the work item frees itself in nv_poweroff_work() */
                        struct work_struct *work;

                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work) {
                                INIT_WORK(work, nv_poweroff_work);
                                schedule_work(work);
                        }
                }
                break;
        case NVKM_THERM_THRS_NR:
                break;
        }
}
/* Poll one threshold: compare the current temperature against the
 * threshold (with hysteresis on the way down) and fire a sensor event
 * when the state changes.  Must be called with alarm_program_lock taken! */
static void
nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
                                  const struct nvbios_therm_threshold *thrs,
                                  enum nvkm_therm_thrs thrs_name)
{
        enum nvkm_therm_thrs_direction direction;
        enum nvkm_therm_thrs_state prev_state, new_state;
        int temp = therm->func->temp_get(therm);

        prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);

        if (temp >= thrs->temp && prev_state == NVKM_THERM_THRS_LOWER) {
                direction = NVKM_THERM_THRS_RISING;
                new_state = NVKM_THERM_THRS_HIGHER;
        } else if (temp <= thrs->temp - thrs->hysteresis &&
                   prev_state == NVKM_THERM_THRS_HIGHER) {
                direction = NVKM_THERM_THRS_FALLING;
                new_state = NVKM_THERM_THRS_LOWER;
        } else
                return; /* nothing to do */

        nvkm_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
        nvkm_therm_sensor_event(therm, thrs_name, direction);
}
/* Periodic (1 Hz) polling callback: check all four thresholds, then
 * re-arm the alarm as long as the sensor keeps returning valid readings. */
static void
alarm_timer_callback(struct nvkm_alarm *alarm)
{
        struct nvkm_therm *therm =
                container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
        struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
        struct nvkm_timer *tmr = therm->subdev.device->timer;
        unsigned long flags;

        spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);

        nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
                                          NVKM_THERM_THRS_FANBOOST);
        nvkm_therm_threshold_hyst_polling(therm,
                                          &sensor->thrs_down_clock,
                                          NVKM_THERM_THRS_DOWNCLOCK);
        nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
                                          NVKM_THERM_THRS_CRITICAL);
        nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
                                          NVKM_THERM_THRS_SHUTDOWN);

        spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);

        /* schedule the next poll in one second */
        if (therm->func->temp_get(therm) >= 0)
                nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}
/* Software fallback for hardware alarms: log the thresholds, then kick
 * off the 1 Hz polling loop via alarm_timer_callback(). */
void
nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
{
        struct nvbios_therm_sensor *sensor = &therm->bios_sensor;

        nvkm_debug(&therm->subdev,
                   "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
                   sensor->thrs_fan_boost.temp,
                   sensor->thrs_fan_boost.hysteresis,
                   sensor->thrs_down_clock.temp,
                   sensor->thrs_down_clock.hysteresis,
                   sensor->thrs_critical.temp,
                   sensor->thrs_critical.hysteresis,
                   sensor->thrs_shutdown.temp,
                   sensor->thrs_shutdown.hysteresis);

        alarm_timer_callback(&therm->sensor.therm_poll_alarm);
}
/* Subdev init hook: (re)program the thermal alarms. */
int
nvkm_therm_sensor_init(struct nvkm_therm *therm)
{
        therm->func->program_alarms(therm);
        return 0;
}
/* Subdev fini hook: on suspend, cancel the polling alarm (delay 0). */
int
nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
{
        struct nvkm_timer *tmr = therm->subdev.device->timer;

        if (suspend)
                nvkm_timer_alarm(tmr, 0, &therm->sensor.therm_poll_alarm);
        return 0;
}
/* Probe for an internal temperature sensor and report availability. */
void
nvkm_therm_sensor_preinit(struct nvkm_therm *therm)
{
        const char *sensor_avail =
                therm->func->temp_get(therm) < 0 ? "no" : "yes";

        nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
}
/* One-time sensor setup: install defaults, overlay the VBIOS sensor
 * table when available, and sanitize the resulting thresholds. */
int
nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_bios *bios = subdev->device->bios;

        nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
        nvkm_therm_temp_set_defaults(therm);
        if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
                                      &therm->bios_sensor))
                nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
        nvkm_therm_temp_safety_checks(therm);

        return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul
*/
#include <core/device.h>
#include "priv.h"
/* Iterate every init entry in a NULL/zero-terminated pack list:
 * outer loop walks packs until one with no init, inner loop walks that
 * pack's init entries until one with count == 0. */
#define pack_for_each_init(init, pack, head) \
        for (pack = head; pack && pack->init; pack++) \
                for (init = pack->init; init && init->count; init++)
/* Apply a clockgating register pack: each init entry writes the same
 * data value to `count` registers spaced 8 bytes apart starting at addr. */
void
gf100_clkgate_init(struct nvkm_therm *therm,
                   const struct nvkm_therm_clkgate_pack *p)
{
        struct nvkm_device *device = therm->subdev.device;
        const struct nvkm_therm_clkgate_pack *pack;
        const struct nvkm_therm_clkgate_init *init;
        u32 next, addr;

        pack_for_each_init(init, pack, p) {
                /* registers are written in stride-8 runs of `count` entries */
                next = init->addr + init->count * 8;
                addr = init->addr;

                nvkm_trace(&therm->subdev, "{ 0x%06x, %d, 0x%08x }\n",
                           init->addr, init->count, init->data);
                while (addr < next) {
                        nvkm_trace(&therm->subdev, "\t0x%06x = 0x%08x\n",
                                   addr, init->data);
                        nvkm_wr32(device, addr, init->data);
                        addr += 8;
                }
        }
}
/*
* TODO: Fermi clockgating isn't understood fully yet, so we don't specify any
* clockgate functions to use
*/
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Martin Peres
*/
#include "priv.h"
/* Translate a DCB GPIO line to the NV50 PWM control register, the bit
 * index within it, and the PWM controller index.  *line is rewritten to
 * the bit position used by the caller.  Returns -ENODEV for unknown lines. */
static int
pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
{
        struct nvkm_subdev *subdev = &therm->subdev;

        if (*line == 0x04) {
                *ctrl = 0x00e100;
                *line = 4;
                *indx = 0;
        } else
        if (*line == 0x09) {
                *ctrl = 0x00e100;
                *line = 9;
                *indx = 1;
        } else
        if (*line == 0x10) {
                *ctrl = 0x00e28c;
                *line = 0;
                *indx = 0;
        } else {
                nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
                return -ENODEV;
        }

        return 0;
}
/* Enable/disable PWM output on a fan GPIO line via its control register. */
int
nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
        struct nvkm_device *device = therm->subdev.device;
        u32 data = enable ? 0x00000001 : 0x00000000;
        int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
        if (ret == 0)
                nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
        return ret;
}
/* Read back the PWM divider/duty for a fan GPIO line; -EINVAL when PWM
 * is not enabled on that line. */
int
nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
        struct nvkm_device *device = therm->subdev.device;
        int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
        if (ret)
                return ret;

        if (nvkm_rd32(device, ctrl) & (1 << line)) {
                /* per-controller divider/duty register pair, stride 8 */
                *divs = nvkm_rd32(device, 0x00e114 + (id * 8));
                *duty = nvkm_rd32(device, 0x00e118 + (id * 8));
                return 0;
        }

        return -EINVAL;
}
/* Program the PWM divider/duty; bit 31 of the duty register latches the
 * new value. */
int
nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
        struct nvkm_device *device = therm->subdev.device;
        int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
        if (ret)
                return ret;

        nvkm_wr32(device, 0x00e114 + (id * 8), divs);
        nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
        return 0;
}
/* Compute the PWM source clock frequency in Hz.  Chipsets 0x51-0x93 can
 * run the PWM from either the HOST clock or the crystal, selected by
 * register 0xc040; later chipsets always use crystal/20. */
int
nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
        struct nvkm_device *device = therm->subdev.device;
        int pwm_clock;

        /* determine the PWM source clock */
        if (device->chipset > 0x50 && device->chipset < 0x94) {
                u8 pwm_div = nvkm_rd32(device, 0x410c);
                if (nvkm_rd32(device, 0xc040) & 0x800000) {
                        /* Use the HOST clock (100 MHz)
                         * Where does this constant (2.4) come from? */
                        pwm_clock = (100000000 >> pwm_div) * 10 / 24;
                } else {
                        /* Where does this constant (20) come from? */
                        pwm_clock = (device->crystal * 1000) >> pwm_div;
                        pwm_clock /= 20;
                }
        } else {
                pwm_clock = (device->crystal * 1000) / 20;
        }

        return pwm_clock;
}
/* Power up the internal temperature sensor. */
static void
nv50_sensor_setup(struct nvkm_therm *therm)
{
        struct nvkm_device *device = therm->subdev.device;
        nvkm_mask(device, 0x20010, 0x40000000, 0x0);
        mdelay(20); /* wait for the temperature to stabilize */
}
/* Read the raw 14-bit sensor value and convert it to degrees Celsius via
 * the VBIOS slope/offset calibration.  Returns -ENODEV when calibration
 * data is missing; never returns negative temperatures (clamped to 0). */
static int
nv50_temp_get(struct nvkm_therm *therm)
{
        struct nvkm_device *device = therm->subdev.device;
        struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
        int core_temp;

        core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;

        /* if the slope or the offset is unset, do not use the sensor */
        if (!sensor->slope_div || !sensor->slope_mult ||
            !sensor->offset_num || !sensor->offset_den)
                return -ENODEV;

        core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
        core_temp = core_temp + sensor->offset_num / sensor->offset_den;
        core_temp = core_temp + sensor->offset_constant - 8;

        /* reserve negative temperatures for errors */
        if (core_temp < 0)
                core_temp = 0;

        return core_temp;
}
/* Subdev init: bring the internal temperature sensor online. */
static void
nv50_therm_init(struct nvkm_therm *therm)
{
        nv50_sensor_setup(therm);
}
/* NV50 therm implementation: PWM fan control, internal sensor,
 * software-polled thermal alarms. */
static const struct nvkm_therm_func
nv50_therm = {
        .init = nv50_therm_init,
        .intr = nv40_therm_intr,
        .pwm_ctrl = nv50_fan_pwm_ctrl,
        .pwm_get = nv50_fan_pwm_get,
        .pwm_set = nv50_fan_pwm_set,
        .pwm_clock = nv50_fan_pwm_clock,
        .temp_get = nv50_temp_get,
        .program_alarms = nvkm_therm_program_alarms_polling,
};
/* Constructor entry point: instantiate the NV50 therm subdev. */
int
nv50_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_therm **ptherm)
{
        return nvkm_therm_new_(&nv50_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c |
/*
* Copyright 2012 The Nouveau community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
#include <subdev/gpio.h>
#include <subdev/timer.h>
/* A "toggle" fan: no PWM hardware, so fan speed is emulated by toggling
 * a GPIO on/off with a duty cycle driven by a software alarm. */
struct nvkm_fantog {
        struct nvkm_fan base;
        struct nvkm_alarm alarm;        /* schedules the next GPIO toggle */
        spinlock_t lock;                /* protects percent/alarm updates */
        u32 period_us;                  /* full on+off cycle length */
        u32 percent;                    /* requested duty cycle, 0-100 */
        struct dcb_gpio_func func;      /* GPIO used to drive the fan */
};
/* Flip the fan GPIO and, unless the duty cycle is pinned at the current
 * GPIO level, schedule the next flip.  percent < 0 means "keep the
 * previously requested duty cycle" (used by the alarm callback). */
static void
nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
{
        struct nvkm_therm *therm = fan->base.parent;
        struct nvkm_device *device = therm->subdev.device;
        struct nvkm_timer *tmr = device->timer;
        struct nvkm_gpio *gpio = device->gpio;
        unsigned long flags;
        int duty;

        spin_lock_irqsave(&fan->lock, flags);
        if (percent < 0)
                percent = fan->percent;
        fan->percent = percent;

        /* toggle: write the inverse of the current GPIO state */
        duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
        nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);

        if (percent != (duty * 100)) {
                /* time until the next edge, proportional to the duty cycle */
                u64 next_change = (percent * fan->period_us) / 100;
                if (!duty)
                        next_change = fan->period_us - next_change;
                nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
        }
        spin_unlock_irqrestore(&fan->lock, flags);
}
/* Alarm callback: toggle the GPIO again, keeping the current duty cycle. */
static void
nvkm_fantog_alarm(struct nvkm_alarm *alarm)
{
        struct nvkm_fantog *fan =
               container_of(alarm, struct nvkm_fantog, alarm);
        nvkm_fantog_update(fan, -1);
}
/* Report the currently requested duty cycle (0-100). */
static int
nvkm_fantog_get(struct nvkm_therm *therm)
{
        struct nvkm_fantog *fan = (void *)therm->fan;
        return fan->percent;
}
/* Change the duty cycle; disables any hardware PWM on the line first so
 * the GPIO toggling takes effect. */
static int
nvkm_fantog_set(struct nvkm_therm *therm, int percent)
{
        struct nvkm_fantog *fan = (void *)therm->fan;
        if (therm->func->pwm_ctrl)
                therm->func->pwm_ctrl(therm, fan->func.line, false);
        nvkm_fantog_update(fan, percent);
        return 0;
}
/* Construct a toggle-fan controller on the given GPIO and install it as
 * therm->fan.  Starts at 100% duty with a 100ms toggle period (10Hz). */
int
nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
        struct nvkm_fantog *fan;
        int ret;

        /* make sure the hardware PWM on this line is off before we toggle */
        if (therm->func->pwm_ctrl) {
                ret = therm->func->pwm_ctrl(therm, func->line, false);
                if (ret)
                        return ret;
        }

        fan = kzalloc(sizeof(*fan), GFP_KERNEL);
        if (!fan)
                return -ENOMEM;
        therm->fan = &fan->base;

        fan->base.type = "toggle";
        fan->base.get = nvkm_fantog_get;
        fan->base.set = nvkm_fantog_set;
        nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
        fan->period_us = 100000; /* 10Hz */
        fan->percent = 100;
        fan->func = *func;
        spin_lock_init(&fan->lock);
        return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c |
/*
* Copyright 2017 Karol Herbst
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst
*/
#include "priv.h"
/* GM200 therm implementation: temperature readout and polled alarms
 * only; no driver-side fan control (reuses g84 init/fini/temp helpers). */
static const struct nvkm_therm_func
gm200_therm = {
        .init = g84_therm_init,
        .fini = g84_therm_fini,
        .temp_get = g84_temp_get,
        .program_alarms = nvkm_therm_program_alarms_polling,
};
/* Constructor entry point: instantiate the GM200 therm subdev. */
int
gm200_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_therm **ptherm)
{
        return nvkm_therm_new_(&gm200_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Map a GPIO line to a PWM controller index by decoding the line's
 * per-GPIO config register: 0/1 select the two e11x PWM controllers,
 * 2 is the PTHERM-driven controller.  -ENODEV for unrecognized configs. */
static int
pwm_info(struct nvkm_therm *therm, int line)
{
        struct nvkm_subdev *subdev = &therm->subdev;
        struct nvkm_device *device = subdev->device;
        u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));

        switch (gpio & 0x000000c0) {
        case 0x00000000: /* normal mode, possibly pwm forced off by us */
        case 0x00000040: /* nvio special */
                switch (gpio & 0x0000001f) {
                case 0x00: return 2;
                case 0x19: return 1;
                case 0x1c: return 0;
                case 0x1e: return 2;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
        return -ENODEV;
}
/* Enable/disable PWM output on a GPIO line.  Controller 2 needs no
 * toggling; controllers 0/1 are switched via the GPIO config register. */
int
gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
        struct nvkm_device *device = therm->subdev.device;
        u32 data = enable ? 0x00000040 : 0x00000000;
        int indx = pwm_info(therm, line);
        if (indx < 0)
                return indx;
        else if (indx < 2)
                nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
        /* nothing to do for indx == 2, it seems hardwired to PTHERM */
        return 0;
}
/* Read back divider/duty from whichever PWM controller drives the line;
 * -EINVAL when PWM is not currently enabled on controllers 0/1. */
int
gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
        struct nvkm_device *device = therm->subdev.device;
        int indx = pwm_info(therm, line);
        if (indx < 0)
                return indx;
        else if (indx < 2) {
                if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
                        *divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
                        *duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
                        return 0;
                }
        } else if (indx == 2) {
                /* PTHERM controller: 13-bit divider/duty */
                *divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
                *duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
                return 0;
        }

        return -EINVAL;
}
/* Program the PWM divider and duty for the fan on 'line'.
 *
 * For controllers 0/1 the duty write carries bit 31; for the PTHERM
 * source the divider write preserves the high bits of 0x0200d8 and the
 * duty write carries bit 30.  NOTE(review): those top bits presumably
 * act as latch/enable flags -- confirm against hardware documentation.
 */
int
gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
	struct nvkm_device *device = therm->subdev.device;
	int indx = pwm_info(therm, line);

	if (indx < 0)
		return indx;
	else if (indx < 2) {
		nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
		nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
	} else if (indx == 2) {
		nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
		nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
	}
	return 0;
}
/* Report the input clock of the PWM source driving 'line', in Hz.
 * Returns 0 when no usable PWM source exists for the line.
 */
int
gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
	struct nvkm_device *device = therm->subdev.device;
	int indx = pwm_info(therm, line);
	int khz = device->crystal;

	if (indx < 0)
		return 0;

	/* controllers 0/1 run at crystal/20, the PTHERM source at crystal/10 */
	return indx < 2 ? (khz * 1000) / 20 : khz * 1000 / 10;
}
/* Hardware bring-up for the GF119 thermal engine: reuse the G84 sensor
 * setup, then configure the fan tachometer to count pulses over a
 * one-second window driven by the crystal clock.
 */
void
gf119_therm_init(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;

	g84_sensor_setup(therm);

	/* enable fan tach, count revolutions per-second */
	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
	if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
		/* route the tach GPIO line and use the crystal as time base */
		nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
	}
	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
}
/* GF119 thermal function table: chip-specific PWM routing, with sensor
 * and alarm handling shared with earlier chipsets (g84/gt215). */
static const struct nvkm_therm_func
gf119_therm = {
	.init = gf119_therm_init,
	.fini = g84_therm_fini,
	.pwm_ctrl = gf119_fan_pwm_ctrl,
	.pwm_get = gf119_fan_pwm_get,
	.pwm_set = gf119_fan_pwm_set,
	.pwm_clock = gf119_fan_pwm_clock,
	.temp_get = g84_temp_get,
	.fan_sense = gt215_therm_fan_sense,
	.program_alarms = nvkm_therm_program_alarms_polling,
};
/* Instantiate the GF119 thermal subdev; behaviour is fully described by
 * the gf119_therm function table. */
int
gf119_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_therm **ptherm)
{
	return nvkm_therm_new_(&gf119_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c |
/*
* Copyright 2012 The Nouveau community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
#include <core/option.h>
#include <subdev/pmu.h>
int
nvkm_therm_temp_get(struct nvkm_therm *therm)
{
if (therm->func->temp_get)
return therm->func->temp_get(therm);
return -ENODEV;
}
/* Trip-point fan mode: choose the duty of the highest VBIOS trip point
 * at or below the current temperature, applying hysteresis so the fan
 * does not oscillate around a threshold.  Returns the duty in percent
 * (0 when no trip point applies).
 *
 * NOTE(review): temp_get() can return a negative errno which would be
 * truncated into u8 here.  AUTO mode is refused without a working
 * sensor (see nvkm_therm_fan_mode()), but a later runtime read failure
 * would still be silently misinterpreted -- verify.
 */
static int
nvkm_therm_update_trip(struct nvkm_therm *therm)
{
	struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
				       *cur_trip = NULL,
				       *last_trip = therm->last_trip;
	u8 temp = therm->func->temp_get(therm);
	u16 duty, i;

	/* look for the trip point corresponding to the current temperature */
	cur_trip = NULL;
	for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
		if (temp >= trip[i].temp)
			cur_trip = &trip[i];
	}

	/* account for the hysteresis cycle */
	if (last_trip && temp <= (last_trip->temp) &&
	    temp > (last_trip->temp - last_trip->hysteresis))
		cur_trip = last_trip;

	if (cur_trip) {
		duty = cur_trip->fan_duty;
		therm->last_trip = cur_trip;
	} else {
		duty = 0;
		therm->last_trip = NULL;
	}

	return duty;
}
/* Linearly interpolate a fan duty (percent) between the VBIOS min/max
 * duty across the [linear_min_temp, linear_max_temp] range; clamps to
 * min_duty below the range and max_duty above it.
 */
static int
nvkm_therm_compute_linear_duty(struct nvkm_therm *therm, u8 linear_min_temp,
			       u8 linear_max_temp)
{
	u8 temp = therm->func->temp_get(therm);
	u16 duty;

	/* handle the non-linear part first */
	if (temp < linear_min_temp)
		return therm->fan->bios.min_duty;
	else if (temp > linear_max_temp)
		return therm->fan->bios.max_duty;

	/* we are in the linear zone */
	duty = (temp - linear_min_temp);
	duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
	duty /= (linear_max_temp - linear_min_temp);
	duty += therm->fan->bios.min_duty;
	return duty;
}
/* Linear fan mode using the temperature range supplied by the VBIOS. */
static int
nvkm_therm_update_linear(struct nvkm_therm *therm)
{
	return nvkm_therm_compute_linear_duty(therm,
					      therm->fan->bios.linear_min_temp,
					      therm->fan->bios.linear_max_temp);
}
/* Linear fan fallback when the VBIOS provides no usable fan mode data:
 * interpolate from a hard-coded 30 C floor up to the fan-boost
 * threshold temperature.
 */
static int
nvkm_therm_update_linear_fallback(struct nvkm_therm *therm)
{
	u8 max = therm->bios_sensor.thrs_fan_boost.temp;

	return nvkm_therm_compute_linear_duty(therm, 30, max);
}
/* Core fan-management state machine, called from the periodic alarm,
 * from mode changes, and from cstate updates.
 *
 * 'mode' is the new NVKM_THERM_CTRL_* mode, or negative to re-evaluate
 * in the current mode.  In MANUAL mode the current fan speed is simply
 * re-applied; in AUTO mode a target duty is computed from the VBIOS fan
 * mode; in NONE mode the periodic alarm is cancelled.  The fan speed is
 * applied outside the lock, immediately (MANUAL/NONE) or ramped (AUTO).
 *
 * Fixes vs. original: 'switch(' spacing made consistent with kernel
 * style (every other switch in this file), and an explicit break added
 * to the final case of the outer switch.
 */
static void
nvkm_therm_update(struct nvkm_therm *therm, int mode)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_timer *tmr = subdev->device->timer;
	unsigned long flags;
	bool immd = true;
	bool poll = true;
	int duty = -1;

	spin_lock_irqsave(&therm->lock, flags);
	if (mode < 0)
		mode = therm->mode;
	therm->mode = mode;

	switch (mode) {
	case NVKM_THERM_CTRL_MANUAL:
		nvkm_timer_alarm(tmr, 0, &therm->alarm);
		duty = nvkm_therm_fan_get(therm);
		if (duty < 0)
			duty = 100;
		poll = false;
		break;
	case NVKM_THERM_CTRL_AUTO:
		switch (therm->fan->bios.fan_mode) {
		case NVBIOS_THERM_FAN_TRIP:
			duty = nvkm_therm_update_trip(therm);
			break;
		case NVBIOS_THERM_FAN_LINEAR:
			duty = nvkm_therm_update_linear(therm);
			break;
		case NVBIOS_THERM_FAN_OTHER:
			if (therm->cstate) {
				/* clock-state request pins the duty; no polling needed */
				duty = therm->cstate;
				poll = false;
			} else {
				duty = nvkm_therm_update_linear_fallback(therm);
			}
			break;
		}
		immd = false;
		break;
	case NVKM_THERM_CTRL_NONE:
	default:
		nvkm_timer_alarm(tmr, 0, &therm->alarm);
		poll = false;
		break;
	}

	/* re-arm the periodic (1s) alarm while automatic control is active */
	if (poll)
		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
	spin_unlock_irqrestore(&therm->lock, flags);

	if (duty >= 0) {
		nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
		nvkm_therm_fan_set(therm, immd, duty);
	}
}
/* Record the fan speed requested by a clock-state change and re-run fan
 * management.  'dir' constrains when the stored value is replaced:
 * negative accepts only lower requests, positive only higher ones, and
 * zero replaces unconditionally.
 */
int
nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
{
	struct nvkm_subdev *subdev = &therm->subdev;

	if (!dir || (dir < 0 && fan < therm->cstate) ||
	    (dir > 0 && fan > therm->cstate)) {
		nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
		therm->cstate = fan;
		nvkm_therm_update(therm, -1);
	}
	return 0;
}
/* Periodic fan-management tick: re-evaluate in the current mode. */
static void
nvkm_therm_alarm(struct nvkm_alarm *alarm)
{
	struct nvkm_therm *therm = container_of(alarm, struct nvkm_therm,
						alarm);

	nvkm_therm_update(therm, -1);
}
/* Switch between disabled/manual/automatic fan management.
 *
 * Rejects out-of-range modes, non-NONE modes while the PMU firmware is
 * controlling the fan itself (Fermi), and AUTO when the temperature
 * sensor does not work.  Returns 0 on success or -EINVAL.
 */
int
nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_device *device = subdev->device;
	static const char *name[] = {
		"disabled",
		"manual",
		"automatic"
	};

	/* The default PPWR ucode on fermi interferes with fan management */
	if ((mode >= ARRAY_SIZE(name)) ||
	    (mode != NVKM_THERM_CTRL_NONE && nvkm_pmu_fan_controlled(device)))
		return -EINVAL;

	/* do not allow automatic fan management if the thermal sensor is
	 * not available */
	if (mode == NVKM_THERM_CTRL_AUTO &&
	    therm->func->temp_get(therm) < 0)
		return -EINVAL;

	if (therm->mode == mode)
		return 0;

	nvkm_debug(subdev, "fan management: %s\n", name[mode]);
	nvkm_therm_update(therm, mode);
	return 0;
}
/* Read one user-visible thermal attribute (fan duty limits, fan mode,
 * or a VBIOS threshold temperature/hysteresis).  Returns the value, or
 * -EINVAL for an unknown attribute type.
 */
int
nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
{
	switch (type) {
	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
		return therm->fan->bios.min_duty;
	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
		return therm->fan->bios.max_duty;
	case NVKM_THERM_ATTR_FAN_MODE:
		return therm->mode;
	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
		return therm->bios_sensor.thrs_fan_boost.temp;
	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
		return therm->bios_sensor.thrs_fan_boost.hysteresis;
	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
		return therm->bios_sensor.thrs_down_clock.temp;
	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
		return therm->bios_sensor.thrs_down_clock.hysteresis;
	case NVKM_THERM_ATTR_THRS_CRITICAL:
		return therm->bios_sensor.thrs_critical.temp;
	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
		return therm->bios_sensor.thrs_critical.hysteresis;
	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
		return therm->bios_sensor.thrs_shutdown.temp;
	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
		return therm->bios_sensor.thrs_shutdown.hysteresis;
	}

	return -EINVAL;
}
/* Write one user-visible thermal attribute.
 *
 * Fan duty limits are clamped so that min <= max always holds; any
 * threshold change immediately reprograms the hardware alarms.
 * Returns 0 on success or -EINVAL for an unknown attribute type.
 */
int
nvkm_therm_attr_set(struct nvkm_therm *therm,
		    enum nvkm_therm_attr_type type, int value)
{
	switch (type) {
	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
		/* clamp to [0, max_duty] */
		if (value < 0)
			value = 0;
		if (value > therm->fan->bios.max_duty)
			value = therm->fan->bios.max_duty;
		therm->fan->bios.min_duty = value;
		return 0;
	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
		/* clamp to [min_duty, ...) */
		if (value < 0)
			value = 0;
		if (value < therm->fan->bios.min_duty)
			value = therm->fan->bios.min_duty;
		therm->fan->bios.max_duty = value;
		return 0;
	case NVKM_THERM_ATTR_FAN_MODE:
		return nvkm_therm_fan_mode(therm, value);
	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
		therm->bios_sensor.thrs_fan_boost.temp = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
		therm->bios_sensor.thrs_fan_boost.hysteresis = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
		therm->bios_sensor.thrs_down_clock.temp = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
		therm->bios_sensor.thrs_down_clock.hysteresis = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_CRITICAL:
		therm->bios_sensor.thrs_critical.temp = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
		therm->bios_sensor.thrs_critical.hysteresis = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
		therm->bios_sensor.thrs_shutdown.temp = value;
		therm->func->program_alarms(therm);
		return 0;
	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
		therm->bios_sensor.thrs_shutdown.hysteresis = value;
		therm->func->program_alarms(therm);
		return 0;
	}

	return -EINVAL;
}
/* Turn clockgating on; silently skipped when the chipset has no hook or
 * the feature was not enabled via the config option. */
void
nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
{
	if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
		return;

	nvkm_debug(&therm->subdev, "Enabling clockgating\n");
	therm->func->clkgate_enable(therm);
}
/* Tear down clockgating before suspend or subdev shutdown; no-op when
 * the chipset has no hook or the feature is disabled. */
void
nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
{
	if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
		return;

	nvkm_debug(&therm->subdev,
		   "Preparing clockgating for %s\n",
		   suspend ? "suspend" : "fini");
	therm->func->clkgate_fini(therm, suspend);
}
static void
nvkm_therm_clkgate_oneinit(struct nvkm_therm *therm)
{
if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
return;
nvkm_info(&therm->subdev, "Clockgating enabled\n");
}
static void
nvkm_therm_intr(struct nvkm_subdev *subdev)
{
struct nvkm_therm *therm = nvkm_therm(subdev);
if (therm->func->intr)
therm->func->intr(therm);
}
/* Subdev shutdown/suspend: quiesce chipset state, fan, and sensor.
 * On suspend, remember the current mode so nvkm_therm_init() can
 * restore it, and drop to NONE in the meantime.
 */
static int
nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_therm *therm = nvkm_therm(subdev);

	if (therm->func->fini)
		therm->func->fini(therm);

	nvkm_therm_fan_fini(therm, suspend);
	nvkm_therm_sensor_fini(therm, suspend);

	if (suspend) {
		therm->suspend = therm->mode;
		therm->mode = NVKM_THERM_CTRL_NONE;
	}
	return 0;
}
/* One-time construction: parse VBIOS sensor data, probe external
 * monitoring ICs, build the fan object, default to automatic fan
 * management, and prepare sensor/clockgating state.
 */
static int
nvkm_therm_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_therm *therm = nvkm_therm(subdev);

	nvkm_therm_sensor_ctor(therm);
	nvkm_therm_ic_ctor(therm);
	nvkm_therm_fan_ctor(therm);

	nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
	nvkm_therm_sensor_preinit(therm);
	nvkm_therm_clkgate_oneinit(therm);
	return 0;
}
/* Subdev (re)initialisation: bring up chipset state, then restore the
 * fan mode (and, for manual/auto, the fan speed) saved at suspend.
 */
static int
nvkm_therm_init(struct nvkm_subdev *subdev)
{
	struct nvkm_therm *therm = nvkm_therm(subdev);

	if (therm->func->init)
		therm->func->init(therm);

	if (therm->suspend >= 0) {
		/* restore the pwm value only when on manual or auto mode */
		if (therm->suspend > 0)
			nvkm_therm_fan_set(therm, true, therm->fan->percent);

		nvkm_therm_fan_mode(therm, therm->suspend);
	}

	nvkm_therm_sensor_init(therm);
	nvkm_therm_fan_init(therm);
	return 0;
}
/* Hand the chipset its clockgating register pack, when supported and
 * enabled via the config option. */
void
nvkm_therm_clkgate_init(struct nvkm_therm *therm,
			const struct nvkm_therm_clkgate_pack *p)
{
	if (therm && therm->func->clkgate_init && therm->clkgating_enabled)
		therm->func->clkgate_init(therm, p);
}
/* Destructor: release the fan object allocated by the fan constructors;
 * the therm object itself is freed by the caller. */
static void *
nvkm_therm_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_therm *therm = nvkm_therm(subdev);

	kfree(therm->fan);
	return therm;
}
/* Generic subdev hooks; chipset-specific behaviour lives behind
 * therm->func. */
static const struct nvkm_subdev_func
nvkm_therm = {
	.dtor = nvkm_therm_dtor,
	.oneinit = nvkm_therm_oneinit,
	.init = nvkm_therm_init,
	.fini = nvkm_therm_fini,
	.intr = nvkm_therm_intr,
};
/* Common constructor: wire up the subdev, locks, alarm callback, and
 * the user-facing accessor function pointers.  mode/suspend start as -1
 * ("undefined") until oneinit/fini set them.
 */
void
nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, enum nvkm_subdev_type type,
		int inst, const struct nvkm_therm_func *func)
{
	nvkm_subdev_ctor(&nvkm_therm, device, type, inst, &therm->subdev);
	therm->func = func;

	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
	spin_lock_init(&therm->lock);
	spin_lock_init(&therm->sensor.alarm_program_lock);

	therm->fan_get = nvkm_therm_fan_user_get;
	therm->fan_set = nvkm_therm_fan_user_set;
	therm->attr_get = nvkm_therm_attr_get;
	therm->attr_set = nvkm_therm_attr_set;
	therm->mode = therm->suspend = -1; /* undefined */

	/* clockgating is opt-in via the NvPmEnableGating config option */
	therm->clkgating_enabled = nvkm_boolopt(device->cfgopt,
						"NvPmEnableGating", false);
}
/* Allocate and construct a therm object.  *ptherm is always written,
 * even on allocation failure (it is NULL in that case).
 */
int
nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm)
{
	struct nvkm_therm *therm;

	therm = kzalloc(sizeof(*therm), GFP_KERNEL);
	*ptherm = therm;
	if (!therm)
		return -ENOMEM;

	nvkm_therm_ctor(therm, device, type, inst, func);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Martin Peres
*/
#include "priv.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/fan.h>
#include <subdev/gpio.h>
/* PWM-driven fan: the generic fan state plus the DCB GPIO function that
 * describes which line drives the fan and its polarity. */
struct nvkm_fanpwm {
	struct nvkm_fan base;
	struct dcb_gpio_func func;
};
/* Read the current fan speed as a percentage.
 *
 * When a valid divider is reported, compute duty/divs; the duty is
 * inverted on NV40-and-earlier or when the GPIO log indicates inverted
 * polarity.  Otherwise fall back to the raw GPIO state (0% or 100%).
 */
static int
nvkm_fanpwm_get(struct nvkm_therm *therm)
{
	struct nvkm_fanpwm *fan = (void *)therm->fan;
	struct nvkm_device *device = therm->subdev.device;
	struct nvkm_gpio *gpio = device->gpio;
	int card_type = device->card_type;
	u32 divs, duty;
	int ret;

	ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
	if (ret == 0 && divs) {
		divs = max(divs, duty);
		if (card_type <= NV_40 || (fan->func.log[0] & 1))
			duty = divs - duty;
		return (duty * 100) / divs;
	}

	return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
}
/* Set the fan speed to 'percent'.
 *
 * The divider comes from the perf table, or is derived from the PWM
 * source clock and the VBIOS-requested PWM frequency when available.
 * The duty is rounded up and inverted for the same cases as in
 * nvkm_fanpwm_get(), then programmed and the PWM output enabled.
 */
static int
nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
{
	struct nvkm_fanpwm *fan = (void *)therm->fan;
	int card_type = therm->subdev.device->card_type;
	u32 divs, duty;
	int ret;

	divs = fan->base.perf.pwm_divisor;
	if (fan->base.bios.pwm_freq) {
		divs = 1;
		if (therm->func->pwm_clock)
			divs = therm->func->pwm_clock(therm, fan->func.line);
		divs /= fan->base.bios.pwm_freq;
	}

	/* round up so a non-zero request never becomes a zero duty */
	duty = ((divs * percent) + 99) / 100;
	if (card_type <= NV_40 || (fan->func.log[0] & 1))
		duty = divs - duty;

	ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
	if (ret == 0)
		ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
	return ret;
}
/* Attempt to attach a PWM fan for the given GPIO function.
 *
 * Returns -ENODEV when PWM control is ruled out (config option, missing
 * chipset hooks, VBIOS says toggle fan, or the line has no PWM source),
 * letting the caller fall back to another fan type; -ENOMEM on
 * allocation failure; 0 on success (therm->fan is installed).
 */
int
nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
{
	struct nvkm_device *device = therm->subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_fanpwm *fan;
	struct nvbios_therm_fan info = {};
	u32 divs, duty;

	nvbios_fan_parse(bios, &info);

	if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
	    !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
	    therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
		return -ENODEV;

	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
	if (!fan)
		return -ENOMEM;
	therm->fan = &fan->base;

	fan->base.type = "PWM";
	fan->base.get = nvkm_fanpwm_get;
	fan->base.set = nvkm_fanpwm_set;
	fan->func = *func;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Martin Peres
*/
#include "priv.h"
#include <subdev/fuse.h>
/* Read the internal temperature sensor; fuse 0x1a8 gates whether the
 * sensor is usable on this board. */
int
g84_temp_get(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;

	if (nvkm_fuse_read(device->fuse, 0x1a8) != 1)
		return -ENODEV;

	return nvkm_rd32(device, 0x20400);
}
/* Enable the internal temperature sensor on boards whose defaults leave
 * it misconfigured; guarded by the same fuse as g84_temp_get().
 */
void
g84_sensor_setup(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;

	/* enable temperature reading for cards with insane defaults */
	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
		nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
		nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
		mdelay(20); /* wait for the temperature to stabilize */
	}
}
/* Program all four thermal alarm thresholds into hardware and enable
 * their interrupts.  Runs under the alarm_program_lock because the
 * interrupt handler reprograms thresholds for hysteresis emulation.
 */
static void
g84_therm_program_alarms(struct nvkm_therm *therm)
{
	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long flags;

	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);

	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
	nvkm_wr32(device, 0x20000, 0x000003ff);

	/* shutdown: The computer should be shutdown when reached */
	nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
	nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);

	/* THRS_1 : fan boost*/
	nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);

	/* THRS_2 : critical */
	nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);

	/* THRS_4 : down clock */
	nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);

	nvkm_debug(subdev,
		   "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
		   sensor->thrs_fan_boost.temp,
		   sensor->thrs_fan_boost.hysteresis,
		   sensor->thrs_down_clock.temp,
		   sensor->thrs_down_clock.hysteresis,
		   sensor->thrs_critical.temp,
		   sensor->thrs_critical.hysteresis,
		   sensor->thrs_shutdown.temp,
		   sensor->thrs_shutdown.hysteresis);
}
/* must be called with alarm_program_lock taken ! */
static void
g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
uint32_t thrs_reg, u8 status_bit,
const struct nvbios_therm_threshold *thrs,
enum nvkm_therm_thrs thrs_name)
{
struct nvkm_device *device = therm->subdev.device;
enum nvkm_therm_thrs_direction direction;
enum nvkm_therm_thrs_state prev_state, new_state;
int temp, cur;
prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
temp = nvkm_rd32(device, thrs_reg);
/* program the next threshold */
if (temp == thrs->temp) {
nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
new_state = NVKM_THERM_THRS_HIGHER;
} else {
nvkm_wr32(device, thrs_reg, thrs->temp);
new_state = NVKM_THERM_THRS_LOWER;
}
/* fix the state (in case someone reprogrammed the alarms) */
cur = therm->func->temp_get(therm);
if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
new_state = NVKM_THERM_THRS_HIGHER;
else if (new_state == NVKM_THERM_THRS_HIGHER &&
cur < thrs->temp - thrs->hysteresis)
new_state = NVKM_THERM_THRS_LOWER;
nvkm_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
/* find the direction */
if (prev_state < new_state)
direction = NVKM_THERM_THRS_RISING;
else if (prev_state > new_state)
direction = NVKM_THERM_THRS_FALLING;
else
return;
/* advertise a change in direction */
nvkm_therm_sensor_event(therm, thrs_name, direction);
}
/* PTherm interrupt handler: dispatch each pending threshold interrupt
 * through the hysteresis emulation, log anything unrecognised, then ACK
 * everything at both PTherm and PBUS level.
 */
static void
g84_therm_intr(struct nvkm_therm *therm)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
	unsigned long flags;
	uint32_t intr;

	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);

	intr = nvkm_rd32(device, 0x20100) & 0x3ff;

	/* THRS_4: downclock */
	if (intr & 0x002) {
		g84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
						   &sensor->thrs_down_clock,
						   NVKM_THERM_THRS_DOWNCLOCK);
		intr &= ~0x002;
	}

	/* shutdown */
	if (intr & 0x004) {
		g84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
						   &sensor->thrs_shutdown,
						   NVKM_THERM_THRS_SHUTDOWN);
		intr &= ~0x004;
	}

	/* THRS_1 : fan boost */
	if (intr & 0x008) {
		g84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
						   &sensor->thrs_fan_boost,
						   NVKM_THERM_THRS_FANBOOST);
		intr &= ~0x008;
	}

	/* THRS_2 : critical */
	if (intr & 0x010) {
		g84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
						   &sensor->thrs_critical,
						   NVKM_THERM_THRS_CRITICAL);
		intr &= ~0x010;
	}

	if (intr)
		nvkm_error(subdev, "intr %08x\n", intr);

	/* ACK everything */
	nvkm_wr32(device, 0x20100, 0xffffffff);
	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */

	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
}
/* Quiesce PTherm: mask all its interrupts, then acknowledge anything
 * still pending at both PTherm and PBUS level. */
void
g84_therm_fini(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;

	nvkm_wr32(device, 0x20000, 0x00000000); /* disable PTherm IRQs */
	nvkm_wr32(device, 0x20100, 0xffffffff); /* ACK all PTherm IRQs */
	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
}
/* G84 init only needs the shared sensor bring-up. */
void
g84_therm_init(struct nvkm_therm *therm)
{
	g84_sensor_setup(therm);
}
/* G84 thermal function table: NV50-style PWM fan control plus the
 * interrupt-driven threshold handling implemented above. */
static const struct nvkm_therm_func
g84_therm = {
	.init = g84_therm_init,
	.fini = g84_therm_fini,
	.intr = g84_therm_intr,
	.pwm_ctrl = nv50_fan_pwm_ctrl,
	.pwm_get = nv50_fan_pwm_get,
	.pwm_set = nv50_fan_pwm_set,
	.pwm_clock = nv50_fan_pwm_clock,
	.temp_get = g84_temp_get,
	.program_alarms = g84_therm_program_alarms,
};
/* Instantiate the G84 thermal subdev and seed every threshold state
 * machine as LOWER ("below threshold") so the first interrupt computes
 * a sensible direction.
 */
int
g84_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_therm **ptherm)
{
	struct nvkm_therm *therm;
	int ret;

	/* NOTE(review): *ptherm is published before the error check --
	 * presumably so teardown can still find the object; confirm. */
	ret = nvkm_therm_new_(&g84_therm, device, type, inst, &therm);
	*ptherm = therm;
	if (ret)
		return ret;

	/* init the thresholds */
	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
					      NVKM_THERM_THRS_LOWER);
	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
					      NVKM_THERM_THRS_LOWER);
	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
					      NVKM_THERM_THRS_LOWER);
	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
					      NVKM_THERM_THRS_LOWER);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c |
/*
* Copyright 2012 Nouveau community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
#include <subdev/bios/extdev.h>
#include <subdev/i2c.h>
/* i2c probe callback: try to bind an lm_sensors-style hwmon driver to a
 * candidate monitoring-chip address.  Returns true and records the
 * client in therm->ic when the bound driver's detect() accepts the
 * device; otherwise unregisters the client and returns false.
 */
static bool
probe_monitoring_device(struct nvkm_i2c_bus *bus,
			struct i2c_board_info *info, void *data)
{
	struct nvkm_therm *therm = data;
	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
	struct i2c_client *client;

	request_module("%s%s", I2C_MODULE_PREFIX, info->type);

	client = i2c_new_client_device(&bus->i2c, info);
	if (IS_ERR(client))
		return false;

	/* drop the device again if no driver bound or detection failed */
	if (!client->dev.driver ||
	    to_i2c_driver(client->dev.driver)->detect(client, info)) {
		i2c_unregister_device(client);
		return false;
	}

	nvkm_debug(&therm->subdev,
		   "Found an %s at address 0x%x (controlled by lm_sensors, "
		   "temp offset %+i C)\n",
		   info->type, info->addr, sensor->offset_constant);
	therm->ic = client;
	return true;
}
/* Static fallback list of well-known monitoring chips and their usual
 * i2c addresses, tried when the VBIOS does not name a device.  The
 * second field is the per-entry temperature offset (deci-degrees). */
static struct nvkm_i2c_bus_probe
nv_board_infos[] = {
	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0 },
	{ { I2C_BOARD_INFO("adt7473", 0x2e) }, 40 },
	{ { I2C_BOARD_INFO("adt7473", 0x2d) }, 40 },
	{ { I2C_BOARD_INFO("adt7473", 0x2c) }, 40 },
	{ { I2C_BOARD_INFO("f75375", 0x2e) }, 0 },
	{ { I2C_BOARD_INFO("lm99", 0x4c) }, 0 },
	{ { I2C_BOARD_INFO("lm90", 0x4c) }, 0 },
	{ { I2C_BOARD_INFO("lm90", 0x4d) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x18) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x19) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x1a) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x29) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x2a) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x2b) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x4c) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x4d) }, 0 },
	{ { I2C_BOARD_INFO("adm1021", 0x4e) }, 0 },
	{ { I2C_BOARD_INFO("lm63", 0x18) }, 0 },
	{ { I2C_BOARD_INFO("lm63", 0x4e) }, 0 },
	{ }
};
/* Locate an external monitoring IC on the primary i2c bus.
 *
 * Preference order: the LM89 or ADT7473 named in the VBIOS external
 * device table, then (unless the VBIOS opts out of probing) the static
 * fallback list above.  On success therm->ic holds the bound client.
 */
void
nvkm_therm_ic_ctor(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	struct nvkm_i2c_bus *bus;
	struct nvbios_extdev_func extdev_entry;

	bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
	if (!bus)
		return;

	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
		struct nvkm_i2c_bus_probe board[] = {
			{ { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
			{ }
		};

		nvkm_i2c_bus_probe(bus, "monitoring device", board,
				   probe_monitoring_device, therm);
		if (therm->ic)
			return;
	}

	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
		struct nvkm_i2c_bus_probe board[] = {
			{ { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
			{ }
		};

		nvkm_i2c_bus_probe(bus, "monitoring device", board,
				   probe_monitoring_device, therm);
		if (therm->ic)
			return;
	}

	if (nvbios_extdev_skip_probe(bios))
		return;

	/* The vbios doesn't provide the address of an existing monitoring
	   device. Let's try our static list.
	 */
	nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
			   probe_monitoring_device, therm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Stub: no controllable fan attached, so the speed cannot be read. */
static int
nvkm_fannil_get(struct nvkm_therm *therm)
{
	return -ENODEV;
}
/* Stub: no controllable fan attached, so the speed cannot be set. */
static int
nvkm_fannil_set(struct nvkm_therm *therm, int percent)
{
	return -ENODEV;
}
/* Fallback "fan" installed when no control method was found: both
 * accessors report -ENODEV, signalling the fan is absent or externally
 * controlled.  Returns 0, or -ENOMEM on allocation failure.
 */
int
nvkm_fannil_create(struct nvkm_therm *therm)
{
	struct nvkm_fan *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	therm->fan = priv;
	if (!priv)
		return -ENOMEM;

	priv->type = "none / external";
	priv->get = nvkm_fannil_get;
	priv->set = nvkm_fannil_set;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/gpio.h>
/* Convert the tach pulse counter into RPM.
 *
 * gt215_therm_init() programs a one-second counting window, so 0x00e728
 * holds pulses per second; the * 60 / 2 suggests two tach pulses per
 * revolution.  Returns -ENODEV when tach counting is disabled.
 */
int
gt215_therm_fan_sense(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;
	u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
	u32 ctrl = nvkm_rd32(device, 0x00e720);

	if (ctrl & 0x00000001)
		return tach * 60 / 2;

	return -ENODEV;
}
/* GT215 bring-up: shared sensor setup, then configure the tachometer to
 * count pulses over a crystal-timed one-second window when a tach GPIO
 * is present.
 */
static void
gt215_therm_init(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;
	struct dcb_gpio_func *tach = &therm->fan->tach;

	g84_sensor_setup(therm);

	/* enable fan tach, count revolutions per-second */
	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
	if (tach->func != DCB_GPIO_UNUSED) {
		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
		nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
	}
	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
}
/* GT215 therm backend: reuses g84/nv50 PWM and sensor code, adds the
 * hardware tachometer implemented above.
 */
static const struct nvkm_therm_func
gt215_therm = {
	.init = gt215_therm_init,
	.fini = g84_therm_fini,
	.pwm_ctrl = nv50_fan_pwm_ctrl,
	.pwm_get = nv50_fan_pwm_get,
	.pwm_set = nv50_fan_pwm_set,
	.pwm_clock = nv50_fan_pwm_clock,
	.temp_get = g84_temp_get,
	.fan_sense = gt215_therm_fan_sense,
	.program_alarms = nvkm_therm_program_alarms_polling,
};
int
gt215_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_therm **ptherm)
{
return nvkm_therm_new_(>215_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Martin Peres
*/
#include "priv.h"
#include <subdev/bios/fan.h>
#include <subdev/gpio.h>
#include <subdev/timer.h>
/* Move the fan towards @target percent duty (or the previously stored
 * target when @target < 0), clamped to the BIOS min/max duty range.
 * Unless @immediate is set, the duty is only nudged by 3% per call and
 * a timer alarm is scheduled to continue the ramp asynchronously.
 * Returns 0 on success or a negative error from the fan backend.
 */
static int
nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
{
	struct nvkm_therm *therm = fan->parent;
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_timer *tmr = subdev->device->timer;
	unsigned long flags;
	int ret = 0;
	int duty;

	/* update target fan speed, restricting to allowed range */
	spin_lock_irqsave(&fan->lock, flags);
	if (target < 0)
		target = fan->percent;
	target = max_t(u8, target, fan->bios.min_duty);
	target = min_t(u8, target, fan->bios.max_duty);
	if (fan->percent != target) {
		nvkm_debug(subdev, "FAN target: %d\n", target);
		fan->percent = target;
	}

	/* check that we're not already at the target duty cycle */
	duty = fan->get(therm);
	if (duty == target) {
		spin_unlock_irqrestore(&fan->lock, flags);
		return 0;
	}

	/* smooth out the fanspeed increase/decrease */
	if (!immediate && duty >= 0) {
		/* the constant "3" is a rough approximation taken from
		 * nvidia's behaviour.
		 * it is meant to bump the fan speed more incrementally
		 */
		if (duty < target)
			duty = min(duty + 3, target);
		else if (duty > target)
			duty = max(duty - 3, target);
	} else {
		duty = target;
	}

	nvkm_debug(subdev, "FAN update: %d\n", duty);
	ret = fan->set(therm, duty);
	if (ret) {
		spin_unlock_irqrestore(&fan->lock, flags);
		return ret;
	}

	/* fan speed updated, drop the fan lock before grabbing the
	 * alarm-scheduling lock and risking a deadlock
	 */
	spin_unlock_irqrestore(&fan->lock, flags);

	/* schedule next fan update, if not at target speed already */
	if (target != duty) {
		u16 bump_period = fan->bios.bump_period;
		u16 slow_down_period = fan->bios.slow_down_period;
		u64 delay;
		/* NOTE(review): duty != target holds inside this branch,
		 * so the "duty == target" arm below is unreachable.
		 */
		if (duty > target)
			delay = slow_down_period;
		else if (duty == target)
			delay = min(bump_period, slow_down_period) ;
		else
			delay = bump_period;
		/* periods are in ms; the alarm API takes nanoseconds */
		nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
	}
	return ret;
}
/* Timer-alarm callback: take another smoothing step towards the
 * current target duty (-1 = keep stored target, not immediate).
 */
static void
nvkm_fan_alarm(struct nvkm_alarm *alarm)
{
	struct nvkm_fan *fan = container_of(alarm, struct nvkm_fan, alarm);
	nvkm_fan_update(fan, false, -1);
}
/* Return the current fan duty cycle from the backend (fannil/fanpwm/
 * fantog), as a percentage or a negative error.
 */
int
nvkm_therm_fan_get(struct nvkm_therm *therm)
{
	return therm->fan->get(therm);
}
/* Request a new fan duty cycle; see nvkm_fan_update() for the ramping
 * and clamping semantics.
 */
int
nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
{
	return nvkm_fan_update(therm->fan, immediate, percent);
}
/* Measure the fan speed.  Prefers a chipset-specific hardware counter;
 * otherwise times level changes on the FAN_SENSE GPIO and extrapolates
 * to RPM.  Returns RPM, 0 when no edges were seen in time, or -ENODEV.
 */
int
nvkm_therm_fan_sense(struct nvkm_therm *therm)
{
	struct nvkm_device *device = therm->subdev.device;
	struct nvkm_timer *tmr = device->timer;
	struct nvkm_gpio *gpio = device->gpio;
	u32 cycles, cur, prev;
	u64 start, end, tach;

	if (therm->func->fan_sense)
		return therm->func->fan_sense(therm);

	if (therm->fan->tach.func == DCB_GPIO_UNUSED)
		return -ENODEV;

	/* Time a complete rotation and extrapolate to RPM:
	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
	 */
	start = nvkm_timer_read(tmr);
	prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
			     therm->fan->tach.line);
	cycles = 0;
	do {
		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */

		cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
				    therm->fan->tach.line);
		if (prev != cur) {
			/* NOTE(review): only re-reads the start time if the
			 * initial timer read happened to be zero.
			 */
			if (!start)
				start = nvkm_timer_read(tmr);
			cycles++;
			prev = cur;
		}
	/* stop after 5 edges or a 250ms timeout */
	} while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
	end = nvkm_timer_read(tmr);

	/* 60e9 ns per minute divided by the measured period (ns) */
	if (cycles == 5) {
		tach = (u64)60000000000ULL;
		do_div(tach, (end - start));
		return tach;
	} else
		return 0;
}
/* Userspace-facing read of the current fan duty; no mode restriction. */
int
nvkm_therm_fan_user_get(struct nvkm_therm *therm)
{
	return nvkm_therm_fan_get(therm);
}
/* Userspace-facing fan control: only honoured while fan management is
 * in manual mode; applies the duty immediately (no ramping).
 */
int
nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
{
	if (therm->mode != NVKM_THERM_CTRL_MANUAL)
		return -EINVAL;

	return nvkm_therm_fan_set(therm, true, percent);
}
/* Fallback fan parameters, used before/if the VBIOS fan tables fail to
 * parse.  Duties are percentages; periods are in ms (consumed by
 * nvkm_fan_update()); temperatures are presumably degrees C.
 */
static void
nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
{
	therm->fan->bios.pwm_freq = 0;
	therm->fan->bios.min_duty = 0;
	therm->fan->bios.max_duty = 100;
	therm->fan->bios.bump_period = 500;
	therm->fan->bios.slow_down_period = 2000;
	therm->fan->bios.linear_min_temp = 40;
	therm->fan->bios.linear_max_temp = 85;
}
/* Sanitise BIOS-provided duty limits: clamp both to <= 100% and make
 * sure min never exceeds max.
 */
static void
nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
{
	if (therm->fan->bios.min_duty > 100)
		therm->fan->bios.min_duty = 100;
	if (therm->fan->bios.max_duty > 100)
		therm->fan->bios.max_duty = 100;
	if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
		therm->fan->bios.min_duty = therm->fan->bios.max_duty;
}
/* Nothing to do at init time; fan state is set up in the constructor. */
int
nvkm_therm_fan_init(struct nvkm_therm *therm)
{
	return 0;
}
/* Teardown: on suspend, cancel any pending fan-ramp alarm (delay 0). */
int
nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
{
	struct nvkm_timer *tmr = therm->subdev.device->timer;
	if (suspend)
		nvkm_timer_alarm(tmr, 0, &therm->fan->alarm);
	return 0;
}
/* Construct the fan object: pick a control method (PWM, toggle, or a
 * dummy fallback), detect an optional tachometer GPIO, set up the
 * ramping alarm/lock, and parse fan parameters from the VBIOS with
 * sane defaults and safety clamping.
 */
int
nvkm_therm_fan_ctor(struct nvkm_therm *therm)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gpio *gpio = device->gpio;
	struct nvkm_bios *bios = device->bios;
	struct dcb_gpio_func func;
	int ret;

	/* attempt to locate a drivable fan, and determine control method */
	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
	if (ret == 0) {
		/* FIXME: is this really the place to perform such checks ? */
		if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
			nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
			ret = -EINVAL;
		} else {
			/* prefer PWM control; fall back to GPIO toggling */
			ret = nvkm_fanpwm_create(therm, &func);
			if (ret != 0)
				ret = nvkm_fantog_create(therm, &func);
		}
	}

	/* no controllable fan found, create a dummy fan module */
	if (ret != 0) {
		ret = nvkm_fannil_create(therm);
		if (ret)
			return ret;
	}

	nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);

	/* read the current speed, it is useful when resuming */
	therm->fan->percent = nvkm_therm_fan_get(therm);

	/* attempt to detect a tachometer connection */
	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
			     &therm->fan->tach);
	if (ret)
		therm->fan->tach.func = DCB_GPIO_UNUSED;

	/* initialise fan bump/slow update handling */
	therm->fan->parent = therm;
	nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
	spin_lock_init(&therm->fan->lock);

	/* other random init... */
	nvkm_therm_fan_set_defaults(therm);
	nvbios_perf_fan_parse(bios, &therm->fan->perf);
	if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
		nvkm_debug(subdev, "parsing the fan table failed\n");
		if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
			nvkm_error(subdev, "parsing both fan tables failed\n");
	}
	nvkm_therm_fan_safety_checks(therm);
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c |
/*
* Copyright 2017 Rhys Kidd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rhys Kidd
*/
#include "priv.h"
static int
gp100_temp_get(struct nvkm_therm *therm)
{
struct nvkm_device *device = therm->subdev.device;
struct nvkm_subdev *subdev = &therm->subdev;
u32 tsensor = nvkm_rd32(device, 0x020460);
u32 inttemp = (tsensor & 0x0001fff8);
/* device SHADOWed */
if (tsensor & 0x40000000)
nvkm_trace(subdev, "reading temperature from SHADOWed sensor\n");
/* device valid */
if (tsensor & 0x20000000)
return (inttemp >> 8);
else
return -ENODEV;
}
/* GP100 therm backend: temperature read-out plus polled alarms only. */
static const struct nvkm_therm_func
gp100_therm = {
	.temp_get = gp100_temp_get,
	.program_alarms = nvkm_therm_program_alarms_polling,
};
/* Instantiate the common therm subdev with the GP100 backend hooks. */
int
gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_therm **ptherm)
{
	return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Read and acknowledge pending GPIO interrupts.  GK104 has two banks
 * of 16 lines; each status register packs hi-transitions in its low
 * half and lo-transitions in its high half, which is unswizzled here
 * into flat 32-bit per-line masks.
 */
static void
gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x00dc00);
	u32 intr1 = nvkm_rd32(device, 0x00dc80);
	/* only report lines whose interrupt is enabled */
	u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
	u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
	/* write-back acks the pending bits */
	nvkm_wr32(device, 0x00dc00, intr0);
	nvkm_wr32(device, 0x00dc80, intr1);
}
/* Enable/disable hi/lo transition interrupts for the lines in @mask,
 * spreading the flat 32-bit mask/data across the two 16-line banks
 * (low half of each register = hi-transitions, high half = lo).
 */
static void
gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 inte0 = nvkm_rd32(device, 0x00dc08);
	u32 inte1 = nvkm_rd32(device, 0x00dc88);
	/* bank 0: lines 0..15 */
	if (type & NVKM_GPIO_LO)
		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
	/* bank 1: lines 16..31 */
	mask >>= 16;
	data >>= 16;
	if (type & NVKM_GPIO_LO)
		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte1 = (inte1 & ~mask) | data;
	nvkm_wr32(device, 0x00dc08, inte0);
	nvkm_wr32(device, 0x00dc88, inte1);
}
/* GK104 GPIO backend: new interrupt registers, gf119 drive/sense. */
static const struct nvkm_gpio_func
gk104_gpio = {
	.lines = 32,
	.intr_stat = gk104_gpio_intr_stat,
	.intr_mask = gk104_gpio_intr_mask,
	.drive = gf119_gpio_drive,
	.sense = gf119_gpio_sense,
	.reset = gf119_gpio_reset,
};
/* Instantiate the common GPIO subdev with the GK104 backend hooks. */
int
gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_gpio **pgpio)
{
	return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c |
/*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
static int
nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
{
struct nvkm_device *device = gpio->subdev.device;
if (line < 2) {
line = line * 16;
line = nvkm_rd32(device, 0x600818) >> line;
return !!(line & 0x0100);
} else
if (line < 10) {
line = (line - 2) * 4;
line = nvkm_rd32(device, 0x60081c) >> line;
return !!(line & 0x04);
} else
if (line < 14) {
line = (line - 10) * 4;
line = nvkm_rd32(device, 0x600850) >> line;
return !!(line & 0x04);
}
return -EINVAL;
}
/* Drive GPIO @line: set its direction (@dir) and output level (@out).
 * Returns 0, or -EINVAL for an out-of-range line.
 */
static int
nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 reg, mask, data, shift;

	if (line < 2) {
		reg = 0x600818;
		shift = line * 16;
		mask = 0x00000011;		/* dir bit 4, out bit 0 */
		data = (dir << 4) | out;
	} else if (line < 10) {
		reg = 0x60081c;
		shift = (line - 2) * 4;
		mask = 0x00000003;		/* dir bit 1, out bit 0 */
		data = (dir << 1) | out;
	} else if (line < 14) {
		reg = 0x600850;
		shift = (line - 10) * 4;
		mask = 0x00000003;		/* dir bit 1, out bit 0 */
		data = (dir << 1) | out;
	} else {
		return -EINVAL;
	}

	nvkm_mask(device, reg, mask << shift, data << shift);
	return 0;
}
/* Read and acknowledge pending GPIO interrupts; lo-transitions sit in
 * the high half of the status register, hi-transitions in the low half.
 */
static void
nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 intr = nvkm_rd32(device, 0x001104);
	/* mask with enables so only armed lines are reported */
	u32 stat = nvkm_rd32(device, 0x001144) & intr;
	*lo = (stat & 0xffff0000) >> 16;
	*hi = (stat & 0x0000ffff);
	/* write-back acks the pending bits */
	nvkm_wr32(device, 0x001104, intr);
}
/* Enable/disable transition interrupts for the lines in @mask; the
 * enable register packs hi-transitions low, lo-transitions high.
 */
static void
nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 inte = nvkm_rd32(device, 0x001144);
	if (type & NVKM_GPIO_LO)
		inte = (inte & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte = (inte & ~mask) | data;
	nvkm_wr32(device, 0x001144, inte);
}
/* NV10 GPIO backend: 16 lines, no reset hook. */
static const struct nvkm_gpio_func
nv10_gpio = {
	.lines = 16,
	.intr_stat = nv10_gpio_intr_stat,
	.intr_mask = nv10_gpio_intr_mask,
	.drive = nv10_gpio_drive,
	.sense = nv10_gpio_sense,
};
/* Instantiate the common GPIO subdev with the NV10 backend hooks. */
int
nv10_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_gpio **pgpio)
{
	return nvkm_gpio_new_(&nv10_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Walk the DCB GPIO table and restore the default state of every entry
 * matching @match (DCB_GPIO_UNUSED = all used entries): drive each line
 * to its default level and program the per-line config registers.
 */
void
nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_device *device = gpio->subdev.device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		/* config registers for lines 0..15 and 16..31 */
		static const u32 regs[] = { 0xe100, 0xe28c };
		u32 data = nvbios_rd32(bios, entry);
		u8 line = (data & 0x0000001f);
		u8 func = (data & 0x0000ff00) >> 8;
		u8 defs = !!(data & 0x01000000);	/* default level */
		/* unk0/unk1: undocumented VBIOS flags -- semantics unknown */
		u8 unk0 = !!(data & 0x02000000);
		u8 unk1 = !!(data & 0x04000000);
		u32 val = (unk1 << 16) | unk0;
		u32 reg = regs[line >> 4];
		u32 lsh = line & 0x0f;

		if ( func == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		nvkm_gpio_set(gpio, 0, func, line, defs);

		nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
	}
}
/* Map a GPIO line (0..31) to its control register and the bit offset
 * of its 4-bit field.  Returns 0, or -EINVAL for lines >= 32.
 */
static int
nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
	static const u32 regs[] = { 0xe104, 0xe108, 0xe280, 0xe284 };

	if (line >= 32)
		return -EINVAL;

	*reg = regs[line >> 3];		/* 8 lines per register */
	*shift = (line & 7) << 2;	/* 4 bits per line */
	return 0;
}
/* Drive GPIO @line: program direction (@dir) and output level (@out)
 * into the line's 2-bit control field.  Returns 0 or -EINVAL.
 */
int
nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 reg, shift;

	/* BUGFIX: "&reg" had been mojibake-corrupted to "(R)" (U+00AE),
	 * which does not compile; restored the address-of expression.
	 */
	if (nv50_gpio_location(line, &reg, &shift))
		return -EINVAL;

	nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
	return 0;
}
/* Sample the input level (bit 2 of the line's field) of GPIO @line.
 * Returns 0/1, or -EINVAL for an out-of-range line.
 */
int
nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 reg, shift;

	/* BUGFIX: "&reg" had been mojibake-corrupted to "(R)" (U+00AE),
	 * which does not compile; restored the address-of expression.
	 */
	if (nv50_gpio_location(line, &reg, &shift))
		return -EINVAL;

	return !!(nvkm_rd32(device, reg) & (4 << shift));
}
/* Read and acknowledge pending GPIO interrupts; hi-transitions occupy
 * the low half of the status register, lo-transitions the high half.
 */
static void
nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 intr = nvkm_rd32(device, 0x00e054);
	/* mask with enables so only armed lines are reported */
	u32 stat = nvkm_rd32(device, 0x00e050) & intr;
	*lo = (stat & 0xffff0000) >> 16;
	*hi = (stat & 0x0000ffff);
	/* write-back acks the pending bits */
	nvkm_wr32(device, 0x00e054, intr);
}
/* Enable/disable transition interrupts for the lines in @mask; the
 * enable register packs hi-transitions low, lo-transitions high.
 */
static void
nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 inte = nvkm_rd32(device, 0x00e050);
	if (type & NVKM_GPIO_LO)
		inte = (inte & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte = (inte & ~mask) | data;
	nvkm_wr32(device, 0x00e050, inte);
}
/* NV50 GPIO backend: 16 lines, single interrupt bank, DCB reset. */
static const struct nvkm_gpio_func
nv50_gpio = {
	.lines = 16,
	.intr_stat = nv50_gpio_intr_stat,
	.intr_mask = nv50_gpio_intr_mask,
	.drive = nv50_gpio_drive,
	.sense = nv50_gpio_sense,
	.reset = nv50_gpio_reset,
};
/* Instantiate the common GPIO subdev with the NV50 backend hooks. */
int
nv50_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_gpio **pgpio)
{
	return nvkm_gpio_new_(&nv50_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
/* Walk the DCB GPIO table and restore the default state of every entry
 * matching @match (DCB_GPIO_UNUSED = all used entries), programming the
 * per-line 0xd610 config registers and the 0xd740 routing slots.
 */
void
gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_device *device = gpio->subdev.device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		u32 data = nvbios_rd32(bios, entry);
		u8 line =   (data & 0x0000003f);
		u8 defs = !!(data & 0x00000080);	/* default level */
		u8 func =   (data & 0x0000ff00) >> 8;
		/* unk0/unk1: undocumented VBIOS fields -- semantics unknown */
		u8 unk0 =   (data & 0x00ff0000) >> 16;
		u8 unk1 =   (data & 0x1f000000) >> 24;

		if ( func == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		nvkm_gpio_set(gpio, 0, func, line, defs);

		nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
		if (unk1--)
			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
	}
}
/* Drive GPIO @line: program direction (@dir, bit 13, inverted) and
 * output level (@out, bit 12), then poke the update trigger.
 */
int
gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 direction = (dir ^ 1) << 13;
	u32 level = out << 12;

	nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000,
		  direction | level);
	nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
	return 0;
}
int
gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
{
struct nvkm_device *device = gpio->subdev.device;
return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
}
/* GF119 GPIO backend: new drive/sense registers, g94 interrupt code. */
static const struct nvkm_gpio_func
gf119_gpio = {
	.lines = 32,
	.intr_stat = g94_gpio_intr_stat,
	.intr_mask = g94_gpio_intr_mask,
	.drive = gf119_gpio_drive,
	.sense = gf119_gpio_sense,
	.reset = gf119_gpio_reset,
};
/* Instantiate the common GPIO subdev with the GF119 backend hooks. */
int
gf119_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_gpio **pgpio)
{
	return nvkm_gpio_new_(&gf119_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c |
/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/option.h>
static int
nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
{
return gpio->func->drive(gpio, line, dir, out);
}
static int
nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
{
return gpio->func->sense(gpio, line);
}
/* Reset GPIO lines matching @func to their DCB defaults; a no-op when
 * the chipset backend provides no reset hook.
 */
void
nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
{
	const struct nvkm_gpio_func *impl = gpio->func;

	if (impl->reset)
		impl->reset(gpio, func);
}
/* Look up a GPIO function in the VBIOS DCB tables by @tag and/or @line
 * (either may be the 0xff wildcard, but not both).  Fills *func and
 * returns 0, -EINVAL for a double wildcard, or -ENOENT when no entry
 * (or board quirk) matches.
 */
int
nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
	       struct dcb_gpio_func *func)
{
	struct nvkm_device *device = gpio->subdev.device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, len;
	u16 data;

	if (line == 0xff && tag == 0xff)
		return -EINVAL;

	data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
	if (data)
		return 0;

	/* Apple iMac G4 NV18 */
	if (device->quirk && device->quirk->tv_gpio) {
		if (tag == DCB_GPIO_TVDAC0) {
			/* synthesise an entry from the board quirk table */
			*func = (struct dcb_gpio_func) {
				.func = DCB_GPIO_TVDAC0,
				.line = device->quirk->tv_gpio,
				.log[0] = 0,
				.log[1] = 1,
			};
			return 0;
		}
	}

	return -ENOENT;
}
/* Drive the GPIO identified by @tag/@line to logical @state, using the
 * direction/level encoding from its DCB entry.  Returns 0 or a
 * negative error from lookup/drive.
 */
int
nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
{
	struct dcb_gpio_func func;
	int ret = nvkm_gpio_find(gpio, idx, tag, line, &func);

	if (ret)
		return ret;

	/* log[state]: bit 1 = direction, bit 0 = output level */
	return nvkm_gpio_drive(gpio, idx, func.line,
			       !!(func.log[state] & 0x02),
			       !!(func.log[state] & 0x01));
}
/* Read the logical state of the GPIO identified by @tag/@line:
 * returns 1 when the sensed level matches the entry's "on" level,
 * 0 otherwise, or a negative error from lookup/sense.
 */
int
nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
{
	struct dcb_gpio_func func;
	int ret = nvkm_gpio_find(gpio, idx, tag, line, &func);

	if (ret)
		return ret;

	ret = nvkm_gpio_sense(gpio, idx, func.line);
	if (ret < 0)
		return ret;

	/* translate the raw level into the function's logical state */
	return ret == (func.log[1] & 1);
}
static void
nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
gpio->func->intr_mask(gpio, type, 1 << index, 0);
}
static void
nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
{
struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
}
/* Event hooks used by the common event code to (un)mask per-line IRQs. */
static const struct nvkm_event_func
nvkm_gpio_intr_func = {
	.init = nvkm_gpio_intr_init,
	.fini = nvkm_gpio_intr_fini,
};
/* GPIO interrupt handler: fetch+ack pending transitions from the
 * backend, then notify event listeners for every line that toggled.
 */
static void
nvkm_gpio_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
	u32 hi, lo, i;

	gpio->func->intr_stat(gpio, &hi, &lo);

	/* the (hi | lo) term only short-circuits the all-zero case */
	for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
		u32 mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
			   (NVKM_GPIO_LO * !!(lo & (1 << i)));
		nvkm_event_ntfy(&gpio->event, i, mask);
	}
}
/* Subdev fini: mask every GPIO interrupt and ack anything pending. */
static int
nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
	u32 mask = (1ULL << gpio->func->lines) - 1;

	gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
	/* `mask` is reused as scratch for both hi/lo status outputs */
	gpio->func->intr_stat(gpio, &mask, &mask);
	return 0;
}
/* Boards whose GPIOs must be reset to DCB defaults at init time. */
static const struct dmi_system_id gpio_reset_ids[] = {
	{
		.ident = "Apple Macbook 10,1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
		}
	},
	{ }
};
/* GPIO functions sampled at init to detect missing external power. */
static enum dcb_gpio_func_name power_checks[] = {
	DCB_GPIO_THERM_EXT_POWER_EVENT,
	DCB_GPIO_POWER_ALERT,
	DCB_GPIO_EXT_POWER_LOW,
};
/* Subdev init: on quirky boards, reset GPIOs to their DCB defaults;
 * then (unless disabled via NvPowerChecks=0) refuse to start when any
 * external-power GPIO reports that power is missing.
 */
static int
nvkm_gpio_init(struct nvkm_subdev *subdev)
{
	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
	struct dcb_gpio_func func;
	int ret;
	int i;

	if (dmi_check_system(gpio_reset_ids))
		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);

	if (nvkm_boolopt(subdev->device->cfgopt, "NvPowerChecks", true)) {
		for (i = 0; i < ARRAY_SIZE(power_checks); ++i) {
			/* skip functions the board doesn't expose */
			ret = nvkm_gpio_find(gpio, 0, power_checks[i],
					     DCB_GPIO_UNUSED, &func);
			if (ret)
				continue;

			/* a logical-true reading indicates a power problem */
			ret = nvkm_gpio_get(gpio, 0, func.func, func.line);
			if (!ret)
				continue;

			nvkm_error(&gpio->subdev,
				   "GPU is missing power, check its power "
				   "cables.  Boot with "
				   "nouveau.config=NvPowerChecks=0 to "
				   "disable.\n");
			return -EINVAL;
		}
	}

	return 0;
}
/* Subdev destructor: tear down the event state and hand back the
 * allocation for the core to free.
 */
static void *
nvkm_gpio_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
	nvkm_event_fini(&gpio->event);
	return gpio;
}
/* Common subdev glue for all GPIO backends. */
static const struct nvkm_subdev_func
nvkm_gpio = {
	.dtor = nvkm_gpio_dtor,
	.init = nvkm_gpio_init,
	.fini = nvkm_gpio_fini,
	.intr = nvkm_gpio_intr,
};
/* Allocate and initialise a GPIO subdev with the given backend @func.
 * Returns 0, -ENOMEM, or an error from event initialisation.
 */
int
nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio)
{
	struct nvkm_gpio *gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);

	*pgpio = gpio;
	if (!gpio)
		return -ENOMEM;

	nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev);
	gpio->func = func;

	/* two event types (hi/lo transition) per GPIO line */
	return nvkm_event_init(&nvkm_gpio_intr_func, &gpio->subdev, 2,
			       func->lines, &gpio->event);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
void
g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
struct nvkm_device *device = gpio->subdev.device;
u32 intr0 = nvkm_rd32(device, 0x00e054);
u32 intr1 = nvkm_rd32(device, 0x00e074);
u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
nvkm_wr32(device, 0x00e054, intr0);
nvkm_wr32(device, 0x00e074, intr1);
}
void
g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
struct nvkm_device *device = gpio->subdev.device;
u32 inte0 = nvkm_rd32(device, 0x00e050);
u32 inte1 = nvkm_rd32(device, 0x00e070);
if (type & NVKM_GPIO_LO)
inte0 = (inte0 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
mask >>= 16;
data >>= 16;
if (type & NVKM_GPIO_LO)
inte1 = (inte1 & ~(mask << 16)) | (data << 16);
if (type & NVKM_GPIO_HI)
inte1 = (inte1 & ~mask) | data;
nvkm_wr32(device, 0x00e050, inte0);
nvkm_wr32(device, 0x00e070, inte1);
}
static const struct nvkm_gpio_func
g94_gpio = {
.lines = 32,
.intr_stat = g94_gpio_intr_stat,
.intr_mask = g94_gpio_intr_mask,
.drive = nv50_gpio_drive,
.sense = nv50_gpio_sense,
.reset = nv50_gpio_reset,
};
int
g94_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gpio **pgpio)
{
return nvkm_gpio_new_(&g94_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
/* Walk the DCB GPIO table and restore the default state of every entry
 * matching @match (DCB_GPIO_UNUSED = all used entries); same layout as
 * gf119 but with the config registers relocated to 0x021200.
 */
static void
ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_device *device = gpio->subdev.device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		u32 data = nvbios_rd32(bios, entry);
		u8 line =   (data & 0x0000003f);
		u8 defs = !!(data & 0x00000080);	/* default level */
		u8 func =   (data & 0x0000ff00) >> 8;
		/* unk0/unk1: undocumented VBIOS fields -- semantics unknown */
		u8 unk0 =   (data & 0x00ff0000) >> 16;
		u8 unk1 =   (data & 0x1f000000) >> 24;

		if ( func == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		nvkm_gpio_set(gpio, 0, func, line, defs);

		nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
		if (unk1--)
			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
	}
}
/* Configure direction and output level for one GPIO line.
 * Bit 13 of the per-line register is the (inverted) direction,
 * bit 12 the output level.  Always succeeds.
 */
static int
ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
	struct nvkm_device *device = gpio->subdev.device;
	const u32 addr = 0x021200 + (line * 4);
	const u32 bits = ((dir ^ 1) << 13) | (out << 12);

	nvkm_mask(device, addr, 0x00003000, bits);
	/* Latch the new configuration. */
	nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
	return 0;
}
static int
ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
{
struct nvkm_device *device = gpio->subdev.device;
return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
}
static void
ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x021640);
	u32 intr1 = nvkm_rd32(device, 0x02164c);
	u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;	/* enabled & pending */
	u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;

	/* Each register appears to pack hi-edge events in its low half and
	 * lo-edge events in its high half for 16 lines; merge into one
	 * 32-bit mask per edge covering all 32 lines.
	 */
	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);

	/* Acknowledge the interrupts we consumed. */
	nvkm_wr32(device, 0x021640, intr0);
	nvkm_wr32(device, 0x02164c, intr1);
}
static void
ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
{
	struct nvkm_device *device = gpio->subdev.device;
	u32 inte0 = nvkm_rd32(device, 0x021648);
	u32 inte1 = nvkm_rd32(device, 0x021654);

	/* Lines 0-15 live in inte0: lo-edge enables in the high half,
	 * hi-edge enables in the low half.
	 */
	if (type & NVKM_GPIO_LO)
		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);

	/* Lines 16-31 live in inte1 with the same layout. */
	mask >>= 16;
	data >>= 16;
	if (type & NVKM_GPIO_LO)
		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
	if (type & NVKM_GPIO_HI)
		inte1 = (inte1 & ~mask) | data;

	nvkm_wr32(device, 0x021648, inte0);
	nvkm_wr32(device, 0x021654, inte1);
}
/* GPIO engine description for GA102 (Ampere) chipsets. */
static const struct nvkm_gpio_func
ga102_gpio = {
	.lines = 32,
	.intr_stat = ga102_gpio_intr_stat,
	.intr_mask = ga102_gpio_intr_mask,
	.drive = ga102_gpio_drive,
	.sense = ga102_gpio_sense,
	.reset = ga102_gpio_reset,
};
/* Create the GA102 GPIO subdev instance. */
int
ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_gpio **pgpio)
{
	return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv40_clk(p) container_of((p), struct nv40_clk, base)
#include "priv.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
/* Register values computed by calc() and applied by prog(). */
struct nv40_clk {
	struct nvkm_clk base;
	u32 ctrl;	/* 0x00c040 clock-source selection */
	u32 npll_ctrl;	/* 0x004000 core PLL control */
	u32 npll_coef;	/* 0x004004 core PLL coefficients */
	u32 spll;	/* 0x004008 shader PLL ctrl+coef */
};
/* Read back the output frequency (kHz) of a single-stage PLL at 'reg'.
 * Returns 0 when the PLL is disabled (ctrl bit 31 clear) or its
 * coefficients are not programmed.
 */
static u32
read_pll_1(struct nv40_clk *clk, u32 reg)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, reg + 0x00);
	int P = (ctrl & 0x00070000) >> 16;
	int N = (ctrl & 0x0000ff00) >> 8;
	int M = (ctrl & 0x000000ff) >> 0;
	u32 ref = 27000, khz = 0;

	/* Guard M == 0 (unprogrammed coefficients), which would otherwise
	 * divide by zero; read_pll_2() checks M1 the same way.
	 */
	if ((ctrl & 0x80000000) && M)
		khz = ref * N / M;

	return khz >> P;
}
/* Read back the output frequency (kHz) of a two-stage PLL at 'reg':
 * khz = ref * N1/M1 [* N2/M2] >> P.  The second stage only applies when
 * ctrl bit 30 is set and bit 8 is clear.  Returns 0 when the PLL is
 * disabled or its coefficients are invalid.
 */
static u32
read_pll_2(struct nv40_clk *clk, u32 reg)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, reg + 0x00);
	u32 coef = nvkm_rd32(device, reg + 0x04);
	int N2 = (coef & 0xff000000) >> 24;
	int M2 = (coef & 0x00ff0000) >> 16;
	int N1 = (coef & 0x0000ff00) >> 8;
	int M1 = (coef & 0x000000ff) >> 0;
	int P = (ctrl & 0x00070000) >> 16;
	u32 ref = 27000, khz = 0;

	if ((ctrl & 0x80000000) && M1) {
		khz = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				khz = khz * N2 / M2;
			else
				khz = 0;	/* invalid second stage */
		}
	}

	return khz >> P;
}
/* Translate a clock-source mux value (from 0x00c040) into a frequency.
 * Source 3 is the core PLL (two-stage), source 2 the shader PLL;
 * anything else is unknown and reads as 0.
 */
static u32
read_clk(struct nv40_clk *clk, u32 src)
{
	if (src == 3)
		return read_pll_2(clk, 0x004000);
	if (src == 2)
		return read_pll_1(clk, 0x004008);
	return 0;
}
/* Report the current frequency (kHz) of a clock domain, or -EINVAL for
 * domains this chipset does not expose.
 */
static int
nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct nv40_clk *clk = nv40_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c040);	/* clock-source mux */

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /*XXX: PCIE/AGP differ*/
	case nv_clk_src_core:
		return read_clk(clk, (mast & 0x00000003) >> 0);
	case nv_clk_src_shader:
		return read_clk(clk, (mast & 0x00000030) >> 4);
	case nv_clk_src_mem:
		return read_pll_2(clk, 0x4020);
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return -EINVAL;
}
/* Compute PLL coefficients for 'khz' using the VBIOS limits for 'reg'.
 * The second VCO stage is disabled when the target fits in VCO1 alone.
 * Returns the achieved frequency, -ERANGE when no setting reaches the
 * target, or a negative error from the VBIOS parse.
 */
static int
nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
		  int *N1, int *M1, int *N2, int *M2, int *log2P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
	if (ret)
		return ret;

	if (khz < pll.vco1.max_freq)
		pll.vco2.max_freq = 0;	/* single-stage is sufficient */

	ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
	if (ret == 0)
		return -ERANGE;

	return ret;
}
/* Pre-compute the PLL/mux register values for the requested cstate;
 * nothing is written to hardware until prog().
 */
static int
nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct nv40_clk *clk = nv40_clk(base);
	int gclk = cstate->domain[nv_clk_src_core];
	int sclk = cstate->domain[nv_clk_src_shader];
	int N1, M1, N2, M2, log2P;
	int ret;

	/* core/geometric clock */
	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
				&N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		return ret;

	/* Single-stage configuration when the second stage is 1:1,
	 * otherwise program both stages.
	 */
	if (N2 == M2) {
		clk->npll_ctrl = 0x80000100 | (log2P << 16);
		clk->npll_coef = (N1 << 8) | M1;
	} else {
		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	/* use the second pll for shader/rop clock, if it differs from core */
	if (sclk && sclk != gclk) {
		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
					&N1, &M1, NULL, NULL, &log2P);
		if (ret < 0)
			return ret;

		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
		clk->ctrl = 0x00000223;
	} else {
		clk->spll = 0x00000000;
		clk->ctrl = 0x00000333;
	}

	return 0;
}
/* Apply the register values computed by calc(): park the clock muxes
 * (presumably on a bypass source), reprogram the PLLs, wait for them to
 * settle, then switch to the new configuration.
 */
static int
nv40_clk_prog(struct nvkm_clk *base)
{
	struct nv40_clk *clk = nv40_clk(base);
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
	nvkm_wr32(device, 0x004004, clk->npll_coef);
	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
	mdelay(5);	/* fixed settle time; no lock bit is polled */
	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
	return 0;
}
/* No intermediate state to discard on this chipset; intentionally empty. */
static void
nv40_clk_tidy(struct nvkm_clk *obj)
{
}
/* Clock engine description for NV40-family chipsets. */
static const struct nvkm_clk_func
nv40_clk = {
	.read = nv40_clk_read,
	.calc = nv40_clk_calc,
	.prog = nv40_clk_prog,
	.tidy = nv40_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
		{ nv_clk_src_max }
	}
};
/* Allocate and construct the NV40 clock subdev. */
int
nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_clk **pclk)
{
	struct nv40_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	/* NV40 reuses the NV04 PLL calculation/programming helpers. */
	clk->base.pll_calc = nv04_clk_pll_calc;
	clk->base.pll_prog = nv04_clk_pll_prog;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit/nv04.h>
/* Compute PLL values for frequency 'clk' (kHz) against the limits in
 * 'info', filling 'pv' on success.  Returns the achieved frequency from
 * nv04_pll_calc(), or 0 on failure (in which case 'pv' is untouched).
 */
int
nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
		  int clk, struct nvkm_pll_vals *pv)
{
	int N1, M1, N2, M2, P;
	int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
	if (ret) {
		pv->refclk = info->refclk;
		pv->N1 = N1;
		pv->M1 = M1;
		pv->N2 = N2;
		pv->M2 = M2;
		pv->log2P = P;
	}
	return ret;
}
/* Program PLL 'reg1' with the precomputed values in 'pv', selecting the
 * devinit programming method by chipset generation: NV30+/NV40+ use the
 * two-register variants, older chips the single-register one.
 */
int
nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_devinit *devinit = device->devinit;
	int cv = device->bios->version.chip;

	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
	    cv >= 0x40) {
		if (reg1 > 0x405c)
			setPLL_double_highregs(devinit, reg1, pv);
		else
			setPLL_double_lowregs(devinit, reg1, pv);
	} else
		setPLL_single(devinit, reg1, pv);

	return 0;
}
/* NV04 exposes no reclockable domains; only the PLL helpers above are
 * provided for other users.
 */
static const struct nvkm_clk_func
nv04_clk = {
	.domains = {
		{ nv_clk_src_max }
	}
};
/* Construct the NV04 clock subdev and hook up the PLL helpers. */
int
nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_clk **pclk)
{
	int ret;

	ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk);
	if (ret)
		return ret;

	(*pclk)->pll_calc = nv04_clk_pll_calc;
	(*pclk)->pll_prog = nv04_clk_pll_prog;
	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gk104_clk(p) container_of((p), struct gk104_clk, base)
#include "priv.h"
#include "pll.h"
#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
/* Per-domain state computed by calc() and applied by prog(). */
struct gk104_clk_info {
	u32 freq;	/* achieved frequency (kHz); 0 == domain untouched */
	u32 ssel;	/* PLL-select bit for 0x137100, or 0 for divider mode */
	u32 mdiv;	/* final divider value (0x137250) */
	u32 dsrc;	/* divider source select (0x137160) */
	u32 ddiv;	/* divider value (0x1371d0) */
	u32 coef;	/* PLL coefficients (0x137000 + idx*0x20) */
};

struct gk104_clk {
	struct nvkm_clk base;
	struct gk104_clk_info eng[16];
};
static u32 read_div(struct gk104_clk *, int, u32, u32);
static u32 read_pll(struct gk104_clk *, u32);
/* Read the VCO feeding a divider: source-select bit 8 picks between
 * the two SPPLLs.
 */
static u32
read_vco(struct gk104_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 ssrc = nvkm_rd32(device, dsrc);
	const u32 pll = (ssrc & 0x00000100) ? 0x00e820 : 0x00e800;

	return read_pll(clk, pll);
}
/* Read back the output frequency (kHz) of a PLL.  Returns 0 when the
 * PLL is disabled or its address is unrecognised.
 */
static u32
read_pll(struct gk104_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;
	u16 fN = 0xf000;	/* neutral: (u16)(0xf000 + 4096) == 0 below */

	if (!(ctrl & 0x00000001))
		return 0;

	/* Pick the reference clock for this particular PLL. */
	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:	/* MPLL, fed by the MPLL source PLL */
		sclk = read_pll(clk, 0x132020);
		P = (coef & 0x10000000) ? 2 : 1;
		break;
	case 0x132020:	/* MPLL source; the only PLL with fractional-N */
		sclk = read_div(clk, 0, 0x137320, 0x137330);
		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	/* Defend against an unprogrammed post-divider. */
	if (P == 0)
		P = 1;

	/* freq = ref * (N + fractional term) / (M * P); the fractional
	 * term is ((fN + 4096) mod 2^16) * ref / 2^13.
	 * NOTE(review): M == 0 would still divide by zero here — worth a
	 * guard like the P one above.
	 */
	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
	return sclk / (M * P);
}
/* Read back the frequency (kHz) produced by divider 'doff', decoding
 * its source select (crystal / fixed 108MHz / fixed 100MHz / VCO with
 * optional fractional divider).
 */
static u32
read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;	/* presumably the PCIe/host reference */
	case 3:
		if (sctl & 0x80000000) {
			u32 sclk = read_vco(clk, dsrc + (doff * 4));
			u32 sdiv = (sctl & 0x0000003f) + 2;
			/* divider works in half steps: out = vco*2/sdiv */
			return (sclk * 2) / sdiv;
		}

		return read_vco(clk, dsrc + (doff * 4));
	default:
		return 0;
	}
}
/* Report the memory clock by decoding which PLL currently drives it. */
static u32
read_mem(struct gk104_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 sel = nvkm_rd32(device, 0x1373f4) & 0x0000000f;

	if (sel == 1)
		return read_pll(clk, 0x132020);
	if (sel == 2)
		return read_pll(clk, 0x132000);
	return 0;
}
/* Read back the frequency (kHz) of engine clock 'idx'.  Clocks 0-6 have
 * their own PLL selected via 0x137100; higher indices can route through
 * the shared 0x1370e0 PLL.  A final post-divider may apply in either mode.
 */
static u32
read_clk(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 sclk, sdiv;

	if (idx < 7) {
		u32 ssel = nvkm_rd32(device, 0x137100);
		if (ssel & (1 << idx)) {
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
			sdiv = 1;	/* use the PLL-mode divider field */
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;	/* use the div-mode divider field */
		}
	} else {
		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
		if ((ssrc & 0x00000003) == 0x00000003) {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			if (ssrc & 0x00000100) {
				if (ssrc & 0x40000000)
					sclk = read_pll(clk, 0x1370e0);
				sdiv = 1;
			} else {
				sdiv = 0;
			}
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	}

	/* Apply the post-divider if it's enabled (bit 31). */
	if (sctl & 0x80000000) {
		if (sdiv)
			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
		else
			sdiv = ((sctl & 0x0000003f) >> 0) + 2;

		return (sclk * 2) / sdiv;
	}

	return sclk;
}
/* Report the current frequency (kHz) of a clock domain, mapping the
 * generic domain enum onto this chipset's engine-clock indices.
 */
static int
gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_mem:
		return read_mem(clk);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
/* Find the divider setting closest to 'freq' from reference 'ref',
 * storing the raw register value in *ddiv and returning the achieved
 * frequency.  The divider runs in half steps over the range [2, 65].
 */
static u32
calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = (ref * 2) / freq;

	if (div > 65)
		div = 65;
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}
/* Choose a divider source for 'freq', writing the source select to
 * *dsrc and (for clocks 0-6) a divider value to *ddiv.  Returns the
 * frequency the chosen source actually produces.
 */
static u32
calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;	/* VCO source */
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}
/* Compute PLL coefficients for engine clock 'idx' to reach 'freq',
 * storing the packed P/N/M value in *coef.  Returns the achieved
 * frequency, or 0 on any failure.
 */
static u32
calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	/* The PLL reference is whatever its input divider produces now. */
	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
/* Work out the best configuration (divider-only vs PLL) for one engine
 * clock and record it in clk->eng[idx] for prog() to apply later.
 */
static int
calc_clk(struct gk104_clk *clk,
	 struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;	/* divider enable */
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;	/* PLL-mode field */
		}
		info->ssel = (1 << idx);
		info->dsrc = 0x40000100;	/* route via shared PLL path */
		info->freq = clk1;
	}

	return 0;
}
/* Pre-compute settings for every reclockable domain of the cstate. */
static int
gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk104_clk *clk = gk104_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}
/* Stage 0: program divider source/value for divider-mode clocks.
 * PLL-sourced clocks (ssel set) are untouched here.
 */
static void
gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;

	if (info->ssel)
		return;

	nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
	nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
/* Stage 1 (clocks 0-6): deselect the PLL path and wait (up to 2ms) for
 * the mux to report the switch.
 */
static void
gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}
/* Stage 1 (clocks 7+): deselect the alternate (shared-PLL) path. */
static void
gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}
/* Stage 2: with the PLL safely deselected, write new coefficients (if
 * any), re-enable it, wait for lock (bit 17, up to 2ms), then enable
 * sync mode.
 */
static void
gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);

	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
	if (info->coef) {
		nvkm_wr32(device, addr + 0x04, info->coef);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

		/* Test PLL lock */
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
				break;
		);
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

		/* Enable sync mode */
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
	}
}
static void
gk104_clk_prog_3(struct gk104_clk *clk, int idx)
{
struct gk104_clk_info *info = &clk->eng[idx];
struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel)
nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
else
nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
}
/* Stage 4 (clocks 0-6): switch PLL-mode clocks back onto their PLL and
 * wait (up to 2ms) for the mux to confirm.
 */
static void
gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;

	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}
/* Stage 4 (clocks 7+): route PLL-mode clocks through the shared-PLL
 * path and re-enable it.
 */
static void
gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;

	if (info->ssel) {
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
	}
}
/* Apply the configuration computed by calc(): run every programming
 * stage in order across all engine clocks.  Each stage's mask limits it
 * to the clock indices it applies to; untouched domains (freq == 0) are
 * skipped.
 */
static int
gk104_clk_prog(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct {
		u32 mask;
		void (*exec)(struct gk104_clk *, int);
	} stage[] = {
		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
		{ 0xff80, gk104_clk_prog_1_1 },
		{ 0x00ff, gk104_clk_prog_2   }, /* (maybe) program pll */
		{ 0xff80, gk104_clk_prog_3   }, /* final divider */
		{ 0x007f, gk104_clk_prog_4_0 }, /* (maybe) select pll mode */
		{ 0xff80, gk104_clk_prog_4_1 },
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!(stage[i].mask & (1 << j)))
				continue;
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}
/* Discard any per-domain state left over from a previous calc() pass. */
static void
gk104_clk_tidy(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);

	memset(clk->eng, 0x00, sizeof(clk->eng));
}
/* Clock engine description for GK104 (Kepler) chipsets. */
static const struct nvkm_clk_func
gk104_clk = {
	.read = gk104_clk_read,
	.calc = gk104_clk_calc,
	.prog = gk104_clk_prog,
	.tidy = gk104_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_hubk01 , 0x05 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x07 },
		{ nv_clk_src_max }
	}
};
/* Allocate and construct the GK104 clock subdev. */
int
gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gk104_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define gf100_clk(p) container_of((p), struct gf100_clk, base)
#include "priv.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
/* Per-domain state computed by calc() and applied by prog(). */
struct gf100_clk_info {
	u32 freq;	/* achieved frequency (kHz); 0 == domain untouched */
	u32 ssel;	/* PLL-select bit for 0x137100, or 0 for divider mode */
	u32 mdiv;	/* final divider value (0x137250) */
	u32 dsrc;	/* divider source select (0x137160) */
	u32 ddiv;	/* divider value (0x1371d0) */
	u32 coef;	/* PLL coefficients (0x137000 + idx*0x20) */
};

struct gf100_clk {
	struct nvkm_clk base;
	struct gf100_clk_info eng[16];
};
static u32 read_div(struct gf100_clk *, int, u32, u32);
/* Read the VCO feeding a divider: source-select bit 8 picks between
 * the two SPPLLs.
 */
static u32
read_vco(struct gf100_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 ssrc = nvkm_rd32(device, dsrc);
	const enum nv_clk_src src = (ssrc & 0x00000100) ? nv_clk_src_sppll1
							: nv_clk_src_sppll0;

	return nvkm_clk_read(&clk->base, src);
}
/* Read back the output frequency (kHz) of a PLL.  Returns 0 when the
 * PLL is disabled, its address is unrecognised, or its coefficients
 * are not programmed.
 */
static u32
read_pll(struct gf100_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;

	if (!(ctrl & 0x00000001))
		return 0;

	/* Pick the reference clock for this particular PLL. */
	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
		break;
	case 0x132020:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	/* Guard against unprogrammed coefficients (M or P of zero), which
	 * would otherwise divide by zero; gk104's read_pll() guards P the
	 * same way.
	 */
	if (!M || !P)
		return 0;

	return sclk * N / M / P;
}
/* Read back the frequency (kHz) produced by divider 'doff', decoding
 * its source select (crystal / fixed 108MHz / fixed 100MHz / VCO with
 * optional fractional divider).
 */
static u32
read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sclk, sctl, sdiv = 2;

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;	/* presumably the PCIe/host reference */
	case 3:
		sclk = read_vco(clk, dsrc + (doff * 4));

		/* Memclk has doff of 0 despite its alt. location */
		if (doff <= 2) {
			sctl = nvkm_rd32(device, dctl + (doff * 4));

			if (sctl & 0x80000000) {
				if (ssrc & 0x100)
					sctl >>= 8;	/* alternate field */

				sdiv = (sctl & 0x3f) + 2;
			}
		}

		/* divider works in half steps: out = vco*2/sdiv */
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
/* Read back the frequency (kHz) of engine clock 'idx': either a
 * dedicated PLL (clocks 0-6), the shared 0x1370e0 PLL (clocks 7+), or a
 * divider source, with an optional post-divider in either mode.
 */
static u32
read_clk(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 ssel = nvkm_rd32(device, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << idx)) {
		if (idx < 7)
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
		else
			sclk = read_pll(clk, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;	/* PLL-mode field */
	} else {
		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;	/* div-mode field */
	}

	/* Post-divider enabled? */
	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;

	return sclk;
}
/* Report the current frequency (kHz) of a clock domain, mapping the
 * generic domain enum onto this chipset's PLLs, dividers and
 * engine-clock indices.
 */
static int
gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_sppll0:
		return read_pll(clk, 0x00e800);
	case nv_clk_src_sppll1:
		return read_pll(clk, 0x00e820);
	case nv_clk_src_mpllsrcref:
		return read_div(clk, 0, 0x137320, 0x137330);
	case nv_clk_src_mpllsrc:
		return read_pll(clk, 0x132020);
	case nv_clk_src_mpll:
		return read_pll(clk, 0x132000);
	case nv_clk_src_mdiv:
		return read_div(clk, 0, 0x137300, 0x137310);
	case nv_clk_src_mem:
		/* Memory is driven either by the MPLL or a plain divider. */
		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_copy:
		return read_clk(clk, 0x09);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
/* Find the divider setting closest to 'freq' from reference 'ref',
 * storing the raw register value in *ddiv and returning the achieved
 * frequency.  The divider runs in half steps over the range [2, 65].
 */
static u32
calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = (ref * 2) / freq;

	if (div > 65)
		div = 65;
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}
/* Choose a divider source for 'freq', writing the source select to
 * *dsrc and (for clocks 0-6) a divider value to *ddiv.  Returns the
 * frequency the chosen source actually produces.
 */
static u32
calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;	/* VCO source */
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}
/* Compute PLL coefficients for engine clock 'idx' to reach 'freq',
 * storing the packed P/N/M value in *coef.  Returns the achieved
 * frequency, or 0 on any failure.
 */
static u32
calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	/* The PLL reference is whatever its input divider produces now. */
	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
/* Work out the best configuration (divider-only vs PLL) for one engine
 * clock and record it in clk->eng[idx] for prog() to apply later.
 */
static int
calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;	/* divider enable */
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = info->coef = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;	/* PLL-mode field */
		}
		info->ssel = (1 << idx);
		info->freq = clk1;
	}

	return 0;
}
/* Pre-compute settings for every reclockable domain of the cstate. */
static int
gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gf100_clk *clk = gf100_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}
/* Stage 0: program divider source/value for divider-mode clocks.
 * Only the first seven clocks have these registers; PLL-sourced
 * clocks are untouched here.
 */
static void
gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;

	if (idx >= 7 || info->ssel)
		return;

	nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
	nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
}
/* Stage 1: deselect the PLL path and wait (up to 2ms) for the mux to
 * report the switch.
 */
static void
gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;

	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}
/* Stage 2: with the PLL safely deselected, write new coefficients (if
 * any), re-enable it, wait for lock (bit 17, up to 2ms), then enable
 * sync mode.  Only clocks 0-7 have their own PLL.
 */
static void
gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);

	if (idx <= 7) {
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
		if (info->coef) {
			nvkm_wr32(device, addr + 0x04, info->coef);
			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

			/* Test PLL lock */
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
					break;
			);
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

			/* Enable sync mode */
			nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
		}
	}
}
static void
gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	/* If the PLL path was selected, switch the clock over to it and
	 * wait (up to 2ms) for the selection to take effect.
	 */
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}
static void
gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	/* Apply the final output divider computed by calc_clk(). */
	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}
static int
gf100_clk_prog(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	/* Programming stages, executed in order for every active clock:
	 * divider setup, divider-mode select, (optional) PLL programming,
	 * (optional) PLL-mode select, and the final divider.
	 */
	void (*const stages[])(struct gf100_clk *, int) = {
		gf100_clk_prog_0,
		gf100_clk_prog_1,
		gf100_clk_prog_2,
		gf100_clk_prog_3,
		gf100_clk_prog_4,
	};
	int s, e;

	for (s = 0; s < ARRAY_SIZE(stages); s++) {
		for (e = 0; e < ARRAY_SIZE(clk->eng); e++) {
			/* Only clocks with a target frequency are touched. */
			if (clk->eng[e].freq)
				stages[s](clk, e);
		}
	}
	return 0;
}
static void
gf100_clk_tidy(struct nvkm_clk *base)
{
	struct gf100_clk *clk = gf100_clk(base);
	/* Discard per-clock state computed by gf100_clk_calc(). */
	memset(clk->eng, 0x00, sizeof(clk->eng));
}
static const struct nvkm_clk_func
gf100_clk = {
	.read = gf100_clk_read,
	.calc = gf100_clk_calc,
	.prog = gf100_clk_prog,
	.tidy = gf100_clk_tidy,
	/* Clock domains exposed on GF100; second field is the BIOS
	 * perf-table id (0xff == none).
	 */
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_hubk06 , 0x00 },
		{ nv_clk_src_hubk01 , 0x01 },
		{ nv_clk_src_copy   , 0x02 },
		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_rop    , 0x04 },
		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x0a },
		{ nv_clk_src_hubk07 , 0x0b },
		{ nv_clk_src_max }
	}
};
int
gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gf100_clk *clk;

	/* Allocate the GF100 clock object and hand the base back to
	 * the caller before running the common constructor.
	 */
	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include "pll.h"
#include "seq.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
static u32
read_div(struct nv50_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	/* The divider register moved between chipset revisions. */
	switch (device->chipset) {
	case 0x92:
	case 0x94:
	case 0x96:
		return nvkm_rd32(device, 0x004800);
	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
	case 0x84:
	case 0x86:
	case 0x98:
	case 0xa0:
		return nvkm_rd32(device, 0x004700);
	default:
		return 0x00000000;
	}
}
/* Determine the input frequency feeding the PLL at "base", by decoding
 * the chipset-specific reference-PLL coefficient registers.  Returns 0
 * on any decode failure.
 */
static u32
read_pll_src(struct nv50_clk *clk, u32 base)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
	u32 rsel = nvkm_rd32(device, 0x00e18c);
	int P, N, M, id;
	switch (device->chipset) {
	case 0x50:
	case 0xa0:
		/* One of two reference PLLs, selected per target PLL. */
		switch (base) {
		case 0x4020:
		case 0x4028: id = !!(rsel & 0x00000004); break;
		case 0x4008: id = !!(rsel & 0x00000008); break;
		case 0x4030: id = 0; break;
		default:
			nvkm_error(subdev, "ref: bad pll %06x\n", base);
			return 0;
		}
		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
		ref *=  (coef & 0x01000000) ? 2 : 4;
		P    =  (coef & 0x00070000) >> 16;
		N    = ((coef & 0x0000ff00) >> 8) + 1;
		M    = ((coef & 0x000000ff) >> 0) + 1;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
		/* Single reference PLL. */
		coef = nvkm_rd32(device, 0x00e81c);
		P = (coef & 0x00070000) >> 16;
		N = (coef & 0x0000ff00) >> 8;
		M = (coef & 0x000000ff) >> 0;
		break;
	case 0x94:
	case 0x96:
	case 0x98:
		/* Source select lives in 0x00c050 on these chipsets. */
		rsel = nvkm_rd32(device, 0x00c050);
		switch (base) {
		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
		case 0x4030: rsel = 3; break;
		default:
			nvkm_error(subdev, "ref: bad pll %06x\n", base);
			return 0;
		}
		switch (rsel) {
		case 0: id = 1; break;
		case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
		case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 3: id = 0; break;
		}
		coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
		P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
		P   += (coef & 0x00070000) >> 16;
		N    = (coef & 0x0000ff00) >> 8;
		M    = (coef & 0x000000ff) >> 0;
		break;
	default:
		BUG();
	}
	if (M)
		return (ref * N / M) >> P;
	return 0;
}
/* Return the reference clock frequency for the PLL at "base": either
 * the PCIE reference (href) or the chipset-specific PLL source,
 * depending on the relevant selection bit in 0x00c040.
 */
static u32
read_pll_ref(struct nv50_clk *clk, u32 base)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c040);
	u32 bit;

	switch (base) {
	case 0x004028: bit = 0x00200000; break;
	case 0x004020: bit = 0x00400000; break;
	case 0x004008: bit = 0x00010000; break;
	case 0x004030: bit = 0x02000000; break;
	case 0x00e810:
		return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
	default:
		nvkm_error(subdev, "bad pll %06x\n", base);
		return 0;
	}

	if (mast & bit)
		return nvkm_clk_read(&clk->base, nv_clk_src_href);
	return read_pll_src(clk, base);
}
/* Decode a two-stage PLL at "base" and return its output frequency in
 * the same units as the reference clock, or 0 if the PLL is disabled.
 */
static u32
read_pll(struct nv50_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 mast = nvkm_rd32(device, 0x00c040);
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = read_pll_ref(clk, base);
	u32 freq = 0;
	int N1, N2, M1, M2;
	if (base == 0x004028 && (mast & 0x00100000)) {
		/* wtf, appears to only disable post-divider on gt200 */
		if (device->chipset != 0xa0)
			return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
	}
	N2 = (coef & 0xff000000) >> 24;
	M2 = (coef & 0x00ff0000) >> 16;
	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	/* Bit 31 of ctrl == PLL enabled; apply the second stage only
	 * when its enable/bypass bits select it.
	 */
	if ((ctrl & 0x80000000) && M1) {
		freq = ref * N1 / M1;
		if ((ctrl & 0x40000100) == 0x40000000) {
			if (M2)
				freq = freq * N2 / M2;
			else
				freq = 0;
		}
	}
	return freq;
}
/* Report the current frequency (in kHz) of clock domain "src" by
 * decoding the clock-source selection register (0x00c040) and the
 * relevant PLL/divider state.  Returns -EINVAL for unknown sources.
 */
int
nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct nv50_clk *clk = nv50_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c040);
	u32 P = 0;
	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclk:
		/* hclk = href * 2.7778 */
		return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
	case nv_clk_src_hclkm3:
		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
	case nv_clk_src_hclkm3d2:
		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
	case nv_clk_src_host:
		switch (mast & 0x30000000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x10000000: break;
		case 0x20000000: /* !0x50 */
		case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
		}
		break;
	case nv_clk_src_core:
		/* Post-divider only applies when not in "bypass" mode. */
		if (!(mast & 0x00100000))
			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
		case 0x00000002: return read_pll(clk, 0x004020) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_shader:
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000080)
				return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
			switch (mast & 0x0000c000) {
			case 0x00000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
			case 0x00008000:
			case 0x0000c000:
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			}
		} else {
			return read_pll(clk, 0x004008) >> P;
		}
		break;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;
		switch (device->chipset) {
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0xa0:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				if (device->chipset == 0xa0) /* wtf?? */
					return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				if (mast & 0x01000000)
					return read_pll(clk, 0x004028) >> P;
				return read_pll(clk, 0x004030) >> P;
			case 0x00000c00:
				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			}
			break;
		case 0x98:
			switch (mast & 0x00000c00) {
			case 0x00000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			case 0x00000400:
				return 0;
			case 0x00000800:
				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
			case 0x00000c00:
				return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
			}
			break;
		}
		break;
	case nv_clk_src_dom6:
		switch (device->chipset) {
		case 0x50:
		case 0xa0:
			return read_pll(clk, 0x00e810) >> 2;
		case 0x84:
		case 0x86:
		case 0x92:
		case 0x94:
		case 0x96:
		case 0x98:
			P = (read_div(clk) & 0x00000007) >> 0;
			switch (mast & 0x0c000000) {
			case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
			case 0x04000000: break;
			case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
			case 0x0c000000:
				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return -EINVAL;
}
/* Compute single-stage PLL coefficients for "reg" targeting "idx" kHz.
 * Returns the achievable frequency, or 0 if the BIOS PLL limits can't
 * be parsed or no reference clock is available.
 */
static u32
calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;

	if (nvbios_pll_parse(subdev->device->bios, reg, &pll))
		return 0;

	pll.vco2.max_freq = 0;	/* force single-stage calculation */
	pll.refclk = read_pll_ref(clk, reg);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
}
/* Pick a power-of-two divider (0..7) of "src" that lands closest to
 * "target".  Writes the chosen divider to *div and returns the
 * resulting frequency.
 */
static inline u32
calc_div(u32 src, u32 target, int *div)
{
	u32 below = src;	/* first candidate at or under target */
	u32 above = src;	/* candidate one divider step higher */
	int d;

	for (d = 0; d <= 7; d++) {
		if (below <= target) {
			above = d ? below << 1 : below;
			break;
		}
		below >>= 1;
	}
	*div = d;

	if (target - below <= above - target)
		return below;
	*div = d - 1;
	return above;
}
/* Two frequencies are "the same" when they agree to MHz precision. */
static inline u32
clk_same(u32 a, u32 b)
{
	return (a / 1000) == (b / 1000);
}
/* Build a hwsq (hardware sequencer) script that reclocks the core,
 * shader, vdec and dom6 domains to the values in "cstate".  The script
 * is executed later by nv50_clk_prog().
 */
int
nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct nv50_clk *clk = nv50_clk(base);
	struct nv50_clk_hwsq *hwsq = &clk->hwsq;
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	const int dom6 = cstate->domain[nv_clk_src_dom6];
	u32 mastm = 0, mastv = 0;	/* mask/value pairs for 0x00c040 */
	u32 divsm = 0, divsv = 0;	/* mask/value pairs for the divider reg */
	int N, M, P1, P2;
	int freq, out;
	/* prepare a hwsq script from which we'll perform the reclock */
	out = clk_init(hwsq, subdev);
	if (out)
		return out;
	clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
	clk_nsec(hwsq, 8000);
	clk_setf(hwsq, 0x10, 0x00); /* disable fb */
	clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
	/* vdec: avoid modifying xpll until we know exactly how the other
	 * clock domains work, i suspect at least some of them can also be
	 * tied to xpll...
	 */
	if (vdec) {
		/* see how close we can get using nvclk as a source */
		freq = calc_div(core, vdec, &P1);
		/* see how close we can get using xpll/hclk as a source */
		if (device->chipset != 0x98)
			out = read_pll(clk, 0x004030);
		else
			out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
		out = calc_div(out, vdec, &P2);
		/* select whichever gets us closest */
		if (abs(vdec - freq) <= abs(vdec - out)) {
			if (device->chipset != 0x98)
				mastv |= 0x00000c00;
			divsv |= P1 << 8;
		} else {
			mastv |= 0x00000800;
			divsv |= P2 << 8;
		}
		mastm |= 0x00000c00;
		divsm |= 0x00000700;
	}
	/* dom6: nfi what this is, but we're limited to various combinations
	 * of the host clock frequency
	 */
	if (dom6) {
		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
			mastv |= 0x00000000;
		} else
		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
			mastv |= 0x08000000;
		} else {
			freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
			calc_div(freq, dom6, &P1);
			mastv |= 0x0c000000;
			divsv |= P1;
		}
		mastm |= 0x0c000000;
		divsm |= 0x00000007;
	}
	/* vdec/dom6: switch to "safe" clocks temporarily, update dividers
	 * and then switch to target clocks
	 */
	clk_mask(hwsq, mast, mastm, 0x00000000);
	clk_mask(hwsq, divs, divsm, divsv);
	clk_mask(hwsq, mast, mastm, mastv);
	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
	 * sclk to hclk) before reprogramming
	 */
	if (device->chipset < 0x92)
		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
	else
		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
	/* core: for the moment at least, always use nvpll */
	freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
	if (freq == 0)
		return -ERANGE;
	clk_mask(hwsq, nvpll[0], 0xc03f0100,
				 0x80000000 | (P1 << 19) | (P1 << 16));
	clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
	 * very careful that the shader clock is at least twice the core, or
	 * some chipsets will be very unhappy.  i expect most or all of these
	 * cases will be handled by tying to nvclk, but it's possible there's
	 * corners
	 */
	if (P1-- && shader == (core << 1)) {
		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
	} else {
		freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (freq == 0)
			return -ERANGE;
		clk_mask(hwsq, spll[0], 0xc03f0100,
					0x80000000 | (P1 << 19) | (P1 << 16));
		clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
		clk_mask(hwsq, mast, 0x00100033, 0x00000033);
	}
	/* restore normal operation */
	clk_setf(hwsq, 0x10, 0x01); /* enable fb */
	clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
	clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
	return 0;
}
int
nv50_clk_prog(struct nvkm_clk *base)
{
struct nv50_clk *clk = nv50_clk(base);
return clk_exec(&clk->hwsq, true);
}
void
nv50_clk_tidy(struct nvkm_clk *base)
{
struct nv50_clk *clk = nv50_clk(base);
clk_exec(&clk->hwsq, false);
}
/* Common NV50-family constructor: allocates the clock object, runs the
 * base constructor, and records the register addresses used by the
 * hwsq script opcodes.
 */
int
nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
	struct nv50_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;

	ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base);
	*pclk = &clk->base;
	if (ret)
		return ret;

	clk->hwsq.r_fifo = hwsq_reg(0x002504);
	clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
	clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
	clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
	clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);

	/* The divider register moved on 0x92/0x94/0x96. */
	if (device->chipset == 0x92 || device->chipset == 0x94 ||
	    device->chipset == 0x96)
		clk->hwsq.r_divs = hwsq_reg(0x004800);
	else
		clk->hwsq.r_divs = hwsq_reg(0x004700);

	clk->hwsq.r_mast = hwsq_reg(0x00c040);
	return 0;
}
static const struct nvkm_clk_func
nv50_clk = {
	.read = nv50_clk_read,
	.calc = nv50_clk_calc,
	.prog = nv50_clk_prog,
	.tidy = nv50_clk_tidy,
	/* Clock domains exposed on NV50; second field is the BIOS
	 * perf-table id (0xff == none).
	 */
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
		{ nv_clk_src_max }
	}
};
int
nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_clk **pclk)
{
	/* Plain NV50: reclocking is not allowed (allow_reclock = false). */
	return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c |
/*
* Copyright 1993-2003 NVIDIA, Corporation
* Copyright 2007-2009 Stuart Bennett
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
static int
getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
	      int *pN, int *pM, int *pP)
{
	/* Find M, N and P for a single stage PLL
	 *
	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock
	 *
	 * On success *pN, *pM, *pP hold the best coefficients found;
	 * returns 0 if no acceptable combination exists.
	 */
	struct nvkm_bios *bios = subdev->device->bios;
	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
	int minU = info->vco1.min_inputfreq;
	int maxU = info->vco1.max_inputfreq;
	int minP = info->min_p;
	int maxP = info->max_p_usable;
	int crystal = info->refclk;
	int M, N, thisP, P;
	int clkP, calcclk;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;
	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
	/* possibly correlated with introduction of 27MHz crystal */
	if (bios->version.major < 0x60) {
		int cv = bios->version.chip;
		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
			if (clk > 250000)
				maxM = 6;
			if (clk > 340000)
				maxM = 2;
		} else if (cv < 0x40) {
			if (clk > 150000)
				maxM = 6;
			if (clk > 200000)
				maxM = 4;
			if (clk > 340000)
				maxM = 2;
		}
	}
	P = 1 << maxP;
	if ((clk * P) < minvco) {
		minvco = clk * maxP;
		maxvco = minvco * 2;
	}
	if (clk + clk/200 > maxvco)	/* +0.5% */
		maxvco = clk + clk/200;
	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
	for (thisP = minP; thisP <= maxP; thisP++) {
		P = 1 << thisP;
		clkP = clk * P;
		if (clkP < minvco)
			continue;
		if (clkP > maxvco)
			return bestclk;
		for (M = minM; M <= maxM; M++) {
			if (crystal/M < minU)
				return bestclk;
			if (crystal/M > maxU)
				continue;
			/* add crystal/2 to round better */
			N = (clkP * M + crystal/2) / crystal;
			if (N < minN)
				continue;
			if (N > maxN)
				break;
			/* more rounding additions */
			calcclk = ((N * crystal + P/2) / P + M/2) / M;
			delta = abs(calcclk - clk);
			/* we do an exhaustive search rather than terminating
			 * on an optimality condition...
			 */
			if (delta < bestdelta) {
				bestdelta = delta;
				bestclk = calcclk;
				*pN = N;
				*pM = M;
				*pP = thisP;
				if (delta == 0)	/* except this one */
					return bestclk;
			}
		}
	}
	return bestclk;
}
static int
getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
	      int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
{
	/* Find M, N and P for a two stage PLL
	 *
	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock
	 *
	 * On success *pN1/*pM1 hold the first-stage coefficients, *pN2/*pM2
	 * the second-stage ones and *pP the log2 post-divider; returns 0 if
	 * no acceptable combination exists.
	 */
	int chip_version = subdev->device->bios->version.chip;
	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
	int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
	int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
	int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
	int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
	int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
	int maxlog2P = info->max_p_usable;
	int crystal = info->refclk;
	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
	int M1, N1, M2, N2, log2P;
	int clkP, calcclk1, calcclk2, calcclkout;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;
	/* Choose the largest post-divider that keeps the VCO2 target in
	 * range (aim for ~half of maxvco2, slightly under).
	 */
	int vco2 = (maxvco2 - maxvco2/200) / 2;
	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
		;
	clkP = clk << log2P;
	if (maxvco2 < clk + clk/200)	/* +0.5% */
		maxvco2 = clk + clk/200;
	for (M1 = minM1; M1 <= maxM1; M1++) {
		if (crystal/M1 < minU1)
			return bestclk;
		if (crystal/M1 > maxU1)
			continue;
		for (N1 = minN1; N1 <= maxN1; N1++) {
			calcclk1 = crystal * N1 / M1;
			if (calcclk1 < minvco1)
				continue;
			if (calcclk1 > maxvco1)
				break;
			for (M2 = minM2; M2 <= maxM2; M2++) {
				if (calcclk1/M2 < minU2)
					break;
				if (calcclk1/M2 > maxU2)
					continue;
				/* add calcclk1/2 to round better */
				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
				if (N2 < minN2)
					continue;
				if (N2 > maxN2)
					break;
				if (!fixedgain2) {
					if (chip_version < 0x60)
						if (N2/M2 < 4 || N2/M2 > 10)
							continue;
					calcclk2 = calcclk1 * N2 / M2;
					if (calcclk2 < minvco2)
						break;
					if (calcclk2 > maxvco2)
						continue;
				} else
					calcclk2 = calcclk1;
				calcclkout = calcclk2 >> log2P;
				delta = abs(calcclkout - clk);
				/* we do an exhaustive search rather than terminating
				 * on an optimality condition...
				 */
				if (delta < bestdelta) {
					bestdelta = delta;
					bestclk = calcclkout;
					*pN1 = N1;
					*pM1 = M1;
					*pN2 = N2;
					*pM2 = M2;
					*pP = log2P;
					if (delta == 0)	/* except this one */
						return bestclk;
				}
			}
		}
	}
	return bestclk;
}
/* Compute PLL coefficients for "freq" kHz.  Uses the two-stage solver
 * when the limits describe a second VCO and the caller wants its
 * coefficients, otherwise the single-stage solver (with *N2/*M2 forced
 * to 1 when requested).  Returns the achievable frequency, 0 on failure.
 */
int
nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
	      int *N1, int *M1, int *N2, int *M2, int *P)
{
	int ret;

	if (info->vco2.max_freq && N2) {
		ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
	} else {
		ret = getMNP_single(subdev, info, freq, N1, M1, P);
		if (N2) {
			*N2 = 1;
			*M2 = 1;
		}
	}

	if (!ret)
		nvkm_error(subdev, "unable to compute acceptable pll values\n");
	return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>
#include <core/option.h>
/******************************************************************************
* misc
*****************************************************************************/
/* Clamp (and, when "adjust" is set, scale) "input" according to the
 * BIOS boost table entry for "pstate"/"domain".  Returns the input
 * unchanged when no matching table entry exists.
 */
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;
	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;
		/* Entry-level clamp first, then the matching sub-entry. */
		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}
	return input;
}
/******************************************************************************
* C-States
*****************************************************************************/
/* Return true if "cstate" can be used: its vpstate-flagged domains must
 * not exceed the base/boost frequency limits for the current boost
 * mode, and its voltage requirement must be satisfiable.
 */
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;
	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				/* NONE also enforces the boost limit below. */
				fallthrough;
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}
	/* Without a voltage controller, frequency limits are all we check. */
	if (!volt)
		return true;
	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
struct nvkm_cstate *cstate)
{
struct nvkm_device *device = clk->subdev.device;
struct nvkm_volt *volt = device->volt;
int max_volt;
if (!pstate || !cstate)
return NULL;
if (!volt)
return cstate;
max_volt = volt->max_uv;
if (volt->max0_id != 0xff)
max_volt = min(max_volt,
nvkm_volt_map(volt, volt->max0_id, clk->temp));
if (volt->max1_id != 0xff)
max_volt = min(max_volt,
nvkm_volt_map(volt, volt->max1_id, clk->temp));
if (volt->max2_id != 0xff)
max_volt = min(max_volt,
nvkm_volt_map(volt, volt->max2_id, clk->temp));
list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
return cstate;
}
return NULL;
}
/* Look up a cstate by id within "pstate".  NVKM_CLK_CSTATE_HIGHEST
 * selects the last (highest) entry; otherwise NULL if no entry with
 * the given id exists.
 */
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;

	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);

	list_for_each_entry(cstate, &pstate->list, head) {
		if (cstate->id == cstatei)
			return cstate;
	}
	return NULL;
}
/* Program the clocks for "cstatei" within "pstate".  Fan speed and
 * voltage are raised *before* reclocking and lowered *after*, so the
 * hardware is never run out of spec during the transition.
 */
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;
	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
		if (!cstate)
			return -EINVAL;
	} else {
		/* No cstates: fall back to the pstate's base clocks. */
		cstate = &pstate->base;
	}
	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}
	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}
	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}
	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}
	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}
	return ret;
}
static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	/* Unlink from the parent pstate's list and release. */
	list_del(&cstate->head);
	kfree(cstate);
}
/* Create a cstate from BIOS cstep-X entry "idx" and append it to
 * "pstate".  Returns -ENOENT when no such table entry exists, -EINVAL
 * when its voltage requirement can never be met, -ENOMEM on allocation
 * failure.
 */
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	/* Start from the pstate's base clocks, then override the
	 * core-flagged domains with the (boost-adjusted) cstep frequency.
	 */
	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	for (domain = clk->domains;
	     domain && domain->name != nv_clk_src_max; domain++) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE)
			cstate->domain[domain->name] =
				nvkm_clk_adjust(clk, true, pstate->pstate,
						domain->bios, cstepX.freq);
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}
/******************************************************************************
* P-States
*****************************************************************************/
/* Switch the device to performance state index "pstatei": update the
 * PCIe link, reclock RAM (retrying while the backend asks for it), and
 * finally program the highest usable cstate.
 */
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;
	/* Pstates live in an ordered list; find the Nth entry. */
	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}
	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;
	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);
	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		/* A positive return from prog() requests another pass. */
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}
	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}
/* Deferred-work handler: pick the target pstate from the user (AC/DC),
 * adaptive and thermal-downclock constraints, program it if it differs
 * from the current one, and wake any waiters.
 */
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;
	/* Bail unless nvkm_pstate_calc() armed the flag. */
	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();
	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);
	/* User choice depends on the active power source; -1 == automatic. */
	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}
	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}
	wake_up_all(&clk->wait);
}
/* Request a pstate re-evaluation; optionally block until the worker
 * has processed it.
 */
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}
/* Log a human-readable summary of "pstate": for each domain, the
 * frequency range spanned by its cstates (up to three named domains
 * are summarised on the final line).
 */
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	/* Start one before the table; the loop pre-increments. */
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;
	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);
	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;
		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		/* Widen [lo, hi] to cover every cstate's frequency. */
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}
		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}
	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}
static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
struct nvkm_cstate *cstate, *temp;
list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
nvkm_cstate_del(cstate);
}
list_del(&pstate->head);
kfree(pstate);
}
/* Build a pstate from entry 'idx' of the BIOS performance table.
 *
 * Returns -EINVAL past the end of the table (terminating the caller's
 * scan), 0 on success or for skipped (pstate == 0xff) entries, or
 * -ENOMEM.  For table versions >= 0x40, per-domain subentries are
 * parsed; core-flagged domains are additionally run through
 * nvkm_clk_adjust().  A matching cstep table entry seeds the pstate's
 * cstate list.
 */
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	/* start one before the table; the loop below pre-increments */
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8 ver, hdr, cnt, len;
	u32 data;
	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;
	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	INIT_LIST_HEAD(&pstate->list);
	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate = &pstate->base;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;
	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8 sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}
		cstate->domain[domain->name] = perfS.v40.freq;
	}
	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		/* instantiate cstates for indices cstepE.index down to 0 */
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while(idx--);
	}
	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}
/******************************************************************************
* Adjustment triggers
*****************************************************************************/
/* Translate a user pstate request into an internal value.
 *
 * 'req' is either a pstate id to look up, or one of the special values
 * -1 (disabled) / -2 (auto), which are passed through.  The result is
 * biased by +2 so that all valid returns are >= 0 and negative values
 * are reserved for errors.
 *
 * Fix over the original: the lookup used to dereference 'pstate' after
 * a completed list_for_each_entry(), where it points at the list-head
 * container rather than a real nvkm_pstate — an invalid read whenever
 * 'req' was not found, or the list was empty.  Return from inside the
 * loop instead so the cursor is never used after the walk.
 *
 * Returns index+2 for a matching pstate, req+2 for -1/-2, -ENOSYS when
 * reclocking is not allowed, or -EINVAL when 'req' is not in the list.
 */
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				return i + 2;
			i++;
		}
		return -EINVAL;
	}

	return req + 2;
}
/* Parse an "NvClkMode"-style option string into a ustate value.
 *
 * "auto" maps to -2 (when reclocking is allowed), "disabled" to -1;
 * anything else is parsed as a pstate id and validated against the
 * pstate list.  The return value is in the same -2-biased space as
 * nvkm_clk_ustate_update(), i.e. -2/-1 specials or a list index.
 *
 * Fix over the original: the number used to be parsed by temporarily
 * writing a NUL terminator through a cast-away-const pointer into the
 * caller's option string.  Parse from a bounded local copy instead so
 * 'mode' is never modified.  Strings too long for the buffer cannot be
 * valid pstate ids and are treated as unparsable, as before.
 */
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	char buf[32];
	int ret = 1;
	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;
	if (strncasecmpz(mode, "disabled", arglen)) {
		long v;
		if (arglen >= 0 && arglen < (int)sizeof(buf)) {
			memcpy(buf, mode, arglen);
			buf[arglen] = '\0';
			if (!kstrtol(buf, 0, &v)) {
				ret = nvkm_clk_ustate_update(clk, v);
				if (ret < 0)
					ret = 1;
			}
		}
	}
	return ret - 2;
}
/* Set the user-requested pstate for the AC (pwr != 0) or DC power
 * source and trigger a synchronous reevaluation.  Returns 0 on
 * success or a negative error from the lookup.
 */
int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int state = nvkm_clk_ustate_update(clk, req);

	if (state < 0)
		return state;

	/* undo the +2 bias applied by nvkm_clk_ustate_update() */
	state -= 2;
	if (pwr)
		clk->ustate_ac = state;
	else
		clk->ustate_dc = state;

	return nvkm_pstate_calc(clk, true);
}
/* Set (rel == 0) or adjust (rel != 0) the adjustment pstate, clamped
 * to [0, state_nr - 1], then trigger a reevaluation.
 */
int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	int state = rel ? clk->astate + rel : req;

	state = min(state, clk->state_nr - 1);
	state = max(state, 0);
	clk->astate = state;

	return nvkm_pstate_calc(clk, wait);
}
/* Record a new temperature reading and reevaluate asynchronously;
 * a no-op when the temperature is unchanged.
 */
int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp != temp) {
		clk->temp = temp;
		return nvkm_pstate_calc(clk, false);
	}
	return 0;
}
/* Set (rel == 0) or adjust (rel != 0) dstate, clamped to
 * [0, state_nr - 1], then trigger a synchronous reevaluation.
 */
int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	int state = rel ? clk->dstate + rel : req;

	state = min(state, clk->state_nr - 1);
	state = max(state, 0);
	clk->dstate = state;

	return nvkm_pstate_calc(clk, true);
}
/* Power-source change notification: asynchronously reevaluate the
 * pstate (which re-samples AC/DC in the worker), if the device has a
 * clk subdev at all.
 */
int
nvkm_clk_pwrsrc(struct nvkm_device *device)
{
	struct nvkm_clk *clk = device->clk;

	if (!clk)
		return 0;

	return nvkm_pstate_calc(clk, false);
}
/******************************************************************************
* subdev base class implementation
*****************************************************************************/
/* Read the current frequency of the given clock source via the
 * implementation's read hook.  Negative return values are errors.
 */
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}
/* Subdev fini: make sure any pending pstate work has completed before
 * handing control to the implementation's optional fini hook.
 */
static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}
/* Subdev init: snapshot the boot frequency of every domain into the
 * boot state ('bstate'), then either defer to the implementation's
 * init hook or fall through to the default setup.
 *
 * Note: when func->init is present it takes over completely -- the
 * default astate/dstate/pstate/temp setup and the initial reclock
 * below the early return are skipped.
 */
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;
	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	/* 0xff: the boot state has no BIOS pstate id */
	clk->bstate.pstate = 0xff;
	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}
	nvkm_pstate_info(clk, &clk->bstate);
	if (clk->func->init)
		return clk->func->init(clk);
	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}
static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_clk *clk = nvkm_clk(subdev);
struct nvkm_pstate *pstate, *temp;
/* Early return if the pstates have been provided statically */
if (clk->func->pstates)
return clk;
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
nvkm_pstate_del(pstate);
}
return clk;
}
/* Base subdev hooks shared by every clk implementation. */
static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};
/* Construct the clk subdev in caller-provided storage.
 *
 * Pulls base/boost clock limits from the BIOS vpstate table when
 * available, builds the pstate list (from the BIOS perf table unless
 * the implementation supplies a static one), and applies the
 * NvClkMode/NvClkModeAC/NvClkModeDC/NvBoost configuration options.
 */
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;
	nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev);
	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}
	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;
	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);
	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		/* a non-zero return (end of perf table, or error) ends
		 * the scan */
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}
	/* NvClkMode sets both; the AC/DC-specific options override it */
	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}
	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}
/* Allocate and construct a clk subdev; *pclk is NULL on allocation
 * failure.
 */
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk)
{
	struct nvkm_clk *clk = kzalloc(sizeof(*clk), GFP_KERNEL);

	*pclk = clk;
	if (!clk)
		return -ENOMEM;

	return nvkm_clk_ctor(func, device, type, inst, allow_reclock, clk);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "nv50.h"
/* G84-family clk implementation: reuses the NV50 read/calc/prog/tidy
 * hooks with this family's domain list.  NOTE(review): the 0xff bios
 * ids presumably mark domains without per-domain perf subentries --
 * confirm against nvbios_perfSp().
 */
static const struct nvkm_clk_func
g84_clk = {
	.read = nv50_clk_read,
	.calc = nv50_clk_calc,
	.prog = nv50_clk_prog,
	.tidy = nv50_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0xff },
		{ nv_clk_src_max }
	}
};
int
g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c |
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
/* Compute PLL coefficients (N, M, P and optionally fractional N) for
 * the requested frequency.
 *
 * P is chosen to keep the VCO as close to its maximum as the limits
 * allow, then M is swept over the range permitted by the input
 * frequency limits.  With pfN == NULL, N is rounded to nearest and
 * the integer combination with the smallest output error wins; with
 * pfN != NULL, the first in-range M is used, the 13-bit fractional
 * feedback value is computed and the function returns immediately
 * with the requested frequency.
 *
 * Returns the achieved frequency (integer mode), 'freq' (fractional
 * mode) or -EINVAL when no combination fits.
 */
int
gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
	       u32 freq, int *pN, int *pfN, int *pM, int *P)
{
	u32 best_err = ~0, err;
	int M, lM, hM, N, fN;
	*P = info->vco1.max_freq / freq;
	if (*P > info->max_p)
		*P = info->max_p;
	if (*P < info->min_p)
		*P = info->min_p;
	/* M range bounded by the allowed PLL input frequency:
	 * lM = ceil(refclk / max_inputfreq), hM = refclk / min_inputfreq */
	lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq;
	lM = max(lM, (int)info->vco1.min_m);
	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
	hM = min(hM, (int)info->vco1.max_m);
	lM = min(lM, hM);
	for (M = lM; M <= hM; M++) {
		u32 tmp = freq * *P * M;
		N = tmp / info->refclk;
		fN = tmp % info->refclk;
		if (!pfN) {
			/* integer-N: round to nearest */
			if (fN >= info->refclk / 2)
				N++;
		} else {
			/* fractional-N: round N down so the remainder is
			 * expressed in the fractional field */
			if (fN < info->refclk / 2)
				N--;
			fN = tmp - (N * info->refclk);
		}
		if (N < info->vco1.min_n)
			continue;
		if (N > info->vco1.max_n)
			break;
		err = abs(freq - (info->refclk * N / M / *P));
		if (err < best_err) {
			best_err = err;
			*pN = N;
			*pM = M;
		}
		if (pfN) {
			/* scale the remainder to 13 bits; NOTE(review): the
			 * "- 4096" bias presumably matches the hardware's
			 * signed fractional encoding -- confirm */
			*pfN = ((fN << 13) + info->refclk / 2) / info->refclk;
			*pfN = (*pfN - 4096) & 0xffff;
			return freq;
		}
	}
	if (unlikely(best_err == ~0)) {
		nvkm_error(subdev, "unable to find matching pll values\n");
		return -EINVAL;
	}
	return info->refclk * *pN / *pM / *P;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
* Roy Spliet
*/
#define gt215_clk(p) container_of((p), struct gt215_clk, base)
#include "gt215.h"
#include "pll.h"
#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
/* Implementation state: the base clk plus per-domain programming info
 * computed by the calc step and consumed by the prog step.
 */
struct gt215_clk {
	struct nvkm_clk base;
	struct gt215_clk_info eng[nv_clk_src_max];
};
static u32 read_clk(struct gt215_clk *, int, bool);
static u32 read_pll(struct gt215_clk *, int, u32);
/* Frequency of the VCO feeding clock 'idx', selected by bits 5:4 of
 * the source-control register: the crystal, or one of the two 0xe8xx
 * reference PLLs.
 */
static u32
read_vco(struct gt215_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
	switch (sctl & 0x00000030) {
	case 0x00000000:
		return device->crystal;
	case 0x00000020:
		return read_pll(clk, 0x41, 0x00e820);
	case 0x00000030:
		return read_pll(clk, 0x42, 0x00e8a0);
	default:
		return 0;
	}
}
/* Decode the frequency of clock 'idx' from its source-control
 * register.  Indices >= 0x40 denote the fixed reference clocks of the
 * 0xe8xx PLLs.  Returns 0 when the clock is disabled (unless
 * ignore_en) or the source cannot be determined.
 */
static u32
read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl, sdiv, sclk;
	/* refclk for the 0xe8xx plls is a fixed frequency */
	if (idx >= 0x40) {
		if (device->chipset == 0xaf) {
			/* no joke.. seriously.. sigh.. */
			return nvkm_rd32(device, 0x00471c) * 1000;
		}
		return device->crystal;
	}
	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
	/* bit 8: clock enabled */
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;
	/* out_alt */
	if (sctl & 0x00000400)
		return 108000;
	/* vco_out */
	switch (sctl & 0x00003000) {
	case 0x00000000:
		if (!(sctl & 0x00000200))
			return device->crystal;
		return 0;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		/* vco_enable */
		if (!(sctl & 0x00000001))
			return 0;
		sclk = read_vco(clk, idx);
		/* divider in bits 21:16, biased by 2; output is vco*2/sdiv */
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
/* Frequency of the PLL at register 'pll' ('idx' selects its input
 * clocks).  When bypassed (ctrl bit 3) the output is the 0x10+idx
 * source clock directly; otherwise refclk * N / (M * P), with P
 * forced to 1 on the 0xe8xx reference PLLs.  Returns 0 when the PLL
 * is disabled or the divisor decodes to zero.
 */
static u32
read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0);
	u32 sclk = 0, P = 1, N = 1, M = 1;
	u32 MP;
	if (!(ctrl & 0x00000008)) {
		if (ctrl & 0x00000001) {
			u32 coef = nvkm_rd32(device, pll + 4);
			M = (coef & 0x000000ff) >> 0;
			N = (coef & 0x0000ff00) >> 8;
			P = (coef & 0x003f0000) >> 16;
			/* no post-divider on these..
			 * XXX: it looks more like two post-"dividers" that
			 *      cross each other out in the default RPLL config */
			if ((pll & 0x00ff00) == 0x00e800)
				P = 1;
			sclk = read_clk(clk, 0x00 + idx, false);
		}
	} else {
		sclk = read_clk(clk, 0x10 + idx, false);
	}
	MP = M * P;
	if (!MP)
		return 0;
	return sclk * N / MP;
}
/* read hook: current frequency of clock source 'src'.
 *
 * The HOST clock has an additional source mux at 0xc040 bits 29:28;
 * sources 2 and 3 run at a fixed 277MHz (cf. calc_host()).
 */
static int
gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 hsrc;
	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_core:
	case nv_clk_src_core_intm:
		return read_pll(clk, 0x00, 0x4200);
	case nv_clk_src_shader:
		return read_pll(clk, 0x01, 0x4220);
	case nv_clk_src_mem:
		return read_pll(clk, 0x02, 0x4000);
	case nv_clk_src_disp:
		return read_clk(clk, 0x20, false);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x21, false);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x25, false);
	case nv_clk_src_host:
		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
		switch (hsrc) {
		case 0:
			return read_clk(clk, 0x1d, false);
		case 2:
		case 3:
			return 277000;
		default:
			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
			return -EINVAL;
		}
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
	return 0;
}
/* Compute the source-control value for clock 'idx' to run at 'khz'
 * from a VCO divider (no PLL).
 *
 * 27/100/108 MHz map directly to fixed sources.  Anything else uses
 * vco*2/sdiv; if the result overshoots the target by more than 3MHz
 * the next larger divider is taken, erring on the low side.  Dividers
 * of 4 or less are rejected with -ERANGE (the PLL path is used
 * instead, see the comment below).
 *
 * Returns the achieved frequency, or -ERANGE.
 */
static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	u32 oclk, sclk, sdiv;
	s32 diff;
	info->clk = 0;
	switch (khz) {
	case 27000:
		info->clk = 0x00000100;
		return khz;
	case 100000:
		info->clk = 0x00002100;
		return khz;
	case 108000:
		info->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(clk, idx);
		/* divider field is 6 bits biased by 2, hence the 65 cap */
		sdiv = min((sclk * 2) / khz, (u32)65);
		oclk = (sclk * 2) / sdiv;
		diff = ((khz + 3000) - oclk);
		/* When imprecise, play it safe and aim for a clock lower than
		 * desired rather than higher */
		if (diff < 0) {
			sdiv++;
			oclk = (sclk * 2) / sdiv;
		}
		/* divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 * XXX: PLLs with refclk 810MHz? */
		if (sdiv > 4) {
			info->clk = (((sdiv - 2) << 16) | 0x00003100);
			return oclk;
		}
		break;
	}
	return -ERANGE;
}
/* Fill *info with either a divider-only or a PLL configuration for
 * 'khz' on clock 'idx'.
 *
 * A plain divider within [-2MHz, +3MHz) of the target (or pll == 0)
 * is preferred; otherwise the divider is repurposed to generate the
 * PLL's reference clock and gt215_pll_calc() picks N/M/P.
 * info->fb_delay scales with the target clock, with a floor of 18.
 * Returns the achieved frequency or a negative error.
 */
int
gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll limits;
	int P, N, M, diff;
	int ret;
	info->pll = 0;
	/* If we can get a within [-2, 3) MHz of a divider, we'll disable the
	 * PLL and use the divider instead. */
	ret = gt215_clk_info(&clk->base, idx, khz, info);
	diff = khz - ret;
	if (!pll || (diff >= -2000 && diff < 3000)) {
		goto out;
	}
	/* Try with PLL */
	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
	if (ret)
		return ret;
	/* generate the PLL's refclk with the 0x10-offset divider */
	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
	if (ret != limits.refclk)
		return -EINVAL;
	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		info->pll = (P << 16) | (N << 8) | M;
	}
out:
	info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
	return ret ? ret : -ERANGE;
}
/* Precompute divider/PLL settings for domain 'dom' of the target
 * cstate, storing them in clk->eng[dom] for the prog step.
 */
static int
calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
	 int idx, u32 pll, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom], info);

	return ret < 0 ? ret : 0;
}
/* Precompute the HOST clock configuration for the target cstate.
 * 277MHz is served by a dedicated fixed source; anything else runs
 * from the 0x1d divider clock.
 */
static int
calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
	u32 khz = cstate->domain[nv_clk_src_host];
	int ret;

	if (khz == 277000) {
		info->clk = 0;
		info->host_out = NVA3_HOST_277;
		return 0;
	}

	info->host_out = NVA3_HOST_CLK;

	ret = gt215_clk_info(&clk->base, 0x1d, khz, info);
	return ret < 0 ? ret : 0;
}
/* Quiesce the GPU before reclocking.
 *
 * Halts engine scheduling, freezes fifo processing (0x2504 bit 0),
 * waits for the top-level interrupt status to clear, pauses the fifo
 * (saving its lock state in *flags for gt215_clk_post()), then waits
 * for the freeze to take effect and for the engines to go idle.
 * NOTE(review): register bit meanings inferred from usage -- confirm
 * against hardware documentation.
 */
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	/* halt and idle execution engines */
	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
	/* Wait until the interrupt handler is finished */
	if (nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x000100))
			break;
	) < 0)
		return -EBUSY;
	if (fifo)
		nvkm_fifo_pause(fifo, flags);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002504) & 0x00000010)
			break;
	) < 0)
		return -EIO;
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
		if (tmp == 0x0000003f)
			break;
	) < 0)
		return -EIO;
	return 0;
}
/* Undo gt215_clk_pre(): restart the fifo (when it was paused --
 * callers pass flags == NULL if the pause never happened), then
 * unfreeze fifo processing and re-enable engine scheduling.
 */
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;
	if (fifo && flags)
		nvkm_fifo_start(fifo, flags);
	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}
/* Gate a clock source: clear its enable bit (bit 8), then its
 * vco_enable bit (bit 0) -- cf. the decode in read_clk().
 */
static void
disable_clk_src(struct gt215_clk *clk, u32 src)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, src, 0x00000100, 0x00000000);
	nvkm_mask(device, src, 0x00000001, 0x00000000);
}
/* Program the PLL at 'pll' with the configuration precomputed in
 * clk->eng[dom].
 *
 * PLL path: route the output through the alternate source (src1)
 * first so the coefficients are never changed under load, program
 * them, power the PLL up, wait for the ready/lock flag (0x00020000,
 * presumably lock status -- confirm), then switch back and gate src1.
 * On timeout, the PLL is left disabled and the clock remains on the
 * bypass source.
 * Divider path: drive from src1's divider, bypass/disable the PLL
 * and gate src0.
 */
static void
prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 src0 = 0x004120 + (idx * 4);
	const u32 src1 = 0x004160 + (idx * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;
	u32 bypass;
	if (info->pll) {
		/* Always start from a non-PLL clock */
		bypass = nvkm_rd32(device, ctrl) & 0x00000008;
		if (!bypass) {
			nvkm_mask(device, src1, 0x00000101, 0x00000101);
			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
			udelay(20);
		}
		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
		nvkm_wr32(device, coef, info->pll);
		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, ctrl) & 0x00020000)
				break;
		) < 0) {
			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
			nvkm_mask(device, src0, 0x00000101, 0x00000000);
			return;
		}
		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
		disable_clk_src(clk, src1);
	} else {
		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
		disable_clk_src(clk, src0);
	}
}
/* Program the divider/source bits precomputed in clk->eng[dom] into
 * clock 'idx', leaving it enabled (0x00000101).
 */
static void
prog_clk(struct gt215_clk *clk, int idx, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}
/* Program the HOST clock according to the precomputed host_out mode:
 * either switch the 0xc040 mux to the fixed 277MHz source (gating the
 * divider clock), or program the divider clock and route the mux back
 * to it.
 */
static void
prog_host(struct gt215_clk *clk)
{
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 hsrc = (nvkm_rd32(device, 0xc040));
	switch (info->host_out) {
	case NVA3_HOST_277:
		if ((hsrc & 0x30000000) == 0) {
			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
			disable_clk_src(clk, 0x4194);
		}
		break;
	case NVA3_HOST_CLK:
		prog_clk(clk, 0x1d, nv_clk_src_host);
		if ((hsrc & 0x30000000) >= 0x20000000) {
			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
		}
		break;
	default:
		break;
	}
	/* This seems to be a clock gating factor on idle, always set to 64 */
	nvkm_wr32(device, 0xc044, 0x3e);
}
/* Program the core PLL, keeping the framebuffer read delay (0x10002c)
 * at max(old, new) throughout the transition: raise it before
 * reprogramming when the new value is larger, lower it only
 * afterwards when it is smaller.
 */
static void
prog_core(struct gt215_clk *clk, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 fb_delay = nvkm_rd32(device, 0x10002c);
	if (fb_delay < info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);
	prog_pll(clk, 0x00, 0x004200, dom);
	if (fb_delay > info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);
}
/* calc hook: precompute divider/PLL settings for every domain of the
 * target cstate.  When the core clock will run from its PLL, an
 * intermediate core configuration (core_intm) is prepared as well so
 * prog can step through a safe divider clock first.
 */
static int
gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret;
	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
	    (ret = calc_host(clk, cstate)))
		return ret;
	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
	 * whether to use a PLL or not... but using a PLL defeats the purpose */
	if (core->pll) {
		ret = gt215_clk_info(&clk->base, 0x10,
				     cstate->domain[nv_clk_src_core_intm],
				     &clk->eng[nv_clk_src_core_intm]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/* prog hook: apply the settings computed by gt215_clk_calc().
 *
 * The GPU is quiesced around the reprogramming.  -EBUSY from
 * gt215_clk_pre() happens before the fifo was paused, so 'f' is set
 * to NULL to stop gt215_clk_post() from trying to restart it.
 */
static int
gt215_clk_prog(struct nvkm_clk *base)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret = 0;
	unsigned long flags;
	unsigned long *f = &flags;
	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;
	/* step the core through a divider clock before engaging the PLL */
	if (core->pll)
		prog_core(clk, nv_clk_src_core_intm);
	prog_core(clk, nv_clk_src_core);
	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
	prog_clk(clk, 0x20, nv_clk_src_disp);
	prog_clk(clk, 0x21, nv_clk_src_vdec);
	prog_host(clk);
out:
	if (ret == -EBUSY)
		f = NULL;
	gt215_clk_post(&clk->base, f);
	return ret;
}
/* tidy hook: intentionally empty, no post-reclock cleanup is done. */
static void
gt215_clk_tidy(struct nvkm_clk *base)
{
}
/* GT215-family clk implementation and its clock-domain table (the
 * second field is the domain's BIOS perf-table index).
 */
static const struct nvkm_clk_func
gt215_clk = {
	.read = gt215_clk_read,
	.calc = gt215_clk_calc,
	.prog = gt215_clk_prog,
	.tidy = gt215_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal  , 0xff },
		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
		{ nv_clk_src_vdec     , 0x03 },
		{ nv_clk_src_disp     , 0x04 },
		{ nv_clk_src_host     , 0x05 },
		{ nv_clk_src_core_intm, 0x06 },
		{ nv_clk_src_max }
	}
};
int
gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_clk **pclk)
{
struct gt215_clk *clk;
if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
return -ENOMEM;
*pclk = &clk->base;
return nvkm_clk_ctor(>215_clk, device, type, inst, true, &clk->base);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c |
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
*
*/
#include "priv.h"
#include "gk20a.h"
#include <core/tegra.h>
#include <subdev/timer.h>
/* Mapping between the PL (post-divider select) field and the actual
 * divider value.  Note the values are not monotonic past PL 9.
 */
static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

/* PL field -> divider; out-of-range PL values decode as 1. */
static u32 pl_to_div(u32 pl)
{
	return pl < ARRAY_SIZE(_pl_to_div) ? _pl_to_div[pl] : 1;
}

/* Divider -> smallest PL whose divider is >= div; saturates at the
 * last table entry.
 */
static u32 div_to_pl(u32 div)
{
	u32 pl = 0;

	while (pl < ARRAY_SIZE(_pl_to_div) - 1 && _pl_to_div[pl] < div)
		pl++;

	return pl;
}
/* GPCPLL operating limits for gk20a (frequencies in KHz, 'u' is the
 * PLL input/update rate).  min_pl/max_pl are expressed as divider
 * values, not raw PL encodings -- gk20a_pllg_calc_mnp() clamps
 * against them before converting via div_to_pl().
 */
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000000, .max_vco = 2064000,
	.min_u = 12000, .max_u = 38000,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};
/* Decode the current M/N/PL coefficients from the GPCPLL coefficient
 * register into *pll.
 */
void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;
	val = nvkm_rd32(device, GPCPLL_COEFF);
	pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
/* Pack *pll's M/N/PL coefficients and write them to the GPCPLL
 * coefficient register.
 */
void
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;
	val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
	val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);
}
/* Output rate for the given coefficients:
 * parent_rate * N / (M * PL-divider) / 2.  The final /2 undoes the
 * 2x target used by the search (cf. "rate * 2" in calc_mnp).
 */
u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	u32 vco = clk->parent_rate * pll->n;
	u32 div = pll->m * clk->pl_to_div(pll->pl);

	return vco / div / 2;
}
/* Find the (M, N, PL) combination producing the rate closest to
 * 'rate'.
 *
 * The target is internally doubled (the PLL runs at 2x the requested
 * clock) and padded by 2% to bias the search toward the lowest
 * PL/VCO that can reach it.  For each candidate PL, M is limited by
 * the update-rate (u) bounds, and both the floor and ceiling N for
 * the target VCO are tried; the candidate with the smallest output
 * error wins, with an early exit on an exact match.  Always returns
 * 0; when nothing was in range (WARN_ON below) the boundary defaults
 * are kept.
 */
int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
		    struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f;
	u32 best_m, best_n;
	u32 best_delta = ~0;
	u32 pl;
	target_clk_f = rate * 2 / KHZ;
	ref_clk_f = clk->parent_rate / KHZ;
	/* +2% headroom on the target VCO */
	target_vco_f = target_clk_f + target_clk_f / 50;
	max_vco_f = max(clk->params->max_vco, target_vco_f);
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;
	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);
	/* the bounds above are divider values; convert to PL encodings */
	high_pl = clk->div_to_pl(high_pl);
	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);
	low_pl = clk->div_to_pl(low_pl);
	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));
	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		u32 m, n, n2;
		target_vco_f = target_clk_f * clk->pl_to_div(pl);
		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u32 u_f = ref_clk_f / m;
			/* u_f shrinks as m grows: break vs continue below */
			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;
			/* n (floor) .. n2 (ceiling) for the target VCO */
			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
			if (n > clk->params->max_n)
				break;
			for (; n <= n2; n++) {
				u32 vco_f;
				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;
				vco_f = ref_clk_f * n / m;
				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					u32 delta, lwv;
					/* rounded output clock for this PL */
					lwv = (vco_f + (clk->pl_to_div(pl) / 2))
						/ clk->pl_to_div(pl);
					delta = abs(lwv - target_clk_f);
					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;
						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}
found_match:
	WARN_ON(best_delta == ~0);
	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f / KHZ);
	pll->m = best_m;
	pll->n = best_n;
	pll->pl = best_pl;
	target_freq = gk20a_pllg_calc_rate(clk, pll);
	nvkm_debug(subdev,
		   "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq / KHZ, pll->m, pll->n, pll->pl,
		   clk->pl_to_div(pll->pl));
	return 0;
}
/* Dynamically ramp NDIV to 'n' without dropping the PLL output.
 *
 * Enters the NDIV slowdown mode, writes the new coefficients,
 * triggers the dynamic ramp and waits (up to 500us) for the done
 * flag before leaving slowdown mode.  Returns -ETIMEDOUT when the
 * ramp never signals completion; a no-op when N is already 'n'.
 */
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;
	int ret = 0;
	/* get old coefficients */
	gk20a_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n == pll.n)
		return 0;
	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
	/* new ndiv ready for ramp */
	pll.n = n;
	udelay(1);
	gk20a_pllg_write_mnp(clk, &pll);
	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;
	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
	return ret;
}
/*
 * Enable the GPC PLL, wait for it to lock, then switch the GPC2CLK
 * output from bypass to the PLL VCO.
 *
 * Returns 0 on success, -ETIMEDOUT if lock is not reached within 300us.
 */
static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	/* read back to flush the posted write */
	nvkm_rd32(device, GPCPLL_CFG);

	/* enable lock detection */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	/* wait for lock */
	if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
			   GPCPLL_CFG_LOCK) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}
/* Switch GPC2CLK to bypass, then power the PLL down. */
static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	/* read back to flush the posted write */
	nvkm_rd32(device, GPCPLL_CFG);
}
/*
 * Full (non-glitchless) reprogram of the PLL M/N/PL coefficients:
 * halve the output divider to soften the VCO-to-bypass jump, disable
 * the PLL, write the new coefficients, re-enable, then restore the
 * 1:1 output divider.
 *
 * Returns 0 on success or the error from gk20a_pllg_enable().
 */
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll cur_pll;
	int ret;

	gk20a_pllg_read_mnp(clk, &cur_pll);

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	gk20a_pllg_disable(clk);

	gk20a_pllg_write_mnp(clk, pll);

	ret = gk20a_pllg_enable(clk);
	if (ret)
		return ret;

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}
/*
 * Program new PLL coefficients, using the glitchless NDIV slide where
 * possible: if only N changes we slide directly; otherwise slide down
 * to NDIV_LO, do a full reprogram at NDIV_LO, then slide up to the
 * target NDIV.
 */
static int
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(clk)) {
		gk20a_pllg_read_mnp(clk, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gk20a_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
		ret = gk20a_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
	ret = gk20a_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gk20a_pllg_slide(clk, pll->n);
}
/*
 * Performance state table for gk20a: GPC clock rate (KHz) paired with a
 * voltage index. Rates increase monotonically with the voltage level.
 */
static struct nvkm_pstate
gk20a_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 72000,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 108000,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 180000,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 252000,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 324000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 396000,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 468000,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 540000,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 612000,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 648000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 684000,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 708000,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 756000,
			.voltage = 12,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 804000,
			.voltage = 13,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 852000,
			.voltage = 14,
		},
	},
};
/*
 * nvkm_clk::read implementation: report the current rate of the given
 * clock source. The GPC rate is derived from the live PLL coefficients.
 * Returns the rate, or -EINVAL for an unknown source.
 */
int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;

	if (src == nv_clk_src_crystal)
		return device->crystal;

	if (src == nv_clk_src_gpc) {
		gk20a_pllg_read_mnp(clk, &pll);
		return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
	}

	nvkm_error(subdev, "invalid clock source %d\n", src);
	return -EINVAL;
}
int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
struct gk20a_clk *clk = gk20a_clk(base);
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV, &clk->pll);
}
/*
 * nvkm_clk::prog implementation: apply the cached PLL coefficients,
 * preferring the glitchless NDIV slide and falling back to a full
 * reprogram if the slide path fails.
 */
int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	int err;

	err = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
	if (!err)
		return 0;

	return gk20a_pllg_program_mnp(clk, &clk->pll);
}
/* nvkm_clk::tidy implementation: nothing to clean up on gk20a. */
void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}
/*
 * Program the dynamic ramp step coefficients (STEP A/B) used by the
 * NDIV slide logic. The coefficients depend on the parent (reference)
 * clock frequency.
 *
 * Returns 0 on success, -EINVAL if the parent rate is not one of the
 * supported reference frequencies.
 */
int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 step_a, step_b;

	switch (clk->parent_rate) {
	case 12000000:
	case 12800000:
	case 13000000:
		step_a = 0x2b;
		step_b = 0x0b;
		break;
	case 19200000:
		step_a = 0x12;
		step_b = 0x08;
		break;
	case 38400000:
		step_a = 0x04;
		step_b = 0x05;
		break;
	default:
		/* '\n' added: every other log message in this file ends one */
		nvkm_error(subdev, "invalid parent clock rate %u KHz\n",
			   clk->parent_rate / KHZ);
		return -EINVAL;
	}

	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	return 0;
}
/*
 * nvkm_clk::fini implementation: slide down to the minimum VCO rate,
 * disable the PLL, and put it in IDDQ (power-gated) state.
 */
void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gk20a_clk *clk = gk20a_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(clk)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(clk, &pll);
		n_lo = gk20a_pllg_n_lo(clk, &pll);
		gk20a_pllg_slide(clk, n_lo);
	}

	gk20a_pllg_disable(clk);

	/*
	 * set IDDQ
	 * NOTE(review): data is literal 1 rather than GPCPLL_CFG_IDDQ -
	 * verify against nvkm_mask() semantics and the IDDQ bit position
	 */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
/*
 * nvkm_clk::init implementation: bring the PLL out of IDDQ, set up the
 * output divider and slide coefficients, then program the lowest
 * pstate as a safe starting frequency.
 */
static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}
/* nvkm_clk implementation hooks and clock domains for gk20a. */
static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};
/*
 * Common constructor shared with gm20b: finish the pstate list, record
 * the PLL parameters and parent (reference) clock rate, and construct
 * the base nvkm_clk object.
 *
 * Returns 0 on success or the error from nvkm_clk_ctor().
 */
int
gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params,
	       struct gk20a_clk *clk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	int ret;
	int i;

	/* Finish initializing the pstates */
	for (i = 0; i < func->nr_pstates; i++) {
		INIT_LIST_HEAD(&func->pstates[i].list);
		/* pstate ids are 1-based */
		func->pstates[i].pstate = i + 1;
	}

	clk->params = params;
	/* reference clock comes from the Tegra clock framework */
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
	if (ret)
		return ret;

	nvkm_debug(&clk->base.subdev, "parent clock rate: %d Khz\n",
		   clk->parent_rate / KHZ);

	return 0;
}
/*
 * Allocate and construct the gk20a clock subdev. The pl_to_div hooks
 * are assigned even on ctor failure; this is harmless since the object
 * is torn down by the caller in that case.
 */
int
gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;

	return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <subdev/clk.h>
#include <subdev/volt.h>
#include <subdev/timer.h>
#include <core/device.h>
#include <core/tegra.h>
#include "priv.h"
#include "gk20a.h"
/* gm20b-specific GPCPLL register bits and fields */
#define GPCPLL_CFG_SYNC_MODE	BIT(2)

/* global PLL bypass control */
#define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH	1

/* sigma-delta modulator input (fractional NDIV) fields in CFG2 */
#define GPCPLL_CFG2_SDM_DIN_SHIFT	0
#define GPCPLL_CFG2_SDM_DIN_WIDTH	8
#define GPCPLL_CFG2_SDM_DIN_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT	8
#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH	15
#define GPCPLL_CFG2_SDM_DIN_NEW_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
#define GPCPLL_CFG2_SETUP2_SHIFT	16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24

/* noise-aware DVFS coefficient / detection-limit register */
#define GPCPLL_DVFS0	(SYS_GPCPLL_CFG_BASE + 0x10)
#define GPCPLL_DVFS0_DFS_COEFF_SHIFT	0
#define GPCPLL_DVFS0_DFS_COEFF_WIDTH	7
#define GPCPLL_DVFS0_DFS_COEFF_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT	8
#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH	7
#define GPCPLL_DVFS0_DFS_DET_MAX_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)

/* DVFS control / calibration register */
#define GPCPLL_DVFS1	(SYS_GPCPLL_CFG_BASE + 0x14)
#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT	0
#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH	7
#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT	7
#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH	1
#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT	8
#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH	7
#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT	15
#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH	1
#define GPCPLL_DVFS1_DFS_CTRL_SHIFT	16
#define GPCPLL_DVFS1_DFS_CTRL_WIDTH	12
#define GPCPLL_DVFS1_EN_SDM_SHIFT	28
#define GPCPLL_DVFS1_EN_SDM_WIDTH	1
#define GPCPLL_DVFS1_EN_SDM_BIT	BIT(28)
#define GPCPLL_DVFS1_EN_DFS_SHIFT	29
#define GPCPLL_DVFS1_EN_DFS_WIDTH	1
#define GPCPLL_DVFS1_EN_DFS_BIT	BIT(29)
#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT	30
#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH	1
#define GPCPLL_DVFS1_EN_DFS_CAL_BIT	BIT(30)
#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT	31
#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH	1
#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT	BIT(31)

/* broadcast DVFS register (external calibration / strobe) */
#define GPC_BCAST_GPCPLL_DVFS2	(GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT	BIT(16)

#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT	24
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH	7

/* signed ranges of the DVFS detection and SDM inputs */
#define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */
/* Chip-tuned DVFS coefficient curve (coeff = slope * mV + offset). */
struct gm20b_clk_dvfs_params {
	s32 coeff_slope;
	s32 coeff_offs;
	u32 vco_ctrl;
};

static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
	.coeff_slope = -165230,
	.coeff_offs = 214007,
	.vco_ctrl = 0x7 << 3,
};
/*
 * base.n is now the *integer* part of the N factor.
 * sdm_din contains n's decimal part.
 */
struct gm20b_pll {
	struct gk20a_pll base;
	u32 sdm_din;	/* fractional NDIV fed to the sigma-delta modulator */
};
/* One set of noise-aware DVFS detection settings. */
struct gm20b_clk_dvfs {
	u32 dfs_coeff;		/* DFS coefficient programmed into DVFS0 */
	s32 dfs_det_max;	/* maximum detection limit */
	s32 dfs_ext_cal;	/* external calibration override */
};
/* gm20b clock state: extends gk20a with noise-aware DVFS bookkeeping. */
struct gm20b_clk {
	/* currently applied parameters */
	struct gk20a_clk base;
	struct gm20b_clk_dvfs dvfs;
	u32 uv;

	/* new parameters to apply */
	struct gk20a_pll new_pll;
	struct gm20b_clk_dvfs new_dvfs;
	u32 new_uv;

	const struct gm20b_clk_dvfs_params *dvfs_params;

	/* fused parameters */
	s32 uvdet_slope;
	s32 uvdet_offs;

	/* safe frequency we can use at minimum voltage */
	u32 safe_fmax_vmin;
};

/* downcast from nvkm_clk via the embedded gk20a base */
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
/* On gm20b the PL field encodes the linear divider directly (1:1). */
static u32 pl_to_div(u32 pl)
{
	u32 div = pl;

	return div;
}
/* Inverse of pl_to_div(): the divider maps 1:1 back to the PL field. */
static u32 div_to_pl(u32 div)
{
	u32 pl = div;

	return pl;
}
/* PLL operating limits for gm20b (frequencies in KHz). */
static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
	.min_vco = 1300000, .max_vco = 2600000,
	.min_u = 12000, .max_u = 38400,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 31,
};
/* Read M/N/PL plus the fractional SDM_DIN field from the hardware. */
static void
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;

	gk20a_pllg_read_mnp(&clk->base, &pll->base);
	val = nvkm_rd32(device, GPCPLL_CFG2);
	pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
		       MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
}
/* Write SDM_DIN first, then M/N/PL, to the hardware. */
static void
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
	gk20a_pllg_write_mnp(&clk->base, &pll->base);
}
/*
 * Determine DFS_COEFF for the requested voltage. Always select external
 * calibration override equal to the voltage, and set maximum detection
 * limit "0" (to make sure that PLL output remains under F/V curve when
 * voltage increases).
 */
static void
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
			  struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
	u32 coeff;
	/* Work with mv as uv would likely trigger an overflow */
	s32 mv = DIV_ROUND_CLOSEST(uv, 1000);

	/* coeff = slope * voltage + offset */
	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
	coeff = DIV_ROUND_CLOSEST(coeff, 1000);
	/* clamp to the width of the hardware field */
	dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));

	dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
					     clk->uvdet_slope);
	/* should never happen */
	if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
		nvkm_error(subdev, "dfs_ext_cal overflow!\n");

	dvfs->dfs_det_max = 0;

	nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
		   __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
		   dvfs->dfs_det_max);
}
/*
 * Solve equation for integer and fractional part of the effective NDIV:
 *
 * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
 *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
 *
 * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
 */
static void
gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gk20a_clk_pllg_params *p = clk->base.params;
	/*
	 * n must be signed: the original u32 made the "n <= 0" check below
	 * dead (an unsigned value is never negative), so a det_delta larger
	 * than n_eff << DFS_DET_RANGE would silently wrap to a huge value.
	 * The (u32)n cast further down shows signed arithmetic was intended.
	 */
	s32 n;
	s32 det_delta;
	u32 rem, rem_range;

	/* calculate current ext_cal and subtract previous one */
	det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
				      clk->uvdet_slope);
	det_delta -= clk->dvfs.dfs_ext_cal;
	det_delta = min(det_delta, clk->dvfs.dfs_det_max);
	det_delta *= clk->dvfs.dfs_coeff;

	/* integer part of n */
	n = (s32)(n_eff << DFS_DET_RANGE) - det_delta;
	/* should never happen! */
	if (n <= 0) {
		nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
		n = 1 << DFS_DET_RANGE;
	}
	if ((n >> DFS_DET_RANGE) > p->max_n) {
		nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
		n = p->max_n << DFS_DET_RANGE;
	}
	*n_int = n >> DFS_DET_RANGE;

	/* fractional part of n */
	rem = ((u32)n) & MASK(DFS_DET_RANGE);
	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
	/* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
	/* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
	*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);

	nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
		   n_eff, *n_int, *sdm_din);
}
/*
 * Glitchless NDIV change for gm20b. In DVFS mode the SDM fraction is
 * staged through the SDM_DIN_NEW field before the ramp and committed
 * into SDM_DIN after the ramp completes.
 *
 * Returns 0 on success, -ETIMEDOUT if the ramp does not finish
 * within 500us.
 */
static int
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll pll;
	u32 n_int, sdm_din;
	int ret = 0;

	/* calculate the new n_int/sdm_din for this n/uv */
	gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);

	/* get old coefficients */
	gm20b_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n_int == pll.base.n && sdm_din == pll.sdm_din)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	/* in DVFS mode SDM is updated via "new" field */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
		sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
	pll.base.n = n_int;
	udelay(1);
	gk20a_pllg_write_mnp(&clk->base, &pll.base);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* in DVFS mode complete SDM update */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	/* read back to flush the posted writes */
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}
/*
 * Enable the PLL and switch to VCO mode. Unlike gk20a, lock detection
 * cannot be used in DVFS mode, so a fixed 40us delay stands in for the
 * lock wait; SYNC_MODE is set for a glitchless switch out of bypass.
 */
static int
gm20b_pllg_enable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* In DVFS mode lock cannot be used - so just delay */
	udelay(40);

	/* set SYNC_MODE for glitchless switch out of bypass */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
		       GPCPLL_CFG_SYNC_MODE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}
/* Switch to bypass, clear SYNC_MODE, then power the PLL down. */
static void
gm20b_pllg_disable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	/* clear SYNC_MODE before disabling PLL */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	/* read back to flush the posted write */
	nvkm_rd32(device, GPCPLL_CFG);
}
/*
 * Program new PLL coefficients. If only PL changes (and the PLL is
 * already enabled) a glitchless transition is possible, optionally via
 * an interim PL value; otherwise the PLL is disabled, reprogrammed and
 * re-enabled. The GPC2CLK output divider is halved around the
 * transition to soften the VCO-to-bypass jump.
 */
static int
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll cur_pll;
	u32 n_int, sdm_din;
	/* if we only change pdiv, we can do a glitchless transition */
	bool pdiv_only;
	int ret;

	gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
	gm20b_pllg_read_mnp(clk, &cur_pll);
	pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
		    cur_pll.base.m == pll->m;

	/* need full sequence if clock not enabled yet */
	if (!gk20a_pllg_is_enabled(&clk->base))
		pdiv_only = false;

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	if (pdiv_only) {
		u32 old = cur_pll.base.pl;
		u32 new = pll->pl;

		/*
		 * we can do a glitchless transition only if the old and new PL
		 * parameters share at least one bit set to 1. If this is not
		 * the case, calculate and program an interim PL that will allow
		 * us to respect that rule.
		 */
		if ((old & new) == 0) {
			cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
					      new | BIT(ffs(old) - 1));
			gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
		}

		cur_pll.base.pl = new;
		gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
	} else {
		/* disable before programming if more than pdiv changes */
		gm20b_pllg_disable(clk);

		cur_pll.base = *pll;
		cur_pll.base.n = n_int;
		cur_pll.sdm_din = sdm_din;
		gm20b_pllg_write_mnp(clk, &cur_pll);

		ret = gm20b_pllg_enable(clk);
		if (ret)
			return ret;
	}

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}
/*
 * gm20b counterpart of gk20a_pllg_program_mnp_slide(): slide NDIV when
 * only N changes, otherwise slide down to NDIV_LO, reprogram fully,
 * and slide back up to the target NDIV.
 */
static int
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(&clk->base)) {
		gk20a_pllg_read_mnp(&clk->base, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gm20b_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
		ret = gm20b_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
	ret = gm20b_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gm20b_pllg_slide(clk, pll->n);
}
static int
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
struct gm20b_clk *clk = gm20b_clk(base);
struct nvkm_subdev *subdev = &base->subdev;
struct nvkm_volt *volt = base->subdev.device->volt;
int ret;
ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
GK20A_CLK_GPC_MDIV, &clk->new_pll);
if (ret)
return ret;
clk->new_uv = volt->vid[cstate->voltage].uv;
gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
return 0;
}
/*
 * Compute PLL parameters that are always safe for the current voltage
 */
static void
gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
{
	u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
	u32 parent_rate = clk->base.parent_rate / KHZ;
	u32 nmin, nsafe;

	/* remove a safe margin of 10% */
	if (rate > clk->safe_fmax_vmin)
		rate = rate * (100 - 10) / 100;

	/* gpc2clk is twice the gpc rate */
	rate *= 2;

	/* minimum N that keeps the VCO above its lower bound */
	nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
	nsafe = pll->m * rate / (clk->base.parent_rate);

	/* if below min VCO, raise PL instead so N can stay >= nmin */
	if (nsafe < nmin) {
		pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
		nsafe = nmin;
	}

	pll->n = nsafe;
}
/*
 * Program the DFS coefficient, strobing the external-coefficient read
 * around the write as the hardware requires.
 */
static void
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
		  coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
}
/*
 * Program the external DFS calibration value and make sure the
 * hardware uses it in place of its internal calibration result.
 */
static void
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
{
	struct nvkm_device *device = clk->base.base.subdev.device;
	u32 val;

	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
		  dfs_det_cal);
	udelay(1);

	val = nvkm_rd32(device, GPCPLL_DVFS1);
	if (!(val & BIT(25))) {
		/*
		 * Use external value to overwrite calibration value
		 * NOTE(review): raw BIT(25)/BIT(16) fall inside the
		 * DFS_CTRL field (bits 16-27) - confirm against the
		 * hardware manual and consider named defines
		 */
		val |= BIT(25) | BIT(16);
		nvkm_wr32(device, GPCPLL_DVFS1, val);
	}
}
/*
 * Program the complete DFS detection settings: coefficient, maximum
 * detection limit, and external calibration value.
 */
static void
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
				 struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0,
		  GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
		  dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
		  dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);

	gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
}
/*
 * nvkm_clk::prog implementation: apply the PLL and DVFS settings
 * computed by gm20b_clk_calc(). When the voltage changes, an interim
 * "safe" frequency with zero DVFS coefficient is programmed first so
 * the PLL output never crosses the F/V curve during the transition.
 */
static int
gm20b_clk_prog(struct nvkm_clk *base)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	u32 cur_freq;
	int ret;

	/* No change in DVFS settings? */
	if (clk->uv == clk->new_uv)
		goto prog;

	/*
	 * Interim step for changing DVFS detection settings: low enough
	 * frequency to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by volt, but maximum
	 *   detection limit assures PLL output remains under F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by volt afterwards
	 *
	 * Interim step can be skipped if old frequency is below safe minimum,
	 * i.e., it is low enough to be safe at any voltage in operating range
	 * with zero DVFS coefficient.
	 */
	cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
	if (cur_freq > clk->safe_fmax_vmin) {
		struct gk20a_pll pll_safe;

		if (clk->uv < clk->new_uv)
			/* voltage will raise: safe frequency is current one */
			pll_safe = clk->base.pll;
		else
			/* voltage will drop: safe frequency is new one */
			pll_safe = clk->new_pll;

		gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
		ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
		if (ret)
			return ret;
	}

	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero
	 * - Set calibration level to new voltage
	 * - Set DVFS coefficient to match new voltage
	 */
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
	gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);

prog:
	/* commit the "new" state as current and program the PLL */
	clk->uv = clk->new_uv;
	clk->dvfs = clk->new_dvfs;
	clk->base.pll = clk->new_pll;

	return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
}
/*
 * Performance state table for gm20b: GPC clock rate (KHz) paired with
 * a voltage index. The last entry is excluded for speedo-0 parts (see
 * gm20b_clk_speedo0.nr_pstates).
 */
static struct nvkm_pstate
gm20b_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 76800,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 153600,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 230400,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 307200,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 384000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 460800,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 537600,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 614400,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 691200,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 768000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 844800,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 921600,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 998400,
			.voltage = 12,
		},
	},
};
/*
 * nvkm_clk::fini implementation: slide down to the minimum VCO rate,
 * disable the PLL, and put it in IDDQ (power-gated) state.
 */
static void
gm20b_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gm20b_clk *clk = gm20b_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(&clk->base)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(&clk->base, &pll);
		n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
		gm20b_pllg_slide(clk, n_lo);
	}

	gm20b_pllg_disable(clk);

	/*
	 * set IDDQ
	 * NOTE(review): data is literal 1 rather than GPCPLL_CFG_IDDQ -
	 * verify against nvkm_mask() semantics and the IDDQ bit position
	 */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
/*
 * Enable and calibrate the noise-aware DVFS logic. If the ADC
 * calibration parameters were read from fuses, internal calibration is
 * started but its result ignored; otherwise internal calibration is
 * run to completion and combined with the default ADC slope to derive
 * the offset. Finally the initial DVFS detection settings for the
 * current voltage are computed and programmed.
 *
 * Returns 0 on success, -ETIMEDOUT if internal calibration never
 * completes.
 */
static int
gm20b_clk_init_dvfs(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	bool fused = clk->uvdet_offs && clk->uvdet_slope;
	static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
	u32 data;
	int ret;

	/* Enable NA DVFS */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
		  GPCPLL_DVFS1_EN_DFS_BIT);

	/* Set VCO_CTRL */
	if (clk->dvfs_params->vco_ctrl)
		nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
		      clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);

	if (fused) {
		/* Start internal calibration, but ignore results */
		nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

		/* got uvdev parameters from fuse, skip calibration */
		goto calibrated;
	}

	/*
	 * If calibration parameters are not fused, start internal calibration,
	 * wait for completion, and use results along with default slope to
	 * calculate ADC offset during boot.
	 */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
		  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

	/* Wait for internal calibration done (spec < 2us). */
	ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
	if (ret < 0) {
		nvkm_error(subdev, "GPCPLL calibration timeout\n");
		return -ETIMEDOUT;
	}

	data = nvkm_rd32(device, GPCPLL_CFG3) >>
			 GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
	data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);

	clk->uvdet_slope = ADC_SLOPE_UV;
	clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;

	nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
		   clk->uvdet_offs, clk->uvdet_slope);

calibrated:
	/* Compute and apply initial DVFS parameters */
	gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
	/*
	 * fix: program the settings just computed into clk->dvfs; the
	 * previous &clk->new_dvfs was still zero-initialized at init time
	 * and would have cleared the coefficient programmed above
	 */
	gm20b_dvfs_program_dfs_detection(clk, &clk->dvfs);

	return 0;
}
/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
static const struct nvkm_clk_func gm20b_clk;
/*
 * nvkm_clk::init implementation shared by both speedo variants: bring
 * the PLL out of IDDQ, route bypass control to the VCO, configure the
 * slide coefficients and misc workarounds, initialize DVFS on
 * speedo >= 1 parts, and program the lowest pstate.
 */
static int
gm20b_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 data;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	/* Set the global bypass control to VCO */
	nvkm_mask(device, BYPASSCTRL_SYS,
	       MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
	       0);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
	data = nvkm_rd32(device, 0x021944);
	if (!(data & 0x3)) {
		data |= 0x2;
		nvkm_wr32(device, 0x021944, data);

		data = nvkm_rd32(device, 0x021948);
		data |= 0x1;
		nvkm_wr32(device, 0x021948, data);
	}

	/* Disable idle slow down */
	nvkm_mask(device, 0x20160, 0x003f0000, 0x0);

	/* speedo >= 1? */
	if (clk->base.func == &gm20b_clk) {
		struct gm20b_clk *_clk = gm20b_clk(base);
		struct nvkm_volt *volt = device->volt;

		/* Get current voltage */
		_clk->uv = nvkm_volt_get(volt);

		/* Initialize DVFS */
		ret = gm20b_clk_init_dvfs(_clk);
		if (ret)
			return ret;
	}

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}
/*
 * Hook table for speedo-0 parts: no DVFS, so the gk20a implementations
 * of fini/calc/prog are reused and only init is gm20b-specific.
 */
static const struct nvkm_clk_func
gm20b_clk_speedo0 = {
	.init = gm20b_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	/* Speedo 0 only supports 12 voltages */
	.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};
/* Hook table for speedo >= 1 parts with noise-aware DVFS support. */
static const struct nvkm_clk_func
gm20b_clk = {
	.init = gm20b_clk_init,
	.fini = gm20b_clk_fini,
	.read = gk20a_clk_read,
	.calc = gm20b_clk_calc,
	.prog = gm20b_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	.nr_pstates = ARRAY_SIZE(gm20b_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};
/* Create a legacy (gk20a-style) clock object for speedo-0 GM20B parts.
 * On success *pclk owns the new object; on ctor failure the partially
 * constructed object is still published for the caller's teardown path.
 */
static int
gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0,
			     &gm20b_pllg_params, clk);

	/* divider conversion callbacks are set regardless of ctor status */
	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;
	return ret;
}
/* FUSE register */
#define FUSE_RESERVED_CALIB0 0x204
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
/* Read the fused voltage-detection (uvdet) calibration parameters from
 * the Tegra fuses.
 *
 * Returns 0 on success, -EINVAL when no fuse revision is programmed
 * (or when not built for Tegra), in which case the caller must
 * calibrate at runtime.
 */
static int
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	u32 val = 0;
	u32 rev = 0;

#if IS_ENABLED(CONFIG_ARCH_TEGRA)
	tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
	rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
	      MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
#endif

	/* No fused parameters, we will calibrate later */
	if (rev == 0)
		return -EINVAL;

	/* Integer part in mV + fractional part in uV */
	clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));

	/* Integer part in mV + fractional part in 100uV */
	clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;

	nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
		   clk->uvdet_slope, clk->uvdet_offs);
	return 0;
}
/* Determine a safe GPC frequency for the lowest supported voltage.
 *
 * Scans the voltage table for the minimum voltage, finds the fastest
 * pstate available at that voltage id, and records 90% of that
 * frequency in clk->safe_fmax_vmin.
 *
 * Returns 0 on success, -EINVAL if no pstate matches the minimum
 * voltage id.
 */
static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_volt *volt = subdev->device->volt;
	struct nvkm_pstate *pstates = clk->base.base.func->pstates;
	int nr_pstates = clk->base.base.func->nr_pstates;
	int vmin, id;
	u32 fmax = 0;
	int i;

	/* find lowest voltage we can use */
	vmin = volt->vid[0].uv;
	/* fix: id previously stayed 0 when entry 0 was the strict minimum,
	 * even though its vid may not be 0 */
	id = volt->vid[0].vid;
	for (i = 1; i < volt->vid_nr; i++) {
		if (volt->vid[i].uv <= vmin) {
			vmin = volt->vid[i].uv;
			id = volt->vid[i].vid;
		}
	}

	/* find max frequency at this voltage */
	for (i = 0; i < nr_pstates; i++)
		if (pstates[i].base.voltage == id)
			fmax = max(fmax,
				   pstates[i].base.domain[nv_clk_src_gpc]);

	if (!fmax) {
		nvkm_error(subdev, "failed to evaluate safe fmax\n");
		return -EINVAL;
	}

	/* we are safe at 90% of the max frequency */
	clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
	nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);

	return 0;
}
/* Create the GM20B clock object.
 *
 * Speedo-0 chips cannot use the noise-aware PLL (NAPLL) and fall back
 * to the legacy gk20a-style implementation.  For speedo >= 1 the PLL
 * parameters are copied into trailing storage so the M range can be
 * clamped to the only value NAPLL supports, fused calibration data is
 * loaded (best-effort), and a safe fallback frequency is computed.
 */
int
gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gm20b_clk *clk;
	struct nvkm_subdev *subdev;
	struct gk20a_clk_pllg_params *clk_params;
	int ret;

	/* Speedo 0 GPUs cannot use noise-aware PLL */
	if (tdev->gpu_speedo_id == 0)
		return gm20b_clk_new_speedo0(device, type, inst, pclk);

	/* Speedo >= 1, use NAPLL */
	clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base.base;
	subdev = &clk->base.base.subdev;

	/* duplicate the clock parameters since we will patch them below */
	clk_params = (void *) (clk + 1);	/* trailing storage allocated above */
	*clk_params = gm20b_pllg_params;
	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
	if (ret)
		return ret;

	/*
	 * NAPLL can only work with max_u, clamp the m range so
	 * gk20a_pllg_calc_mnp always uses it
	 */
	clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
						(clk->base.parent_rate / KHZ));
	if (clk_params->max_m == 0) {
		nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
		/* NOTE(review): *pclk briefly points at freed memory until
		 * the speedo0 path re-assigns it — presumably harmless since
		 * the caller only uses *pclk after a successful return. */
		kfree(clk);
		return gm20b_clk_new_speedo0(device, type, inst, pclk);
	}

	clk->base.pl_to_div = pl_to_div;
	clk->base.div_to_pl = div_to_pl;

	clk->dvfs_params = &gm20b_dvfs_params;

	ret = gm20b_clk_init_fused_params(clk);
	/*
	 * we will calibrate during init - should never happen on
	 * prod parts
	 */
	if (ret)
		nvkm_warn(subdev, "no fused calibration parameters\n");

	ret = gm20b_clk_init_safe_fmax(clk);
	if (ret)
		return ret;

	return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
#include "gt215.h"
#include "pll.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>
/* Per-device clock state for the MCP77 clock implementation.  The
 * fields below cache the source selections and register values computed
 * by mcp77_clk_calc() for mcp77_clk_prog() to program.
 */
struct mcp77_clk {
	struct nvkm_clk base;
	enum nv_clk_src csrc, ssrc, vsrc;	/* core/shader/vdec clock sources */
	u32 cctrl, sctrl;	/* PLL control (divider) register values */
	u32 ccoef, scoef;	/* PLL coefficient (N/M) register values */
	u32 cpost, spost;	/* PLL post-divider register values */
	u32 vdiv;		/* vdec divider register value */
};
/* Read the vdec clock divider configuration register (0x4600). */
static u32
read_div(struct mcp77_clk *clk)
{
	return nvkm_rd32(clk->base.subdev.device, 0x004600);
}
/* Compute the current output frequency of the PLL at register @base.
 *
 * The reference clock is href (PCIe reference).  The post-divider
 * location and encoding differ per PLL: for 0x4020 the field in 0x4070
 * is a power-of-two shift, while for 0x4028 the field in 0x4040 is the
 * raw divider value.  Returns 0 when the PLL is disabled (bit 31 of
 * ctrl clear) or M is zero.
 */
static u32
read_pll(struct mcp77_clk *clk, u32 base)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, base + 0);
	u32 coef = nvkm_rd32(device, base + 4);
	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
	u32 post_div = 0;
	u32 clock = 0;
	int N1, M1;

	switch (base){
	case 0x4020:
		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
		break;
	case 0x4028:
		/* NOTE(review): raw field — a zero here would divide by zero
		 * below; presumably hardware never programs 0 — confirm. */
		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
		break;
	default:
		break;
	}

	N1 = (coef & 0x0000ff00) >> 8;
	M1 = (coef & 0x000000ff);
	if ((ctrl & 0x80000000) && M1) {
		clock = ref * N1 / M1;
		clock = clock / post_div;
	}

	return clock;
}
/* Report the current frequency (kHz) of clock source @src by decoding
 * the clock-source-select register 0xc054 and the relevant PLL/divider
 * registers.  Returns 0 for unknown or unsupported sources.
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* post-divider shift from nvpll control register */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* post-divider shift from spll control register */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* IGP: no dedicated video memory clock */
		return 0;
	case nv_clk_src_vdec:
		P = (read_div(clk) & 0x00000700) >> 8;

		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
		default:
			return 500000 >> P;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
/* Compute PLL coefficients to reach @clock on the PLL described by the
 * BIOS entry for @reg.  The second VCO is disabled and the reference
 * clock is forced to href.  Returns the achievable frequency (0 on
 * failure); N/M/P receive the chosen coefficients.
 */
static u32
calc_pll(struct mcp77_clk *clk, u32 reg,
	 u32 clock, int *N, int *M, int *P)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
	if (ret)
		return 0;

	pll.vco2.max_freq = 0;	/* single-VCO operation only */
	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
	if (!pll.refclk)
		return 0;

	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
}
/* Find the power-of-two divider (0..7) of @src that lands closest to
 * @target.  Writes the chosen shift count to *div and returns the
 * resulting frequency.  When the next-larger candidate is closer, the
 * shift is decremented and that candidate returned instead.
 */
static inline u32
calc_P(u32 src, u32 target, int *div)
{
	u32 lo = src, hi = src;
	int p = 0;

	while (p <= 7) {
		if (lo <= target) {
			/* candidate one shift above, unless no shift applied */
			hi = p ? lo << 1 : lo;
			break;
		}
		lo >>= 1;
		p++;
	}

	*div = p;
	if (target - lo <= hi - target)
		return lo;
	*div = p - 1;
	return hi;
}
/* Choose clock sources and PLL settings for the requested cstate.
 *
 * For each of core (cclk), shader (sclk) and vdec the code picks
 * whichever candidate (divided reference vs. PLL) lands closest to the
 * requested frequency, and caches the selection in *clk for
 * mcp77_clk_prog() to program.
 *
 * NOTE(review): clk->spost is printed and later written to 0x4070 but
 * never assigned here; it stays 0 from kzalloc() — confirm intended.
 */
static int
mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	const int shader = cstate->domain[nv_clk_src_shader];
	const int core = cstate->domain[nv_clk_src_core];
	const int vdec = cstate->domain[nv_clk_src_vdec];
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 out = 0, clock = 0;
	int N, M, P1, P2 = 0;
	int divs = 0;

	/* cclk: find suitable source, disable PLL if we can */
	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);

	/* Calculate clock * 2, so shader clock can use it too */
	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);

	if (abs(core - out) <= abs(core - (clock >> 1))) {
		clk->csrc = nv_clk_src_hclkm4;
		clk->cctrl = divs << 16;
	} else {
		/* NVCTRL is actually used _after_ NVPOST, and after what we
		 * call NVPLL. To make matters worse, NVPOST is an integer
		 * divider instead of a right-shift number. */
		if(P1 > 2) {
			P2 = P1 - 2;
			P1 = 2;
		}
		clk->csrc = nv_clk_src_core;
		clk->ccoef = (N << 8) | M;

		clk->cctrl = (P2 + 1) << 16;
		clk->cpost = (1 << P1) << 16;
	}

	/* sclk: nvpll + divisor, href or spll */
	out = 0;
	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
		clk->ssrc = nv_clk_src_href;
	} else {
		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
		if (clk->csrc == nv_clk_src_core)
			out = calc_P((core << 1), shader, &divs);

		/* prefer deriving sclk from the core PLL when close enough
		 * and the combined divider still fits in 3 bits */
		if (abs(shader - out) <=
		    abs(shader - clock) &&
		   (divs + P2) <= 7) {
			clk->ssrc = nv_clk_src_core;
			clk->sctrl = (divs + P2) << 16;
		} else {
			clk->ssrc = nv_clk_src_shader;
			clk->scoef = (N << 8) | M;
			clk->sctrl = P1 << 16;
		}
	}

	/* vclk */
	out = calc_P(core, vdec, &divs);
	clock = calc_P(500000, vdec, &P1);
	if(abs(vdec - out) <= abs(vdec - clock)) {
		clk->vsrc = nv_clk_src_cclk;
		clk->vdiv = divs << 16;
	} else {
		clk->vsrc = nv_clk_src_vdec;
		clk->vdiv = P1 << 16;
	}

	/* Print strategy! */
	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
		   clk->ccoef, clk->cpost, clk->cctrl);
	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
		   clk->scoef, clk->spost, clk->sctrl);
	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
	if (clk->csrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "core: hrefm4\n");
	else
		nvkm_debug(subdev, "core: nvpll\n");

	if (clk->ssrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "shader: hrefm4\n");
	else if (clk->ssrc == nv_clk_src_core)
		nvkm_debug(subdev, "shader: nvpll\n");
	else
		nvkm_debug(subdev, "shader: spll\n");

	if (clk->vsrc == nv_clk_src_hclkm4)
		nvkm_debug(subdev, "vdec: 500MHz\n");
	else
		nvkm_debug(subdev, "vdec: core\n");

	return 0;
}
/* Program the clock configuration computed by mcp77_clk_calc().
 *
 * First switches everything to safe href-derived clocks, then programs
 * the PLL/divider registers per source, polls 0x4080 for the selected
 * PLL status bits (presumably lock — confirm), and finally commits the
 * source selection to 0xc054.  Unused PLLs are powered down afterwards.
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* poll up to 2ms for the selected PLL status bits in 0x4080 */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		fallthrough;
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* gt215_clk_post(NULL) skips unlock when pre never took the lock */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
/* No post-reclock cleanup is required on this hardware. */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
/* MCP77 clock subdev implementation and its programmable domains. */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
/* Allocate and construct the MCP77 clock object; *pclk receives it. */
int
mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct mcp77_clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/subdev.h>
#include <nvfw/acr.h>
/* Log the fields of a (v0) WPR header at debug level. */
void
wpr_header_dump(struct nvkm_subdev *subdev, const struct wpr_header *hdr)
{
	nvkm_debug(subdev, "wprHeader\n");
	nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
	nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
	nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
}
/* Log the fields of a v1 WPR header at debug level. */
void
wpr_header_v1_dump(struct nvkm_subdev *subdev, const struct wpr_header_v1 *hdr)
{
	nvkm_debug(subdev, "wprHeader\n");
	nvkm_debug(subdev, "\tfalconID : %d\n", hdr->falcon_id);
	nvkm_debug(subdev, "\tlsbOffset : 0x%x\n", hdr->lsb_offset);
	nvkm_debug(subdev, "\tbootstrapOwner: %d\n", hdr->bootstrap_owner);
	nvkm_debug(subdev, "\tlazyBootstrap : %d\n", hdr->lazy_bootstrap);
	nvkm_debug(subdev, "\tbinVersion : %d\n", hdr->bin_version);
	nvkm_debug(subdev, "\tstatus : %d\n", hdr->status);
}
/* Log the generic (identifier/version/size) header shared by v2 WPR
 * structures at debug level. */
static void
wpr_generic_header_dump(struct nvkm_subdev *subdev, const struct wpr_generic_header *hdr)
{
	nvkm_debug(subdev, "wprGenericHeader\n");
	nvkm_debug(subdev, "\tidentifier : %04x\n", hdr->identifier);
	nvkm_debug(subdev, "\tversion : %04x\n", hdr->version);
	nvkm_debug(subdev, "\tsize : %08x\n", hdr->size);
}
/* Log a v2 WPR header: generic header followed by the v1 payload. */
void
wpr_header_v2_dump(struct nvkm_subdev *subdev, const struct wpr_header_v2 *hdr)
{
	wpr_generic_header_dump(subdev, &hdr->hdr);
	wpr_header_v1_dump(subdev, &hdr->wpr);
}
/* Log the fields of a v2 LSB (light-secure bootstrap) header. */
void
lsb_header_v2_dump(struct nvkm_subdev *subdev, struct lsb_header_v2 *hdr)
{
	wpr_generic_header_dump(subdev, &hdr->hdr);
	nvkm_debug(subdev, "lsbHeader\n");
	nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
	nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
	nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
	nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
	nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
	nvkm_debug(subdev, "\treserved0 : %08x\n", hdr->rsvd0);
	nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
	nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
	nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
	nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
	nvkm_debug(subdev, "\tappImemOffset : 0x%x\n", hdr->app_imem_offset);
	nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n", hdr->app_dmem_offset);
	nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
	nvkm_debug(subdev, "\tmonitorCodeOff: 0x%x\n", hdr->monitor_code_offset);
	nvkm_debug(subdev, "\tmonitorDataOff: 0x%x\n", hdr->monitor_data_offset);
	nvkm_debug(subdev, "\tmanifestOffset: 0x%x\n", hdr->manifest_offset);
}
/* Log the common trailing fields shared by v0/v1 LSB headers. */
static void
lsb_header_tail_dump(struct nvkm_subdev *subdev, struct lsb_header_tail *hdr)
{
	nvkm_debug(subdev, "lsbHeader\n");
	nvkm_debug(subdev, "\tucodeOff : 0x%x\n", hdr->ucode_off);
	nvkm_debug(subdev, "\tucodeSize : 0x%x\n", hdr->ucode_size);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\tblCodeSize : 0x%x\n", hdr->bl_code_size);
	nvkm_debug(subdev, "\tblImemOff : 0x%x\n", hdr->bl_imem_off);
	nvkm_debug(subdev, "\tblDataOff : 0x%x\n", hdr->bl_data_off);
	nvkm_debug(subdev, "\tblDataSize : 0x%x\n", hdr->bl_data_size);
	nvkm_debug(subdev, "\tappCodeOff : 0x%x\n", hdr->app_code_off);
	nvkm_debug(subdev, "\tappCodeSize : 0x%x\n", hdr->app_code_size);
	nvkm_debug(subdev, "\tappDataOff : 0x%x\n", hdr->app_data_off);
	nvkm_debug(subdev, "\tappDataSize : 0x%x\n", hdr->app_data_size);
	nvkm_debug(subdev, "\tflags : 0x%x\n", hdr->flags);
}
/* Log a (v0) LSB header; only the shared tail carries fields. */
void
lsb_header_dump(struct nvkm_subdev *subdev, struct lsb_header *hdr)
{
	lsb_header_tail_dump(subdev, &hdr->tail);
}
/* Log a v1 LSB header; only the shared tail carries fields. */
void
lsb_header_v1_dump(struct nvkm_subdev *subdev, struct lsb_header_v1 *hdr)
{
	lsb_header_tail_dump(subdev, &hdr->tail);
}
/* Log the fields of a (v0) falcon ACR descriptor, including every WPR
 * region property and the VPR descriptor. */
void
flcn_acr_desc_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc *hdr)
{
	int i;

	nvkm_debug(subdev, "acrDesc\n");
	nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
	nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
	nvkm_debug(subdev, "\tmmuMemRange : 0x%x\n",
		   hdr->mmu_mem_range);
	nvkm_debug(subdev, "\tnoRegions : %d\n",
		   hdr->regions.no_regions);

	for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
		nvkm_debug(subdev, "\tregion[%d] :\n", i);
		nvkm_debug(subdev, "\t startAddr : 0x%x\n",
			   hdr->regions.region_props[i].start_addr);
		nvkm_debug(subdev, "\t endAddr : 0x%x\n",
			   hdr->regions.region_props[i].end_addr);
		nvkm_debug(subdev, "\t regionId : %d\n",
			   hdr->regions.region_props[i].region_id);
		nvkm_debug(subdev, "\t readMask : 0x%x\n",
			   hdr->regions.region_props[i].read_mask);
		nvkm_debug(subdev, "\t writeMask : 0x%x\n",
			   hdr->regions.region_props[i].write_mask);
		nvkm_debug(subdev, "\t clientMask : 0x%x\n",
			   hdr->regions.region_props[i].client_mask);
	}

	nvkm_debug(subdev, "\tucodeBlobSize: %d\n",
		   hdr->ucode_blob_size);
	nvkm_debug(subdev, "\tucodeBlobBase: 0x%llx\n",
		   hdr->ucode_blob_base);
	nvkm_debug(subdev, "\tvprEnabled : %d\n",
		   hdr->vpr_desc.vpr_enabled);
	nvkm_debug(subdev, "\tvprStart : 0x%x\n",
		   hdr->vpr_desc.vpr_start);
	nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
		   hdr->vpr_desc.vpr_end);
	nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
		   hdr->vpr_desc.hdcp_policies);
}
/* Log the fields of a v1 falcon ACR descriptor; v1 regions additionally
 * carry a shadow-memory start address. */
void
flcn_acr_desc_v1_dump(struct nvkm_subdev *subdev, struct flcn_acr_desc_v1 *hdr)
{
	int i;

	nvkm_debug(subdev, "acrDesc\n");
	nvkm_debug(subdev, "\twprRegionId : %d\n", hdr->wpr_region_id);
	nvkm_debug(subdev, "\twprOffset : 0x%x\n", hdr->wpr_offset);
	nvkm_debug(subdev, "\tmmuMemoryRange : 0x%x\n",
		   hdr->mmu_memory_range);
	nvkm_debug(subdev, "\tnoRegions : %d\n",
		   hdr->regions.no_regions);

	for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {
		nvkm_debug(subdev, "\tregion[%d] :\n", i);
		nvkm_debug(subdev, "\t startAddr : 0x%x\n",
			   hdr->regions.region_props[i].start_addr);
		nvkm_debug(subdev, "\t endAddr : 0x%x\n",
			   hdr->regions.region_props[i].end_addr);
		nvkm_debug(subdev, "\t regionId : %d\n",
			   hdr->regions.region_props[i].region_id);
		nvkm_debug(subdev, "\t readMask : 0x%x\n",
			   hdr->regions.region_props[i].read_mask);
		nvkm_debug(subdev, "\t writeMask : 0x%x\n",
			   hdr->regions.region_props[i].write_mask);
		nvkm_debug(subdev, "\t clientMask : 0x%x\n",
			   hdr->regions.region_props[i].client_mask);
		nvkm_debug(subdev, "\t shadowMemStartAddr: 0x%x\n",
			   hdr->regions.region_props[i].shadow_mem_start_addr);
	}

	nvkm_debug(subdev, "\tucodeBlobSize : %d\n",
		   hdr->ucode_blob_size);
	nvkm_debug(subdev, "\tucodeBlobBase : 0x%llx\n",
		   hdr->ucode_blob_base);
	nvkm_debug(subdev, "\tvprEnabled : %d\n",
		   hdr->vpr_desc.vpr_enabled);
	nvkm_debug(subdev, "\tvprStart : 0x%x\n",
		   hdr->vpr_desc.vpr_start);
	nvkm_debug(subdev, "\tvprEnd : 0x%x\n",
		   hdr->vpr_desc.vpr_end);
	nvkm_debug(subdev, "\thdcpPolicies : 0x%x\n",
		   hdr->vpr_desc.hdcp_policies);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/nvfw/acr.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/subdev.h>
#include <nvfw/ls.h>
/* Log the common head fields of a light-secure ucode image descriptor.
 * The date field is copied with a bounded length since it may not be
 * NUL-terminated — presumably; kernel "%s" tolerates a NULL from a
 * failed kstrndup().
 */
static void
nvfw_ls_desc_head(struct nvkm_subdev *subdev,
		  const struct nvfw_ls_desc_head *hdr)
{
	char *date;

	nvkm_debug(subdev, "lsUcodeImgDesc:\n");
	nvkm_debug(subdev, "\tdescriptorSize : %d\n",
		   hdr->descriptor_size);
	nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
	nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n",
		   hdr->tools_version);
	nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);

	date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
	nvkm_debug(subdev, "\tdate : %s\n", date);
	kfree(date);

	nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n",
		   hdr->bootloader_start_offset);
	nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n",
		   hdr->bootloader_size);
	nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n",
		   hdr->bootloader_imem_offset);
	nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n",
		   hdr->bootloader_entry_point);

	nvkm_debug(subdev, "\tappStartOffset : 0x%x\n",
		   hdr->app_start_offset);
	nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
	nvkm_debug(subdev, "\tappImemOffset : 0x%x\n",
		   hdr->app_imem_offset);
	nvkm_debug(subdev, "\tappImemEntry : 0x%x\n",
		   hdr->app_imem_entry);
	nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n",
		   hdr->app_dmem_offset);
	nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n",
		   hdr->app_resident_code_offset);
	nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n",
		   hdr->app_resident_code_size);
	nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n",
		   hdr->app_resident_data_offset);
	nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n",
		   hdr->app_resident_data_size);
}
/* Dump a (v0) light-secure ucode descriptor found at @data and return
 * it, typed. */
const struct nvfw_ls_desc *
nvfw_ls_desc(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_ls_desc *hdr = data;
	int i;

	nvfw_ls_desc_head(subdev, &hdr->head);

	nvkm_debug(subdev, "\tnbOverlays : %d\n", hdr->nb_overlays);
	for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
		nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
			   hdr->load_ovl[i].start, hdr->load_ovl[i].size);
	}
	nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);

	return hdr;
}
/* Dump a v1 light-secure ucode descriptor found at @data and return
 * it, typed. */
const struct nvfw_ls_desc_v1 *
nvfw_ls_desc_v1(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_ls_desc_v1 *hdr = data;
	int i;

	nvfw_ls_desc_head(subdev, &hdr->head);

	nvkm_debug(subdev, "\tnbImemOverlays : %d\n",
		   hdr->nb_imem_overlays);
	/* fix: was printing nb_imem_overlays twice (copy/paste error) */
	nvkm_debug(subdev, "\tnbDmemOverlays : %d\n",
		   hdr->nb_dmem_overlays);
	for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
		nvkm_debug(subdev, "\tloadOvl[%2d] : 0x%x %d\n", i,
			   hdr->load_ovl[i].start, hdr->load_ovl[i].size);
	}
	nvkm_debug(subdev, "\tcompressed : %d\n", hdr->compressed);

	return hdr;
}
/* Dump a v2 light-secure ucode descriptor found at @data and return
 * it, typed.  v2 inlines the head fields and adds secure-bootloader
 * information. */
const struct nvfw_ls_desc_v2 *
nvfw_ls_desc_v2(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_ls_desc_v2 *hdr = data;
	char *date;
	int i;

	nvkm_debug(subdev, "lsUcodeImgDesc:\n");
	nvkm_debug(subdev, "\tdescriptorSize : %d\n", hdr->descriptor_size);
	nvkm_debug(subdev, "\timageSize : %d\n", hdr->image_size);
	nvkm_debug(subdev, "\ttoolsVersion : 0x%x\n", hdr->tools_version);
	nvkm_debug(subdev, "\tappVersion : 0x%x\n", hdr->app_version);

	/* bounded copy — date may not be NUL-terminated (presumably) */
	date = kstrndup(hdr->date, sizeof(hdr->date), GFP_KERNEL);
	nvkm_debug(subdev, "\tdate : %s\n", date);
	kfree(date);

	nvkm_debug(subdev, "\tsecureBootloader : 0x%x\n", hdr->secure_bootloader);
	nvkm_debug(subdev, "\tbootloaderStartOffset: 0x%x\n", hdr->bootloader_start_offset);
	nvkm_debug(subdev, "\tbootloaderSize : 0x%x\n", hdr->bootloader_size);
	nvkm_debug(subdev, "\tbootloaderImemOffset : 0x%x\n", hdr->bootloader_imem_offset);
	nvkm_debug(subdev, "\tbootloaderEntryPoint : 0x%x\n", hdr->bootloader_entry_point);

	nvkm_debug(subdev, "\tappStartOffset : 0x%x\n", hdr->app_start_offset);
	nvkm_debug(subdev, "\tappSize : 0x%x\n", hdr->app_size);
	nvkm_debug(subdev, "\tappImemOffset : 0x%x\n", hdr->app_imem_offset);
	nvkm_debug(subdev, "\tappImemEntry : 0x%x\n", hdr->app_imem_entry);
	nvkm_debug(subdev, "\tappDmemOffset : 0x%x\n", hdr->app_dmem_offset);
	nvkm_debug(subdev, "\tappResidentCodeOffset: 0x%x\n", hdr->app_resident_code_offset);
	nvkm_debug(subdev, "\tappResidentCodeSize : 0x%x\n", hdr->app_resident_code_size);
	nvkm_debug(subdev, "\tappResidentDataOffset: 0x%x\n", hdr->app_resident_data_offset);
	nvkm_debug(subdev, "\tappResidentDataSize : 0x%x\n", hdr->app_resident_data_size);
	nvkm_debug(subdev, "\tnbImemOverlays : %d\n", hdr->nb_imem_overlays);
	nvkm_debug(subdev, "\tnbDmemOverlays : %d\n", hdr->nb_dmem_overlays);
	for (i = 0; i < ARRAY_SIZE(hdr->load_ovl); i++) {
		nvkm_debug(subdev, "\tloadOvl[%d] : 0x%x %d\n", i,
			   hdr->load_ovl[i].start, hdr->load_ovl[i].size);
	}

	return hdr;
}
/* Dump the binary header of a light-secure HS bootloader image at
 * @data and return it, typed. */
const struct nvfw_ls_hsbl_bin_hdr *
nvfw_ls_hsbl_bin_hdr(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_ls_hsbl_bin_hdr *hdr = data;

	nvkm_debug(subdev, "lsHsblBinHdr:\n");
	nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic);
	nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver);
	nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size);
	nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);

	return hdr;
}
/* Dump the signature/metadata header of a light-secure HS bootloader at
 * @data and return it, typed. */
const struct nvfw_ls_hsbl_hdr *
nvfw_ls_hsbl_hdr(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_ls_hsbl_hdr *hdr = data;

	nvkm_debug(subdev, "lsHsblHdr:\n");
	nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
	nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
	nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
	nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
	nvkm_debug(subdev, "\tmetadataOffset : 0x%x\n", hdr->meta_data_offset);
	nvkm_debug(subdev, "\tmetadataSize : 0x%x\n", hdr->meta_data_size);
	nvkm_debug(subdev, "\tnumSig : 0x%x\n", hdr->num_sig);

	return hdr;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/nvfw/ls.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/subdev.h>
#include <nvfw/hs.h>
/* Dump a (v0) heavy-secure firmware header at @data and return it,
 * typed.  v0 carries both debug and production signature locations. */
const struct nvfw_hs_header *
nvfw_hs_header(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_hs_header *hdr = data;

	nvkm_debug(subdev, "hsHeader:\n");
	nvkm_debug(subdev, "\tsigDbgOffset : 0x%x\n", hdr->sig_dbg_offset);
	nvkm_debug(subdev, "\tsigDbgSize : 0x%x\n", hdr->sig_dbg_size);
	nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
	nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
	nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
	nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
	nvkm_debug(subdev, "\thdrOffset : 0x%x\n", hdr->hdr_offset);
	nvkm_debug(subdev, "\thdrSize : 0x%x\n", hdr->hdr_size);

	return hdr;
}
/* Dump a v2 heavy-secure firmware header at @data and return it,
 * typed.  v2 drops the debug signature and adds metadata/multi-sig
 * fields. */
const struct nvfw_hs_header_v2 *
nvfw_hs_header_v2(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_hs_header_v2 *hdr = data;

	nvkm_debug(subdev, "hsHeader:\n");
	nvkm_debug(subdev, "\tsigProdOffset : 0x%x\n", hdr->sig_prod_offset);
	nvkm_debug(subdev, "\tsigProdSize : 0x%x\n", hdr->sig_prod_size);
	nvkm_debug(subdev, "\tpatchLoc : 0x%x\n", hdr->patch_loc);
	nvkm_debug(subdev, "\tpatchSig : 0x%x\n", hdr->patch_sig);
	nvkm_debug(subdev, "\tmetadataOffset : 0x%x\n", hdr->meta_data_offset);
	nvkm_debug(subdev, "\tmetadataSize : 0x%x\n", hdr->meta_data_size);
	nvkm_debug(subdev, "\tnumSig : 0x%x\n", hdr->num_sig);
	nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);
	nvkm_debug(subdev, "\theaderSize : 0x%x\n", hdr->header_size);

	return hdr;
}
/* Dump an HS firmware load header, including its per-app offset/size
 * pairs (stored flat, two u32s per app).  Diagnostic only; returns @data
 * cast to the header type.
 */
const struct nvfw_hs_load_header *
nvfw_hs_load_header(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_hs_load_header *hdr = data;
	int i;
	nvkm_debug(subdev, "hsLoadHeader:\n");
	nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n",
		   hdr->non_sec_code_off);
	nvkm_debug(subdev, "\tnonSecCodeSize : 0x%x\n",
		   hdr->non_sec_code_size);
	nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps);
	for (i = 0; i < hdr->num_apps; i++) {
		/* apps[] is a flat array: even index = offset, odd = size */
		nvkm_debug(subdev,
			   "\tApp[%d] : offset 0x%x size 0x%x\n", i,
			   hdr->apps[(i * 2) + 0], hdr->apps[(i * 2) + 1]);
	}
	return hdr;
}
/* Dump a v2 HS firmware load header; unlike v1, the per-app entries are
 * proper structs rather than a flat u32 array.  Diagnostic only.
 */
const struct nvfw_hs_load_header_v2 *
nvfw_hs_load_header_v2(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_hs_load_header_v2 *hdr = data;
	int i;
	nvkm_debug(subdev, "hsLoadHeader:\n");
	nvkm_debug(subdev, "\tosCodeOffset : 0x%x\n", hdr->os_code_offset);
	nvkm_debug(subdev, "\tosCodeSize : 0x%x\n", hdr->os_code_size);
	nvkm_debug(subdev, "\tosDataOffset : 0x%x\n", hdr->os_data_offset);
	nvkm_debug(subdev, "\tosDataSize : 0x%x\n", hdr->os_data_size);
	nvkm_debug(subdev, "\tnumApps : 0x%x\n", hdr->num_apps);
	for (i = 0; i < hdr->num_apps; i++) {
		nvkm_debug(subdev,
			   "\tApp[%d] : offset 0x%x size 0x%x\n", i,
			   hdr->app[i].offset, hdr->app[i].size);
	}
	return hdr;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/nvfw/hs.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/subdev.h>
#include <nvfw/flcn.h>
/* Dump every field of a falcon "loader config" boot descriptor to the
 * debug log.  Purely diagnostic; no state is modified.
 */
void
loader_config_dump(struct nvkm_subdev *subdev, const struct loader_config *hdr)
{
	nvkm_debug(subdev, "loaderConfig\n");
	nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
	/* was "0x%xx", which printed a spurious trailing 'x' */
	nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
	nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
	nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
	nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\toverlayDmaBase: 0x%x\n", hdr->overlay_dma_base);
	nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
	nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
	nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
	nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
	nvkm_debug(subdev, "\tovlyDmaBase1 : 0x%x\n", hdr->overlay_dma_base1);
}
/* Dump every field of a v1 falcon "loader config" boot descriptor to the
 * debug log.  v1 widens the DMA base addresses to 64 bits.  Diagnostic
 * only; no state is modified.
 */
void
loader_config_v1_dump(struct nvkm_subdev *subdev,
		      const struct loader_config_v1 *hdr)
{
	nvkm_debug(subdev, "loaderConfig\n");
	nvkm_debug(subdev, "\treserved : 0x%08x\n", hdr->reserved);
	nvkm_debug(subdev, "\tdmaIdx : %d\n", hdr->dma_idx);
	/* was "0x%llxx", which printed a spurious trailing 'x' */
	nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
	nvkm_debug(subdev, "\tcodeSizeTotal : 0x%x\n", hdr->code_size_total);
	nvkm_debug(subdev, "\tcodeSizeToLoad: 0x%x\n", hdr->code_size_to_load);
	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
	nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\toverlayDmaBase: 0x%llx\n", hdr->overlay_dma_base);
	nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
	nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
}
/* Dump a falcon bootloader DMEM descriptor (32-bit DMA base variant) to
 * the debug log.  Diagnostic only; no state is modified.
 */
void
flcn_bl_dmem_desc_dump(struct nvkm_subdev *subdev,
		       const struct flcn_bl_dmem_desc *hdr)
{
	nvkm_debug(subdev, "flcnBlDmemDesc\n");
	nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
		   hdr->reserved[3]);
	nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   hdr->signature[0], hdr->signature[1], hdr->signature[2],
		   hdr->signature[3]);
	nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
	nvkm_debug(subdev, "\tcodeDmaBase : 0x%x\n", hdr->code_dma_base);
	nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
	nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
	nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
	nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
	nvkm_debug(subdev, "\tdataDmaBase : 0x%x\n", hdr->data_dma_base);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	nvkm_debug(subdev, "\tcodeDmaBase1 : 0x%x\n", hdr->code_dma_base1);
	nvkm_debug(subdev, "\tdataDmaBase1 : 0x%x\n", hdr->data_dma_base1);
}
/* Dump a v1 falcon bootloader DMEM descriptor (64-bit DMA bases) to the
 * debug log.  Diagnostic only; no state is modified.
 */
void
flcn_bl_dmem_desc_v1_dump(struct nvkm_subdev *subdev,
			  const struct flcn_bl_dmem_desc_v1 *hdr)
{
	nvkm_debug(subdev, "flcnBlDmemDesc\n");
	nvkm_debug(subdev, "\treserved : 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   hdr->reserved[0], hdr->reserved[1], hdr->reserved[2],
		   hdr->reserved[3]);
	nvkm_debug(subdev, "\tsignature : 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   hdr->signature[0], hdr->signature[1], hdr->signature[2],
		   hdr->signature[3]);
	nvkm_debug(subdev, "\tctxDma : %d\n", hdr->ctx_dma);
	nvkm_debug(subdev, "\tcodeDmaBase : 0x%llx\n", hdr->code_dma_base);
	nvkm_debug(subdev, "\tnonSecCodeOff : 0x%x\n", hdr->non_sec_code_off);
	nvkm_debug(subdev, "\tnonSecCodeSize: 0x%x\n", hdr->non_sec_code_size);
	nvkm_debug(subdev, "\tsecCodeOff : 0x%x\n", hdr->sec_code_off);
	nvkm_debug(subdev, "\tsecCodeSize : 0x%x\n", hdr->sec_code_size);
	nvkm_debug(subdev, "\tcodeEntryPoint: 0x%x\n", hdr->code_entry_point);
	nvkm_debug(subdev, "\tdataDmaBase : 0x%llx\n", hdr->data_dma_base);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
}
/* Dump a v2 falcon bootloader DMEM descriptor.  v2 is layout-compatible
 * with v1 up to the argc/argv tail, so the v1 dumper handles the shared
 * prefix and only the two extra fields are printed here.
 */
void
flcn_bl_dmem_desc_v2_dump(struct nvkm_subdev *subdev,
			  const struct flcn_bl_dmem_desc_v2 *hdr)
{
	flcn_bl_dmem_desc_v1_dump(subdev, (void *)hdr);
	nvkm_debug(subdev, "\targc : 0x%08x\n", hdr->argc);
	nvkm_debug(subdev, "\targv : 0x%08x\n", hdr->argv);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/nvfw/flcn.c |
/*
* Copyright 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/subdev.h>
#include <nvfw/fw.h>
/* Dump the generic firmware binary container header to the debug log and
 * return @data cast to the header type for the caller to use.
 */
const struct nvfw_bin_hdr *
nvfw_bin_hdr(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_bin_hdr *hdr = data;
	nvkm_debug(subdev, "binHdr:\n");
	nvkm_debug(subdev, "\tbinMagic : 0x%08x\n", hdr->bin_magic);
	nvkm_debug(subdev, "\tbinVer : %d\n", hdr->bin_ver);
	nvkm_debug(subdev, "\tbinSize : %d\n", hdr->bin_size);
	nvkm_debug(subdev, "\theaderOffset : 0x%x\n", hdr->header_offset);
	nvkm_debug(subdev, "\tdataOffset : 0x%x\n", hdr->data_offset);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	return hdr;
}
/* Dump a firmware bootloader descriptor to the debug log and return
 * @data cast to the descriptor type for the caller to use.
 */
const struct nvfw_bl_desc *
nvfw_bl_desc(struct nvkm_subdev *subdev, const void *data)
{
	const struct nvfw_bl_desc *hdr = data;
	nvkm_debug(subdev, "blDesc\n");
	nvkm_debug(subdev, "\tstartTag : 0x%x\n", hdr->start_tag);
	nvkm_debug(subdev, "\tdmemLoadOff : 0x%x\n", hdr->dmem_load_off);
	nvkm_debug(subdev, "\tcodeOff : 0x%x\n", hdr->code_off);
	nvkm_debug(subdev, "\tcodeSize : 0x%x\n", hdr->code_size);
	nvkm_debug(subdev, "\tdataOff : 0x%x\n", hdr->data_off);
	nvkm_debug(subdev, "\tdataSize : 0x%x\n", hdr->data_size);
	return hdr;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/nvfw/fw.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <engine/falcon.h>
#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
/* Look up the @index'th object class exposed by this falcon engine.
 * On a hit, copies the class into @oclass->base and returns @index;
 * past the end of the (zero-terminated) sclass table, returns the
 * total number of classes instead.
 */
static int
nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
	int count;

	for (count = 0; falcon->func->sclass[count].oclass; count++) {
		if (count == index) {
			oclass->base = falcon->func->sclass[index];
			return index;
		}
	}
	return count;
}
/* Bind a channel context for the falcon: allocate a 256-byte context
 * gpuobj inside @parent with the requested alignment.
 */
static int
nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_device *device = object->engine->subdev.device;

	return nvkm_gpuobj_new(device, 256, align, true, parent, pgpuobj);
}
/* Channel-context class for falcon engines: only context binding is
 * implemented.
 */
static const struct nvkm_object_func
nvkm_falcon_cclass = {
	.bind = nvkm_falcon_cclass_bind,
};
/* Falcon interrupt handler: read the pending/routed interrupt bits,
 * resolve the active channel from the current instance address, let the
 * implementation handle the "method" interrupt (bit 6), acknowledge the
 * "ucode halted" interrupt (bit 4), and report anything left over.
 */
static void
nvkm_falcon_intr(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	/* mask pending bits by the set routed to host (low 16 of DEST),
	 * excluding those routed elsewhere (high 16) */
	u32 dest = nvkm_rd32(device, base + 0x01c);
	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
	struct nvkm_chan *chan;
	unsigned long flags;
	chan = nvkm_chan_get_inst(engine, (u64)inst << 12, &flags);
	if (intr & 0x00000040) {
		/* only ack bit 6 if the implementation actually handled it */
		if (falcon->func->intr) {
			falcon->func->intr(falcon, chan);
			nvkm_wr32(device, base + 0x004, 0x00000040);
			intr &= ~0x00000040;
		}
	}
	if (intr & 0x00000010) {
		nvkm_debug(subdev, "ucode halted\n");
		nvkm_wr32(device, base + 0x004, 0x00000010);
		intr &= ~0x00000010;
	}
	if (intr) {
		/* unexpected interrupt bits: log and ack to avoid a storm */
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, base + 0x004, intr);
	}
	nvkm_chan_put(&chan, flags);
}
/* Stop the falcon.  On full shutdown (!suspend) release the vram boot
 * image and any externally-loaded firmware buffers; in either case, if
 * the engine is still powered, disable context switching and mask all
 * interrupts.  Always returns 0.
 */
static int
nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_device *device = falcon->engine.subdev.device;
	const u32 base = falcon->addr;
	if (!suspend) {
		nvkm_memory_unref(&falcon->core);
		/* only external images were vmemdup()'d and are ours to free;
		 * internal images belong to falcon->func */
		if (falcon->external) {
			vfree(falcon->data.data);
			vfree(falcon->code.data);
			falcon->code.data = NULL;
		}
	}
	if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) {
		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
		nvkm_wr32(device, base + 0x014, 0xffffffff);
	}
	return 0;
}
/* Duplicate @len bytes from @src into a fresh vmalloc() buffer.
 * Returns NULL on allocation failure; caller frees with vfree().
 */
static void *
vmemdup(const void *src, size_t len)
{
	void *dst = vmalloc(len);

	if (!dst)
		return NULL;
	memcpy(dst, src, len);
	return dst;
}
/* One-time probe of falcon capabilities: version, secret (secure boot)
 * level, and code/data memory limits, read either from hard-coded
 * chipset knowledge (pre-nva3 parts lack the caps register) or from the
 * falcon's own capability registers.  Always returns 0.
 */
static int
nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 caps;
	/* determine falcon capabilities */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		/* 0x087000 is treated as the only secret-capable unit here
		 * on these chipsets */
		falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nvkm_rd32(device, base + 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret = (caps & 0x00000030) >> 4;
	}
	caps = nvkm_rd32(device, base + 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;
	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
	return 0;
}
/* Bring the falcon up: wait for secret-level falcons to signal halt,
 * locate firmware (engine-provided "internal" image, an external
 * self-bootstrapping image, or an external code+data segment pair),
 * upload code and data to the falcon, and start execution.
 * Returns 0 on success or a negative errno.
 */
static int
nvkm_falcon_init(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char name[32] = "internal";
	const u32 base = falcon->addr;
	int ret, i;
	/* wait for 'uc halted' to be signalled before continuing */
	if (falcon->secret && falcon->version < 4) {
		/* halt indication moved registers between falcon versions */
		if (!falcon->version) {
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
					break;
			);
		} else {
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
					break;
			);
		}
		nvkm_wr32(device, base + 0x004, 0x00000010);
	}
	/* disable all interrupts */
	nvkm_wr32(device, base + 0x014, 0xffffffff);
	/* no default ucode provided by the engine implementation, try and
	 * locate a "self-bootstrapping" firmware image for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);
		ret = request_firmware(&fw, name, device->dev);
		if (ret == 0) {
			falcon->code.data = vmemdup(fw->data, fw->size);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}
		/* external from here on, whichever image ends up loading;
		 * fini() uses this to know the buffers are vmemdup()'d */
		falcon->external = true;
	}
	/* next step is to try and load "static code/data segment" firmware
	 * images for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
			 device->chipset, falcon->addr >> 12);
		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware data\n");
			return -ENODEV;
		}
		falcon->data.data = vmemdup(fw->data, fw->size);
		falcon->data.size = fw->size;
		release_firmware(fw);
		if (!falcon->data.data)
			return -ENOMEM;
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
			 device->chipset, falcon->addr >> 12);
		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware code\n");
			return -ENODEV;
		}
		falcon->code.data = vmemdup(fw->data, fw->size);
		falcon->code.size = fw->size;
		release_firmware(fw);
		if (!falcon->code.data)
			return -ENOMEM;
	}
	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
		   "static code/data segments" : "self-bootstrapping");
	/* ensure any "self-bootstrapping" firmware image is in vram */
	if (!falcon->data.data && !falcon->core) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      falcon->code.size, 256, false,
				      &falcon->core);
		if (ret) {
			nvkm_error(subdev, "core allocation failed, %d\n", ret);
			return ret;
		}
		nvkm_kmap(falcon->core);
		for (i = 0; i < falcon->code.size; i += 4)
			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
		nvkm_done(falcon->core);
	}
	/* upload firmware bootloader (or the full code segments) */
	if (falcon->core) {
		u64 addr = nvkm_memory_addr(falcon->core);
		if (device->card_type < NV_C0)
			nvkm_wr32(device, base + 0x618, 0x04000000);
		else
			nvkm_wr32(device, base + 0x618, 0x00000114);
		nvkm_wr32(device, base + 0x11c, 0);
		nvkm_wr32(device, base + 0x110, addr >> 8);
		nvkm_wr32(device, base + 0x114, 0);
		nvkm_wr32(device, base + 0x118, 0x00006610);
	} else {
		if (falcon->code.size > falcon->code.limit ||
		    falcon->data.size > falcon->data.limit) {
			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
			return -EINVAL;
		}
		/* code upload interface differs before/after falcon v3 */
		if (falcon->version < 3) {
			nvkm_wr32(device, base + 0xff8, 0x00100000);
			for (i = 0; i < falcon->code.size / 4; i++)
				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
		} else {
			nvkm_wr32(device, base + 0x180, 0x01000000);
			for (i = 0; i < falcon->code.size / 4; i++) {
				/* advance the upload block index every 64 words */
				if ((i & 0x3f) == 0)
					nvkm_wr32(device, base + 0x188, i >> 6);
				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
			}
		}
	}
	/* upload data segment (if necessary), zeroing the remainder */
	if (falcon->version < 3) {
		nvkm_wr32(device, base + 0xff8, 0x00000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
		for (; i < falcon->data.limit; i += 4)
			nvkm_wr32(device, base + 0xff4, 0x00000000);
	} else {
		nvkm_wr32(device, base + 0x1c0, 0x01000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
		for (; i < falcon->data.limit / 4; i++)
			nvkm_wr32(device, base + 0x1c4, 0x00000000);
	}
	/* start it running */
	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
	if (falcon->func->init)
		falcon->func->init(falcon);
	return 0;
}
/* Destructor: no falcon-specific teardown needed; hand the embedded
 * allocation back to the core for kfree().
 */
static void *
nvkm_falcon_dtor(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);

	return falcon;
}
/* Engine vtable shared by all generic falcon-based engines. */
static const struct nvkm_engine_func
nvkm_falcon = {
	.dtor = nvkm_falcon_dtor,
	.oneinit = nvkm_falcon_oneinit,
	.init = nvkm_falcon_init,
	.fini = nvkm_falcon_fini,
	.intr = nvkm_falcon_intr,
	.fifo.sclass = nvkm_falcon_oclass_get,
	.cclass = &nvkm_falcon_cclass,
};
/* Allocate and construct a falcon engine instance at MMIO offset @addr,
 * seeding code/data images from @func if the implementation provides
 * them.  On success *@pengine points at the embedded engine.
 * Returns 0 or a negative errno.
 */
int
nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
		 struct nvkm_engine **pengine)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	falcon->func = func;
	falcon->addr = addr;
	falcon->code.data = func->code.data;
	falcon->code.size = func->code.size;
	falcon->data.data = func->data.data;
	falcon->data.size = func->data.size;
	/* publish before ctor so teardown paths can find the engine */
	*pengine = &falcon->engine;
	return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/falcon.c |
/*
* Copyright 2013 Ilia Mirkin
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <engine/xtensa.h>
#include <core/gpuobj.h>
#include <engine/fifo.h>
/* Look up the @index'th object class exposed by this xtensa engine.
 * On a hit, copies the class into @oclass->base and returns @index;
 * past the end of the (zero-terminated) sclass table, returns the
 * total number of classes instead.
 */
static int
nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
	int count;

	for (count = 0; xtensa->func->sclass[count].oclass; count++) {
		if (count == index) {
			oclass->base = xtensa->func->sclass[index];
			return index;
		}
	}
	return count;
}
/* Bind a channel context for the xtensa engine: allocate a 64KiB
 * context gpuobj inside @parent with the requested alignment.
 */
static int
nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_device *device = object->engine->subdev.device;

	return nvkm_gpuobj_new(device, 0x10000, align, true, parent, pgpuobj);
}
/* Channel-context class for xtensa engines: only context binding is
 * implemented.
 */
static const struct nvkm_object_func
nvkm_xtensa_cclass = {
	.bind = nvkm_xtensa_cclass_bind,
};
/* Xtensa engine interrupt handler: acknowledge pending interrupts, warn
 * on the watchdog bit, and re-enable FIFO_CTRL once the engine looks
 * idle again (register semantics are only partially understood, hence
 * the unk* names).
 */
static void
nvkm_xtensa_intr(struct nvkm_engine *engine)
{
	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = xtensa->addr;
	u32 unk104 = nvkm_rd32(device, base + 0xd04);
	u32 intr = nvkm_rd32(device, base + 0xc20);
	u32 chan = nvkm_rd32(device, base + 0xc28);
	u32 unk10c = nvkm_rd32(device, base + 0xd0c);
	if (intr & 0x10)
		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
	/* ack, then re-read to see what (if anything) is still pending */
	nvkm_wr32(device, base + 0xc20, intr);
	intr = nvkm_rd32(device, base + 0xc20);
	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
	}
}
/* Stop the xtensa engine: disable interrupts and the FIFO, and on full
 * shutdown (!suspend) drop the cached firmware copy in vram.  Always
 * returns 0.
 */
static int
nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
	struct nvkm_device *device = xtensa->engine.subdev.device;
	const u32 base = xtensa->addr;
	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
	if (!suspend)
		nvkm_memory_unref(&xtensa->gpu_fw);
	return 0;
}
/* Start the xtensa engine: on first init, fetch the external firmware
 * image (max 256KiB), copy it into a vram buffer, then program the
 * engine's region/interrupt registers and point it at the firmware.
 * Returns 0 on success or a negative errno.
 */
static int
nvkm_xtensa_init(struct nvkm_engine *engine)
{
	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = xtensa->addr;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u64 addr, size;
	u32 tmp;
	if (!xtensa->gpu_fw) {
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);
		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_warn(subdev, "unable to load firmware %s\n", name);
			return ret;
		}
		/* vram buffer below is a fixed 256KiB; reject larger images */
		if (fw->size > 0x40000) {
			nvkm_warn(subdev, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x40000, 0x1000, false,
				      &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}
		nvkm_kmap(xtensa->gpu_fw);
		for (i = 0; i < fw->size / 4; i++)
			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		nvkm_done(xtensa->gpu_fw);
		release_firmware(fw);
	}
	addr = nvkm_memory_addr(xtensa->gpu_fw);
	size = nvkm_memory_size(xtensa->gpu_fw);
	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
	nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
	nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
	nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
	tmp = nvkm_rd32(device, 0x0);
	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
	return 0;
}
/* Destructor: no xtensa-specific teardown needed; hand the embedded
 * allocation back to the core for kfree().
 */
static void *
nvkm_xtensa_dtor(struct nvkm_engine *engine)
{
	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);

	return xtensa;
}
/* Engine vtable shared by all xtensa-based engines. */
static const struct nvkm_engine_func
nvkm_xtensa = {
	.dtor = nvkm_xtensa_dtor,
	.init = nvkm_xtensa_init,
	.fini = nvkm_xtensa_fini,
	.intr = nvkm_xtensa_intr,
	.fifo.sclass = nvkm_xtensa_oclass_get,
	.cclass = &nvkm_xtensa_cclass,
};
/* Allocate and construct an xtensa engine instance at MMIO offset
 * @addr.  On success *@pengine points at the embedded engine.
 * Returns 0 or a negative errno.
 */
int
nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
		 struct nvkm_engine **pengine)
{
	struct nvkm_xtensa *xtensa;
	if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
		return -ENOMEM;
	xtensa->func = func;
	xtensa->addr = addr;
	/* publish before ctor so teardown paths can find the engine */
	*pengine = &xtensa->engine;
	return nvkm_engine_ctor(&nvkm_xtensa, device, type, inst, enable, &xtensa->engine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv40.h"
/* Program one NV40 performance counter slot: pack the four signal
 * selectors into a single source word, enable the domain with its mode,
 * then write the source and logic-op registers for the counter's slot.
 */
static void
nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
		  struct nvkm_perfctr *ctr)
{
	struct nvkm_device *device = pm->engine.subdev.device;
	u32 log = ctr->logic_op;
	u32 src = 0x00000000;
	int i;
	/* one signal selector per byte of the source word */
	for (i = 0; i < 4; i++)
		src |= ctr->signal[i] << (i * 8);
	nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
	nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
	nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
}
/* Read back the value of one NV40 performance counter (the register
 * offset depends on the slot) and the domain's cycle counter.
 */
static void
nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
		  struct nvkm_perfctr *ctr)
{
	struct nvkm_device *device = pm->engine.subdev.device;
	switch (ctr->slot) {
	case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
	case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
	case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
	case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
	}
	dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
}
/* Advance to the next sample period.  The PGRAPH write is only issued
 * once per pm sequence (tracked in the nv40-private state) to latch the
 * counters.
 */
static void
nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
	if (nv40pm->sequence != pm->sequence) {
		nvkm_wr32(device, 0x400084, 0x00000020);
		nv40pm->sequence = pm->sequence;
	}
}
/* Counter-domain ops shared by NV40-family performance monitors. */
const struct nvkm_funcdom
nv40_perfctr_func = {
	.init = nv40_perfctr_init,
	.read = nv40_perfctr_read,
	.next = nv40_perfctr_next,
};
/* NV40 pm engine has no chip-specific engine hooks; all behaviour comes
 * from the per-domain nvkm_funcdom above.
 */
static const struct nvkm_pm_func
nv40_pm_ = {
};
/* Allocate an NV40-family pm engine, construct the base object, and
 * register the "pc" performance-counter domains described by @doms.
 * On success *@ppm points at the embedded base.  Returns 0 or a
 * negative errno.
 */
int
nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	struct nv40_pm *pm;
	int ret;
	if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
		return -ENOMEM;
	*ppm = &pm->base;
	ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
	if (ret)
		return ret;
	return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
}
/* NV40 counter domains: five 0x20-signal domains with no named signals
 * (raw selectors only), all driven by the common nv40 counter ops.
 * Zero-terminated.
 */
static const struct nvkm_specdom
nv40_pm[] = {
	{ 0x20, (const struct nvkm_specsig[]) {
			{}
		}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
			{}
		}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
			{}
		}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
			{}
		}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
			{}
		}, &nv40_perfctr_func },
	{}
};
/* Public constructor for the NV40 pm engine: instantiate the common
 * NV40-family pm with the nv40 domain table.
 */
int
nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
/* GK104 PMFB (memory partition) signal-source muxes: register address,
 * bitfield muxes within it, and a display name.  Field meanings are
 * largely unknown (hence unk*).  Zero-terminated.
 */
static const struct nvkm_specsrc
gk104_pmfb_sources[] = {
	{ 0x140028, (const struct nvkm_specmux[]) {
			{ 0x3fff, 0, "unk0" },
			{ 0x7, 16, "unk16" },
			{ 0x3, 24, "unk24" },
			{ 0x2, 28, "unk28" },
			{}
		}, "pmfb0_pm_unk28" },
	{ 0x14125c, (const struct nvkm_specmux[]) {
			{ 0x3fff, 0, "unk0" },
			{}
		}, "pmfb0_subp0_pm_unk25c" },
	{ 0x14165c, (const struct nvkm_specmux[]) {
			{ 0x3fff, 0, "unk0" },
			{}
		}, "pmfb0_subp1_pm_unk25c" },
	{ 0x141a5c, (const struct nvkm_specmux[]) {
			{ 0x3fff, 0, "unk0" },
			{}
		}, "pmfb0_subp2_pm_unk25c" },
	{ 0x141e5c, (const struct nvkm_specmux[]) {
			{ 0x3fff, 0, "unk0" },
			{}
		}, "pmfb0_subp3_pm_unk25c" },
	{}
};
/* GK104 TEX unit signal-source muxes (gpc0/tpc0 registers); the "true"
 * flag marks muxes that are user-selectable.  Zero-terminated.
 */
static const struct nvkm_specsrc
gk104_tex_sources[] = {
	{ 0x5042c0, (const struct nvkm_specmux[]) {
			{ 0xf, 0, "sel0", true },
			{ 0x7, 8, "sel1", true },
			{}
		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
	{ 0x5042c8, (const struct nvkm_specmux[]) {
			{ 0x1f, 0, "sel", true },
			{}
		}, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
	{ 0x5042b8, (const struct nvkm_specmux[]) {
			{ 0xff, 0, "sel", true },
			{}
		}, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
	{}
};
/* GK104 HUB counter domains: per-domain signal count followed by the
 * named signals within it; all use the common gf100 counter ops.
 * Zero-terminated.
 */
static const struct nvkm_specdom
gk104_pm_hub[] = {
	{ 0x60, (const struct nvkm_specsig[]) {
			{ 0x47, "hub00_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x40, (const struct nvkm_specsig[]) {
			{ 0x27, "hub01_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x60, (const struct nvkm_specsig[]) {
			{ 0x47, "hub02_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x60, (const struct nvkm_specsig[]) {
			{ 0x47, "hub03_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x40, (const struct nvkm_specsig[]) {
			{ 0x03, "host_mmio_rd" },
			{ 0x27, "hub04_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x60, (const struct nvkm_specsig[]) {
			{ 0x47, "hub05_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0xc0, (const struct nvkm_specsig[]) {
			{ 0x74, "host_fb_rd3x" },
			{ 0x75, "host_fb_rd3x_2" },
			{ 0xa7, "hub06_user_0" },
			{}
		}, &gf100_perfctr_func },
	{ 0x60, (const struct nvkm_specsig[]) {
			{ 0x47, "hub07_user_0" },
			{}
		}, &gf100_perfctr_func },
	{}
};
/* GK104 GPC counter domains.  Domain 2 exposes the per-TPC texture
 * unit signals; every signal id maps to the matching "_NN" name in
 * ascending order (note: hardware skips id 0x09).  The 0x0c/0x0d
 * entries previously had their names crossed; restored so id and
 * name suffix agree, consistent with the rest of the table.
 */
static const struct nvkm_specdom
gk104_pm_gpc[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0xc7, "gpc00_user_0" },
{}
}, &gf100_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &gf100_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{ 0x00, "gpc02_tex_00", gk104_tex_sources },
{ 0x01, "gpc02_tex_01", gk104_tex_sources },
{ 0x02, "gpc02_tex_02", gk104_tex_sources },
{ 0x03, "gpc02_tex_03", gk104_tex_sources },
{ 0x04, "gpc02_tex_04", gk104_tex_sources },
{ 0x05, "gpc02_tex_05", gk104_tex_sources },
{ 0x06, "gpc02_tex_06", gk104_tex_sources },
{ 0x07, "gpc02_tex_07", gk104_tex_sources },
{ 0x08, "gpc02_tex_08", gk104_tex_sources },
{ 0x0a, "gpc02_tex_0a", gk104_tex_sources },
{ 0x0b, "gpc02_tex_0b", gk104_tex_sources },
{ 0x0c, "gpc02_tex_0c", gk104_tex_sources },
{ 0x0d, "gpc02_tex_0d", gk104_tex_sources },
{ 0x0e, "gpc02_tex_0e", gk104_tex_sources },
{ 0x0f, "gpc02_tex_0f", gk104_tex_sources },
{ 0x10, "gpc02_tex_10", gk104_tex_sources },
{ 0x11, "gpc02_tex_11", gk104_tex_sources },
{ 0x12, "gpc02_tex_12", gk104_tex_sources },
{}
}, &gf100_perfctr_func },
{}
};
/* GK104 PART (framebuffer partition) counter domains: PBFB signals
 * from the GF100 table plus the GK104 PMFB signal set.
 */
static const struct nvkm_specdom
gk104_pm_part[] = {
{ 0x60, (const struct nvkm_specsig[]) {
{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
{ 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
{ 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
{ 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
{ 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
{ 0x10, "part00_pmfb_04", gk104_pmfb_sources },
{ 0x12, "part00_pmfb_05", gk104_pmfb_sources },
{ 0x15, "part00_pmfb_06", gk104_pmfb_sources },
{ 0x16, "part00_pmfb_07", gk104_pmfb_sources },
{ 0x18, "part00_pmfb_08", gk104_pmfb_sources },
{ 0x21, "part00_pmfb_09", gk104_pmfb_sources },
{ 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
{ 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
{ 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
{ 0x47, "part00_user_0" },
{}
}, &gf100_perfctr_func },
{ 0x60, (const struct nvkm_specsig[]) {
{ 0x47, "part01_user_0" },
{}
}, &gf100_perfctr_func },
{}
};
/* GK104 PM engine description: the three per-unit domain lists fed
 * to the common GF100 constructor. */
static const struct gf100_pm_func
gk104_pm = {
.doms_gpc = gk104_pm_gpc,
.doms_hub = gk104_pm_hub,
.doms_part = gk104_pm_part,
};
/* Instantiate the GK104 PM engine via the shared GF100 constructor. */
int
gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	const struct gf100_pm_func *func = &gk104_pm;

	return gf100_pm_new_(func, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "gf100.h"
/* GF100 PBFB signal sources, selected through the broadcast register
 * 0x10f100.  Shared by later chipsets (gf117/gk104). */
const struct nvkm_specsrc
gf100_pbfb_sources[] = {
{ 0x10f100, (const struct nvkm_specmux[]) {
{ 0x1, 0, "unk0" },
{ 0x3f, 4, "unk4" },
{}
}, "pbfb_broadcast_pm_unk100" },
{}
};
/* GF100 PMFB signal sources, selected through register 0x140028.
 * Field layout matches the gk104/gf117 tables for the same register;
 * the last field was mislabelled { 0x2, 29, "unk29" } — restored to
 * { 0x2, 28, "unk28" } to agree with the sibling chipset tables.
 */
const struct nvkm_specsrc
gf100_pmfb_sources[] = {
{ 0x140028, (const struct nvkm_specmux[]) {
{ 0x3fff, 0, "unk0" },
{ 0x7, 16, "unk16" },
{ 0x3, 24, "unk24" },
{ 0x2, 28, "unk28" },
{}
}, "pmfb0_pm_unk28" },
{}
};
/* GF100 per-TPC L1 cache signal mux (enable bit required). */
static const struct nvkm_specsrc
gf100_l1_sources[] = {
{ 0x5044a8, (const struct nvkm_specmux[]) {
{ 0x3f, 0, "sel", true },
{}
}, "pgraph_gpc0_tpc0_l1_pm_mux" },
{}
};
/* GF100 per-TPC texture-unit signal mux (enable bit required). */
static const struct nvkm_specsrc
gf100_tex_sources[] = {
{ 0x5042c0, (const struct nvkm_specmux[]) {
{ 0xf, 0, "sel0", true },
{ 0x7, 8, "sel1", true },
{}
}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
{}
};
/* GF100 per-TPC "unk400" unit signal mux (enable bit required). */
static const struct nvkm_specsrc
gf100_unk400_sources[] = {
{ 0x50440c, (const struct nvkm_specmux[]) {
{ 0x3f, 0, "sel", true },
{}
}, "pgraph_gpc0_tpc0_unk400_pm_mux" },
{}
};
/* No HUB counter domains are exposed on GF100. */
static const struct nvkm_specdom
gf100_pm_hub[] = {
{}
};
/* GF100 GPC counter domain: L1, texture and unk400 unit signals.
 * Reused by gf117 as well (hence non-static). */
const struct nvkm_specdom
gf100_pm_gpc[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x00, "gpc00_l1_00", gf100_l1_sources },
{ 0x01, "gpc00_l1_01", gf100_l1_sources },
{ 0x02, "gpc00_l1_02", gf100_l1_sources },
{ 0x03, "gpc00_l1_03", gf100_l1_sources },
{ 0x05, "gpc00_l1_04", gf100_l1_sources },
{ 0x06, "gpc00_l1_05", gf100_l1_sources },
{ 0x0a, "gpc00_tex_00", gf100_tex_sources },
{ 0x0b, "gpc00_tex_01", gf100_tex_sources },
{ 0x0c, "gpc00_tex_02", gf100_tex_sources },
{ 0x0d, "gpc00_tex_03", gf100_tex_sources },
{ 0x0e, "gpc00_tex_04", gf100_tex_sources },
{ 0x0f, "gpc00_tex_05", gf100_tex_sources },
{ 0x10, "gpc00_tex_06", gf100_tex_sources },
{ 0x11, "gpc00_tex_07", gf100_tex_sources },
{ 0x12, "gpc00_tex_08", gf100_tex_sources },
{ 0x26, "gpc00_unk400_00", gf100_unk400_sources },
{}
}, &gf100_perfctr_func },
{}
};
/* GF100 PART (framebuffer partition) counter domain: PBFB and PMFB
 * signals. */
static const struct nvkm_specdom
gf100_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
{ 0x10, "part00_pbfb_01", gf100_pbfb_sources },
{ 0x21, "part00_pmfb_00", gf100_pmfb_sources },
{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
{ 0x00, "part00_pmfb_02", gf100_pmfb_sources },
{ 0x02, "part00_pmfb_03", gf100_pmfb_sources },
{ 0x01, "part00_pmfb_04", gf100_pmfb_sources },
{ 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
{ 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
{ 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
{ 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
{ 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
{ 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
{ 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
{}
}, &gf100_perfctr_func },
{}
};
/* Program one hardware counter slot within a domain: pack the four
 * signal selects into one register and write the logic op.  Register
 * offsets are relative to the domain's base address (dom->addr).
 */
static void
gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
struct nvkm_device *device = pm->engine.subdev.device;
u32 log = ctr->logic_op;
u32 src = 0x00000000;
int i;
/* one signal select per byte, four signals per counter */
for (i = 0; i < 4; i++)
src |= ctr->signal[i] << (i * 8);
/* domain control: mode in bits 3+, plus fixed enable bits */
nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
}
/* Read back one counter slot's value and the domain clock count.
 * Note the slot->register mapping is not linear (slot 2 lives at the
 * lowest offset). */
static void
gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
struct nvkm_perfctr *ctr)
{
struct nvkm_device *device = pm->engine.subdev.device;
switch (ctr->slot) {
case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
}
/* cycle count for the same sampling window, used for ratios */
dom->clk = nvkm_rd32(device, dom->addr + 0x070);
}
/* Latch the current counter values and start the next sampling
 * window for a domain. */
static void
gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
struct nvkm_device *device = pm->engine.subdev.device;
/* NOTE(review): 0x06c looks like a signal-count-derived window
 * setup value — exact semantics undocumented; confirm against
 * hardware docs before changing. */
nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
}
/* Per-domain counter operations shared by all GF100-family chips. */
const struct nvkm_funcdom
gf100_perfctr_func = {
.init = gf100_perfctr_init,
.read = gf100_perfctr_read,
.next = gf100_perfctr_next,
};
/* Reset the PCOUNTER unit on engine shutdown by pulsing its bit in
 * the master enable register (0x000200). */
static void
gf100_pm_fini(struct nvkm_pm *pm)
{
struct nvkm_device *device = pm->engine.subdev.device;
nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
}
/* Base-class hooks for the GF100-family PM engine. */
static const struct nvkm_pm_func
gf100_pm_ = {
.fini = gf100_pm_fini,
};
/* Common GF100-family PM constructor: allocate the engine, then
 * register the HUB domain plus one GPC/PART domain per present unit.
 * Unit presence is derived from the unit-count register minus the
 * floorswept-unit masks.  Returns 0 or a negative errno.
 */
int
gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
struct nvkm_pm *pm;
u32 mask;
int ret;
if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
return -ENOMEM;
ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
if (ret)
return ret;
/* HUB */
ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
func->doms_hub);
if (ret)
return ret;
/* GPC: count register 0x022430, minus floorswept masks */
mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
mask &= ~nvkm_rd32(device, 0x022504);
mask &= ~nvkm_rd32(device, 0x022584);
ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
0x1000, 0x200, func->doms_gpc);
if (ret)
return ret;
/* PART: count register 0x022438, minus floorswept masks */
mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
mask &= ~nvkm_rd32(device, 0x022548);
mask &= ~nvkm_rd32(device, 0x0225c8);
ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
0x1000, 0x200, func->doms_part);
if (ret)
return ret;
return 0;
}
/* GF100 PM engine description: domain lists for the constructor. */
static const struct gf100_pm_func
gf100_pm = {
.doms_gpc = gf100_pm_gpc,
.doms_hub = gf100_pm_hub,
.doms_part = gf100_pm_part,
};
/* Instantiate the GF100 PM engine via the shared constructor. */
int
gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	const struct gf100_pm_func *func = &gf100_pm;

	return gf100_pm_new_(func, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv40.h"
/* NV50 ZCULL signal source mux; shared with gt200 (non-static). */
const struct nvkm_specsrc
nv50_zcull_sources[] = {
{ 0x402ca4, (const struct nvkm_specmux[]) {
{ 0x7fff, 0, "unk0" },
{}
}, "pgraph_zcull_pm_unka4" },
{}
};
/* NV50 ZROP signal source mux; shared with gt200 (non-static).
 * "true" marks fields that need the mux enable bit set. */
const struct nvkm_specsrc
nv50_zrop_sources[] = {
{ 0x40708c, (const struct nvkm_specmux[]) {
{ 0xf, 0, "sel0", true },
{ 0xf, 16, "sel1", true },
{}
}, "pgraph_rop0_zrop_pm_mux" },
{}
};
/* NV50 PROP (pre-ROP) signal source mux. */
static const struct nvkm_specsrc
nv50_prop_sources[] = {
{ 0x40be50, (const struct nvkm_specmux[]) {
{ 0x1f, 0, "sel", true },
{}
}, "pgraph_tpc3_prop_pm_mux" },
{}
};
/* NV50 CROP (colour ROP) signal source mux. */
static const struct nvkm_specsrc
nv50_crop_sources[] = {
{ 0x407008, (const struct nvkm_specmux[]) {
{ 0x7, 0, "sel0", true },
{ 0x7, 16, "sel1", true },
{}
}, "pgraph_rop0_crop_pm_mux" },
{}
};
/* NV50 texture-unit signal source mux. */
static const struct nvkm_specsrc
nv50_tex_sources[] = {
{ 0x40b808, (const struct nvkm_specmux[]) {
{ 0x3fff, 0, "unk0" },
{}
}, "pgraph_tpc3_tex_unk08" },
{}
};
/* NV50 vertex-fetch signal source mux. */
static const struct nvkm_specsrc
nv50_vfetch_sources[] = {
{ 0x400c0c, (const struct nvkm_specmux[]) {
{ 0x1, 0, "unk0" },
{}
}, "pgraph_vfetch_unk0c" },
{}
};
/* NV50 counter domains.  Domain 1 carries front-end PGRAPH signals
 * (vfetch/vattr/zcull/trast/strmout), domain 2 the back-end signals
 * (prop/tex/crop/zrop); remaining domains expose no named signals.
 */
static const struct nvkm_specdom
nv50_pm[] = {
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0xf0, (const struct nvkm_specsig[]) {
{ 0xc8, "pc01_gr_idle" },
{ 0x7f, "pc01_strmout_00" },
{ 0x80, "pc01_strmout_01" },
{ 0xdc, "pc01_trast_00" },
{ 0xdd, "pc01_trast_01" },
{ 0xde, "pc01_trast_02" },
{ 0xdf, "pc01_trast_03" },
{ 0xe2, "pc01_trast_04" },
{ 0xe3, "pc01_trast_05" },
{ 0x7c, "pc01_vattr_00" },
{ 0x7d, "pc01_vattr_01" },
{ 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
{ 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
{ 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
{ 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
{ 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
{ 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
{ 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
{ 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
{ 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
{ 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
{ 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
{ 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
{ 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
{ 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
{ 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
{ 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
{ 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
{ 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
{ 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
{ 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
{ 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
{ 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
{ 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
{ 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
{ 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
{ 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
{ 0x20, "pc01_zcull_00", nv50_zcull_sources },
{ 0x21, "pc01_zcull_01", nv50_zcull_sources },
{ 0x22, "pc01_zcull_02", nv50_zcull_sources },
{ 0x23, "pc01_zcull_03", nv50_zcull_sources },
{ 0x24, "pc01_zcull_04", nv50_zcull_sources },
{ 0x25, "pc01_zcull_05", nv50_zcull_sources },
{ 0xae, "pc01_unk00" },
{ 0xee, "pc01_trailer" },
{}
}, &nv40_perfctr_func },
{ 0xf0, (const struct nvkm_specsig[]) {
{ 0x52, "pc02_crop_00", nv50_crop_sources },
{ 0x53, "pc02_crop_01", nv50_crop_sources },
{ 0x54, "pc02_crop_02", nv50_crop_sources },
{ 0x55, "pc02_crop_03", nv50_crop_sources },
{ 0x00, "pc02_prop_00", nv50_prop_sources },
{ 0x01, "pc02_prop_01", nv50_prop_sources },
{ 0x02, "pc02_prop_02", nv50_prop_sources },
{ 0x03, "pc02_prop_03", nv50_prop_sources },
{ 0x04, "pc02_prop_04", nv50_prop_sources },
{ 0x05, "pc02_prop_05", nv50_prop_sources },
{ 0x06, "pc02_prop_06", nv50_prop_sources },
{ 0x07, "pc02_prop_07", nv50_prop_sources },
{ 0x70, "pc02_tex_00", nv50_tex_sources },
{ 0x71, "pc02_tex_01", nv50_tex_sources },
{ 0x72, "pc02_tex_02", nv50_tex_sources },
{ 0x73, "pc02_tex_03", nv50_tex_sources },
{ 0x40, "pc02_tex_04", nv50_tex_sources },
{ 0x41, "pc02_tex_05", nv50_tex_sources },
{ 0x42, "pc02_tex_06", nv50_tex_sources },
{ 0x6c, "pc02_zrop_00", nv50_zrop_sources },
{ 0x6d, "pc02_zrop_01", nv50_zrop_sources },
{ 0x6e, "pc02_zrop_02", nv50_zrop_sources },
{ 0x6f, "pc02_zrop_03", nv50_zrop_sources },
{ 0xee, "pc02_trailer" },
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{}
};
/* Instantiate the NV50 PM engine via the shared NV40 constructor. */
int
nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	const struct nvkm_specdom *doms = nv50_pm;

	return nv40_pm_new_(doms, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c |
/*
* Copyright 2015 Samuel Pitoiset
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Samuel Pitoiset
*/
#include "gf100.h"
/* GF117 PMFB signal sources: broadcast mux plus the single
 * subpartition 0 mux (GF117 has fewer subpartitions than GK104). */
static const struct nvkm_specsrc
gf117_pmfb_sources[] = {
{ 0x140028, (const struct nvkm_specmux[]) {
{ 0x3fff, 0, "unk0" },
{ 0x7, 16, "unk16" },
{ 0x3, 24, "unk24" },
{ 0x2, 28, "unk28" },
{}
}, "pmfb0_pm_unk28" },
{ 0x14125c, (const struct nvkm_specmux[]) {
{ 0x3fff, 0, "unk0" },
{}
}, "pmfb0_subp0_pm_unk25c" },
{}
};
/* No HUB counter domains are exposed on GF117. */
static const struct nvkm_specdom
gf117_pm_hub[] = {
{}
};
/* GF117 PART counter domain: GF100 PBFB signals plus the GF117 PMFB
 * signal set. */
static const struct nvkm_specdom
gf117_pm_part[] = {
{ 0xe0, (const struct nvkm_specsig[]) {
{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
{ 0x12, "part00_pmfb_00", gf117_pmfb_sources },
{ 0x15, "part00_pmfb_01", gf117_pmfb_sources },
{ 0x16, "part00_pmfb_02", gf117_pmfb_sources },
{ 0x18, "part00_pmfb_03", gf117_pmfb_sources },
{ 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
{ 0x23, "part00_pmfb_05", gf117_pmfb_sources },
{ 0x24, "part00_pmfb_06", gf117_pmfb_sources },
{ 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
{ 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
{ 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
{ 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
{ 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
{}
}, &gf100_perfctr_func },
{}
};
/* GF117 PM engine description: reuses the GF100 GPC domains with
 * GF117-specific HUB/PART lists. */
static const struct gf100_pm_func
gf117_pm = {
.doms_gpc = gf100_pm_gpc,
.doms_hub = gf117_pm_hub,
.doms_part = gf117_pm_part,
};
/* Instantiate the GF117 PM engine via the shared GF100 constructor. */
int
gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	const struct gf100_pm_func *func = &gf117_pm;

	return gf100_pm_new_(func, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c |
/*
* Copyright 2015 Nouveau project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Samuel Pitoiset
*/
#include "nv40.h"
/* GT200 CROP signal source mux; shared with gt215 (non-static). */
const struct nvkm_specsrc
gt200_crop_sources[] = {
{ 0x407008, (const struct nvkm_specmux[]) {
{ 0xf, 0, "sel0", true },
{ 0x1f, 16, "sel1", true },
{}
}, "pgraph_rop0_crop_pm_mux" },
{}
};
/* GT200 PROP (pre-ROP) signal source mux (non-static, reused). */
const struct nvkm_specsrc
gt200_prop_sources[] = {
{ 0x408750, (const struct nvkm_specmux[]) {
{ 0x3f, 0, "sel", true },
{}
}, "pgraph_tpc0_prop_pm_mux" },
{}
};
/* GT200 texture-unit signal source mux (non-static, reused). */
const struct nvkm_specsrc
gt200_tex_sources[] = {
{ 0x408508, (const struct nvkm_specmux[]) {
{ 0xfffff, 0, "unk0" },
{}
}, "pgraph_tpc0_tex_unk08" },
{}
};
/* GT200 counter domains.  Same layout as nv50_pm (front-end signals
 * in domain 1, back-end in domain 2) but with GT200 signal ids and
 * source tables, and three extra empty domains.
 */
static const struct nvkm_specdom
gt200_pm[] = {
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0xf0, (const struct nvkm_specsig[]) {
{ 0xc9, "pc01_gr_idle" },
{ 0x84, "pc01_strmout_00" },
{ 0x85, "pc01_strmout_01" },
{ 0xde, "pc01_trast_00" },
{ 0xdf, "pc01_trast_01" },
{ 0xe0, "pc01_trast_02" },
{ 0xe1, "pc01_trast_03" },
{ 0xe4, "pc01_trast_04" },
{ 0xe5, "pc01_trast_05" },
{ 0x82, "pc01_vattr_00" },
{ 0x83, "pc01_vattr_01" },
{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
{ 0xb0, "pc01_unk00" },
{ 0xec, "pc01_trailer" },
{}
}, &nv40_perfctr_func },
{ 0xf0, (const struct nvkm_specsig[]) {
{ 0x55, "pc02_crop_00", gt200_crop_sources },
{ 0x56, "pc02_crop_01", gt200_crop_sources },
{ 0x57, "pc02_crop_02", gt200_crop_sources },
{ 0x58, "pc02_crop_03", gt200_crop_sources },
{ 0x00, "pc02_prop_00", gt200_prop_sources },
{ 0x01, "pc02_prop_01", gt200_prop_sources },
{ 0x02, "pc02_prop_02", gt200_prop_sources },
{ 0x03, "pc02_prop_03", gt200_prop_sources },
{ 0x04, "pc02_prop_04", gt200_prop_sources },
{ 0x05, "pc02_prop_05", gt200_prop_sources },
{ 0x06, "pc02_prop_06", gt200_prop_sources },
{ 0x07, "pc02_prop_07", gt200_prop_sources },
{ 0x78, "pc02_tex_00", gt200_tex_sources },
{ 0x79, "pc02_tex_01", gt200_tex_sources },
{ 0x7a, "pc02_tex_02", gt200_tex_sources },
{ 0x7b, "pc02_tex_03", gt200_tex_sources },
{ 0x32, "pc02_tex_04", gt200_tex_sources },
{ 0x33, "pc02_tex_05", gt200_tex_sources },
{ 0x34, "pc02_tex_06", gt200_tex_sources },
{ 0x74, "pc02_zrop_00", nv50_zrop_sources },
{ 0x75, "pc02_zrop_01", nv50_zrop_sources },
{ 0x76, "pc02_zrop_02", nv50_zrop_sources },
{ 0x77, "pc02_zrop_03", nv50_zrop_sources },
{ 0xec, "pc02_trailer" },
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{ 0x20, (const struct nvkm_specsig[]) {
{}
}, &nv40_perfctr_func },
{}
};
/* Instantiate the GT200 PM engine via the shared NV40 constructor. */
int
gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	const struct nvkm_specdom *doms = gt200_pm;

	return nv40_pm_new_(doms, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/client.h>
#include <core/option.h>
#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
struct nvkm_perfdom *dom;
u8 domain_nr = 0;
list_for_each_entry(dom, &pm->domains, head)
domain_nr++;
return domain_nr;
}
/* Count the named (i.e. valid) signals of a domain; 0 for NULL. */
static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 nr = 0;
	int i;

	if (!dom)
		return 0;

	for (i = 0; i < dom->signal_nr; i++)
		nr += dom->signal[i].name ? 1 : 0;
	return nr;
}
/* Return the di'th domain in registration order, or NULL. */
static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *it;
	int idx = 0;

	list_for_each_entry(it, &pm->domains, head) {
		if (idx == di)
			return it;
		idx++;
	}
	return NULL;
}
/* Look up signal si within domain di.  *pdom caches the domain
 * across calls: it is resolved from di only when still NULL.
 * Returns NULL for an unknown domain or an unnamed signal slot. */
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (!dom) {
		dom = nvkm_perfdom_find(pm, di);
		if (!dom)
			return NULL;
		*pdom = dom;
	}

	return dom->signal[si].name ? &dom->signal[si] : NULL;
}
/* Count the non-zero source slots attached to a signal. */
static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++)
		nr += sig->source[i] ? 1 : 0;
	return nr;
}
/* Resolve source id si for signal sig.  si must appear in the
 * signal's source list; the returned object is the si'th entry of
 * the global pm->sources list (source ids are 1-based, hence tmp
 * starting at 1).  Returns NULL if si is not valid for this signal.
 */
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
struct nvkm_perfsrc *src;
bool found = false;
int tmp = 1; /* Sources ID start from 1 */
u8 i;
/* check that si is one of the sources this signal supports */
for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
if (sig->source[i] == si) {
found = true;
break;
}
}
if (found) {
/* source id doubles as 1-based index into the global list */
list_for_each_entry(src, &pm->sources, head) {
if (tmp++ == si)
return src;
}
}
return NULL;
}
/* Program the source muxes for every signal/source pair of a
 * counter.  Each ctr->source[i][j] packs the source id in the low 32
 * bits and the mux field value in the high 32 bits.  Returns 0 or
 * -EINVAL if a signal/source cannot be resolved.
 */
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
struct nvkm_subdev *subdev = &pm->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_perfdom *dom = NULL;
struct nvkm_perfsig *sig;
struct nvkm_perfsrc *src;
u32 mask, value;
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 8 && ctr->source[i][j]; j++) {
sig = nvkm_perfsig_find(pm, ctr->domain,
ctr->signal[i], &dom);
if (!sig)
return -EINVAL;
src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
if (!src)
return -EINVAL;
/* set enable bit if needed */
mask = value = 0x00000000;
if (src->enable)
mask = value = 0x80000000;
mask |= (src->mask << src->shift);
/* mux value lives in the upper 32 bits of source[i][j] */
value |= ((ctr->source[i][j] >> 32) << src->shift);
/* enable the source */
nvkm_mask(device, src->addr, mask, value);
nvkm_debug(subdev,
"enabled source %08x %08x %08x\n",
src->addr, mask, value);
}
}
return 0;
}
/* Undo nvkm_perfsrc_enable(): clear the mux fields (and enable bit,
 * if any) for every signal/source pair of a counter.  Returns 0 or
 * -EINVAL if a signal/source cannot be resolved.
 */
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
struct nvkm_subdev *subdev = &pm->engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_perfdom *dom = NULL;
struct nvkm_perfsig *sig;
struct nvkm_perfsrc *src;
u32 mask;
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 8 && ctr->source[i][j]; j++) {
sig = nvkm_perfsig_find(pm, ctr->domain,
ctr->signal[i], &dom);
if (!sig)
return -EINVAL;
src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
if (!src)
return -EINVAL;
/* unset enable bit if needed */
mask = 0x00000000;
if (src->enable)
mask = 0x80000000;
mask |= (src->mask << src->shift);
/* disable the source */
nvkm_mask(device, src->addr, mask, 0);
nvkm_debug(subdev, "disabled source %08x %08x\n",
src->addr, mask);
}
}
return 0;
}
/*******************************************************************************
* Perfdom object classes
******************************************************************************/
/* NVIF_PERFDOM_V0_INIT handler: program each allocated counter slot
 * of the perfdom object, enable its sources, then kick off the first
 * sampling window.  Returns 0 or a negative errno from unpacking. */
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
union {
struct nvif_perfdom_init none;
} *args = data;
struct nvkm_object *object = &dom->object;
struct nvkm_pm *pm = dom->perfmon->pm;
int ret = -ENOSYS, i;
nvif_ioctl(object, "perfdom init size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "perfdom init\n");
} else
return ret;
for (i = 0; i < 4; i++) {
if (dom->ctr[i]) {
dom->func->init(pm, dom, dom->ctr[i]);
/* enable sources */
nvkm_perfsrc_enable(pm, dom->ctr[i]);
}
}
/* start next batch of counters for sampling */
dom->func->next(pm, dom);
return 0;
}
/* NVIF_PERFDOM_V0_SAMPLE handler: latch the previous sampling window
 * on ALL domains (note: the loop deliberately reuses 'dom' to walk
 * pm->domains) and bump the global sequence counter. */
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
union {
struct nvif_perfdom_sample none;
} *args = data;
struct nvkm_object *object = &dom->object;
struct nvkm_pm *pm = dom->perfmon->pm;
int ret = -ENOSYS;
nvif_ioctl(object, "perfdom sample size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "perfdom sample\n");
} else
return ret;
pm->sequence++;
/* sample previous batch of counters */
list_for_each_entry(dom, &pm->domains, head)
dom->func->next(pm, dom);
return 0;
}
/* NVIF_PERFDOM_V0_READ handler: read back the counter values and
 * clock count for this domain's slots.  Returns -EAGAIN when no
 * sampling window has completed yet (clk still zero). */
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
union {
struct nvif_perfdom_read_v0 v0;
} *args = data;
struct nvkm_object *object = &dom->object;
struct nvkm_pm *pm = dom->perfmon->pm;
int ret = -ENOSYS, i;
nvif_ioctl(object, "perfdom read size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
} else
return ret;
for (i = 0; i < 4; i++) {
if (dom->ctr[i])
dom->func->read(pm, dom, dom->ctr[i]);
}
/* no completed sampling window yet */
if (!dom->clk)
return -EAGAIN;
for (i = 0; i < 4; i++)
if (dom->ctr[i])
args->v0.ctr[i] = dom->ctr[i]->ctr;
args->v0.clk = dom->clk;
return 0;
}
/* Dispatch NVIF perfdom object methods to their handlers. */
static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);

	if (mthd == NVIF_PERFDOM_V0_INIT)
		return nvkm_perfdom_init(dom, data, size);
	if (mthd == NVIF_PERFDOM_V0_SAMPLE)
		return nvkm_perfdom_sample(dom, data, size);
	if (mthd == NVIF_PERFDOM_V0_READ)
		return nvkm_perfdom_read(dom, data, size);
	return -EINVAL;
}
/* Perfdom object destructor: disable each counter's sources, unlink
 * it from its domain's list (only if it was ever added — head.next
 * is NULL otherwise), and free it.  Returns the object for the core
 * to kfree. */
static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
struct nvkm_perfdom *dom = nvkm_perfdom(object);
struct nvkm_pm *pm = dom->perfmon->pm;
int i;
for (i = 0; i < 4; i++) {
struct nvkm_perfctr *ctr = dom->ctr[i];
if (ctr) {
nvkm_perfsrc_disable(pm, ctr);
if (ctr->head.next)
list_del(&ctr->head);
}
kfree(ctr);
}
return dom;
}
/* Allocate a counter bound to hardware slot 'slot' of domain 'dom',
 * recording up to four signals (stored as indices into dom->signal)
 * and their sources, and link it into the domain's counter list.
 * Returns 0, -EINVAL for a NULL domain, or -ENOMEM. */
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
struct nvkm_perfsig *signal[4], u64 source[4][8],
u16 logic_op, struct nvkm_perfctr **pctr)
{
struct nvkm_perfctr *ctr;
int i, j;
if (!dom)
return -EINVAL;
ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
if (!ctr)
return -ENOMEM;
ctr->domain = domain;
ctr->logic_op = logic_op;
ctr->slot = slot;
for (i = 0; i < 4; i++) {
if (signal[i]) {
/* store the signal's index within the domain */
ctr->signal[i] = signal[i] - dom->signal;
for (j = 0; j < 8; j++)
ctr->source[i][j] = source[i][j];
}
}
list_add_tail(&ctr->head, &dom->list);
return 0;
}
/* Object vtable for perfdom NVIF objects. */
static const struct nvkm_object_func
nvkm_perfdom = {
.dtor = nvkm_perfdom_dtor,
.mthd = nvkm_perfdom_mthd,
};
/* Perfdom object constructor: validate the requested domain/signals/
 * sources from the NVIF args, allocate up to four counters on the
 * matched specdom (sdom), then create the object carrying them.
 * Returns 0 or a negative errno; allocated counters are cleaned up
 * by the dtor via the domain list on failure paths after linking. */
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
union {
struct nvif_perfdom_v0 v0;
} *args = data;
struct nvkm_pm *pm = perfmon->pm;
struct nvkm_object *parent = oclass->parent;
struct nvkm_perfdom *sdom = NULL;
struct nvkm_perfctr *ctr[4] = {};
struct nvkm_perfdom *dom;
int c, s, m;
int ret = -ENOSYS;
nvif_ioctl(parent, "create perfdom size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
args->v0.version, args->v0.domain, args->v0.mode);
} else
return ret;
for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
struct nvkm_perfsig *sig[4] = {};
u64 src[4][8] = {};
for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
args->v0.ctr[c].signal[s],
&sdom);
/* a non-zero signal id must resolve */
if (args->v0.ctr[c].signal[s] && !sig[s])
return -EINVAL;
for (m = 0; m < 8; m++) {
src[s][m] = args->v0.ctr[c].source[s][m];
if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
src[s][m]))
return -EINVAL;
}
}
ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
args->v0.ctr[c].logic_op, &ctr[c]);
if (ret)
return ret;
}
/* at least one counter must have matched a domain */
if (!sdom)
return -EINVAL;
if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
dom->perfmon = perfmon;
*pobject = &dom->object;
dom->func = sdom->func;
dom->addr = sdom->addr;
dom->mode = args->v0.mode;
for (c = 0; c < ARRAY_SIZE(ctr); c++)
dom->ctr[c] = ctr[c];
return 0;
}
/*******************************************************************************
* Perfmon object classes
******************************************************************************/
/* NVIF_PERFMON_V0_QUERY_DOMAIN: iterate over performance domains.
 * The client passes a 1-based iterator cookie (0 starts iteration);
 * on return it holds next-index + 1, or 0xff when exhausted.
 */
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;
		args->v0.id = di;
		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		/* snprintf (unlike strncpy) guarantees the name returned
		 * to the client is NUL-terminated even when truncated.
		 */
		snprintf(args->v0.name, sizeof(args->v0.name), "%s",
			 dom->name);
		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	/* First ++di is the next index; the cookie is index + 1, hence
	 * the second increment.
	 */
	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
/* NVIF_PERFMON_V0_QUERY_SIGNAL: iterate over the signals of a domain.
 * Unnamed signals are skipped unless "NvPmShowAll" is set; "NvPmUnnamed"
 * forces raw "/dom/xx" names.  Iterator cookie is 1-based (0 starts,
 * 0xffff terminates).
 */
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret = -ENOSYS, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			/* snprintf (unlike strncpy) guarantees the name
			 * returned to the client is NUL-terminated.
			 */
			snprintf(args->v0.name, sizeof(args->v0.name), "%s",
				 sig->name);
		}
		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	/* Find the next visible signal; the cookie is index + 1. */
	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}
/* NVIF_PERFMON_V0_QUERY_SOURCE: iterate over the sources of a signal.
 * Iterator cookie is 1-based (0 starts, 0xff terminates).
 */
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;
		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		/* snprintf (unlike strncpy) guarantees the name returned
		 * to the client is NUL-terminated even when truncated.
		 */
		snprintf(args->v0.name, sizeof(args->v0.name), "%s",
			 src->name);
	}

	/* Advance: ++si is the next index, cookie is index + 1. */
	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
/* Dispatch perfmon class methods to the matching query handler. */
static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);

	if (mthd == NVIF_PERFMON_V0_QUERY_DOMAIN)
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	if (mthd == NVIF_PERFMON_V0_QUERY_SIGNAL)
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	if (mthd == NVIF_PERFMON_V0_QUERY_SOURCE)
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);

	return -EINVAL;
}
/* Child constructor: children of a perfmon object are perfdoms. */
static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	return nvkm_perfdom_new_(nvkm_perfmon(oclass->parent), oclass,
				 data, size, pobject);
}
/* Enumerate child classes; a single one (NVIF_CLASS_PERFDOM) exists. */
static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index != 0)
		return -EINVAL;

	oclass->base.oclass = NVIF_CLASS_PERFDOM;
	oclass->base.minver = 0;
	oclass->base.maxver = 0;
	oclass->ctor = nvkm_perfmon_child_new;
	return 0;
}
/* Destructor for a perfmon object. */
static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;

	/* Release the exclusive PM-engine claim if this perfmon holds
	 * it, so another client may take ownership.
	 */
	spin_lock(&pm->client.lock);
	if (pm->client.object == &perfmon->object)
		pm->client.object = NULL;
	spin_unlock(&pm->client.lock);
	/* Return the object itself so the core can kfree() it. */
	return perfmon;
}
/* Object vtable for perfmon objects exposed to userspace. */
static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};
/* Allocate and construct a perfmon object bound to engine 'pm'. */
static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL);
	if (perfmon == NULL)
		return -ENOMEM;

	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}
/*******************************************************************************
* PPM engine/subdev functions
******************************************************************************/
/* Device-class constructor for NVIF_CLASS_PERFMON objects. */
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	/* Only one client may own the PM engine at a time: the first
	 * perfmon object created claims it, later ones get -EBUSY.
	 */
	spin_lock(&pm->client.lock);
	if (pm->client.object == NULL)
		pm->client.object = *pobject;
	ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
	spin_unlock(&pm->client.lock);
	return ret;
}
/* Device class exposing the PM engine to clients. */
static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_CLASS_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};
/* Enumerate device classes provided by the PM engine.  Returns the
 * matched index for index 0; for any other index it returns the total
 * number of classes (1), per the sclass enumeration convention.
 */
static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}
/* Register the selectable sources described by 'spec' for signal 'sig',
 * creating global source entries on pm->sources as needed and recording
 * each source's 1-based id in sig->source[].
 */
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			/* Sources are global and may be shared between
			 * signals; reuse an existing entry (matched by
			 * register address + mux shift) when possible.
			 */
			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}
			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;
				src->addr = ssrc->addr;
				src->mask = smux->mask;
				src->shift = smux->shift;
				src->enable = smux->enable;
				/* Name is "<source>_<mux>": '_' + NUL. */
				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);
				list_add_tail(&src->head, &pm->sources);
			}
			/* Stored 1-based so 0 can mean "no source".
			 * NOTE(review): no bounds check of source_nr against
			 * the size of sig->source[] — confirm no spec defines
			 * more muxes than the array can hold.
			 */
			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}
	return 0;
}
/* Instantiate performance domains from a chipset spec.  'mask' selects
 * which hardware units to instantiate the spec for; each unit's domains
 * start at 'base' + unit * 'size_unit' and are 'size_domain' apart.
 * Returns 0 on success, negative errno on allocation failure.
 */
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	/* i == 0 always runs, so mask == 0 means "a single unit". */
	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
				      GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			/* Multi-unit domains are named "<name>/<unit>/<dom>",
			 * single-unit ones just "<name>/<dom>".
			 */
			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			/* Populate named signals and register their
			 * selectable sources.
			 */
			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}
		mask &= ~(1 << i);
	}
	return 0;
}
/* Engine fini: give the chipset-specific backend a chance to shut
 * hardware counters down.  Always succeeds.
 */
static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);

	if (pm->func->fini != NULL)
		pm->func->fini(pm);

	return 0;
}
/* Engine destructor: release every registered domain and source. */
static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *tmp_dom;
	struct nvkm_perfsrc *src, *tmp_src;

	/* Free all performance domains. */
	list_for_each_entry_safe(dom, tmp_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	/* Free all sources, including their dynamically built names. */
	list_for_each_entry_safe(src, tmp_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}
/* Engine vtable for the PM (performance monitoring) engine. */
static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};
/* Common constructor for PM engine implementations: wire the backend
 * function table, initialise bookkeeping, and register the engine.
 */
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
{
	pm->func = func;

	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	spin_lock_init(&pm->client.lock);

	return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true,
				&pm->engine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv40.h"
/* PGRAPH vfetch source muxes; shared with later chipsets (non-static). */
const struct nvkm_specsrc
g84_vfetch_sources[] = {
	{ 0x400c0c, (const struct nvkm_specmux[]) {
		{ 0x3, 0, "unk0" },
		{}
	}, "pgraph_vfetch_unk0c" },
	{}
};
/* PGRAPH TPC0 prop source mux. */
static const struct nvkm_specsrc
g84_prop_sources[] = {
	{ 0x408e50, (const struct nvkm_specmux[]) {
		{ 0x1f, 0, "sel", true },
		{}
	}, "pgraph_tpc0_prop_pm_mux" },
	{}
};
/* PGRAPH ROP0 crop source muxes. */
static const struct nvkm_specsrc
g84_crop_sources[] = {
	{ 0x407008, (const struct nvkm_specmux[]) {
		{ 0xf, 0, "sel0", true },
		{ 0x7, 16, "sel1", true },
		{}
	}, "pgraph_rop0_crop_pm_mux" },
	{}
};
/* PGRAPH TPC0 tex source mux. */
static const struct nvkm_specsrc
g84_tex_sources[] = {
	{ 0x408808, (const struct nvkm_specmux[]) {
		{ 0xfffff, 0, "unk0" },
		{}
	}, "pgraph_tpc0_tex_unk08" },
	{}
};
/* G84 performance-counter domain spec: per-domain signal count,
 * named signals (with optional source muxes) and counter backend.
 */
static const struct nvkm_specdom
g84_pm[] = {
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	/* pc01: geometry / front-end signals. */
	{ 0xf0, (const struct nvkm_specsig[]) {
		{ 0xbd, "pc01_gr_idle" },
		{ 0x5e, "pc01_strmout_00" },
		{ 0x5f, "pc01_strmout_01" },
		{ 0xd2, "pc01_trast_00" },
		{ 0xd3, "pc01_trast_01" },
		{ 0xd4, "pc01_trast_02" },
		{ 0xd5, "pc01_trast_03" },
		{ 0xd8, "pc01_trast_04" },
		{ 0xd9, "pc01_trast_05" },
		{ 0x5c, "pc01_vattr_00" },
		{ 0x5d, "pc01_vattr_01" },
		{ 0x66, "pc01_vfetch_00", g84_vfetch_sources },
		{ 0x67, "pc01_vfetch_01", g84_vfetch_sources },
		{ 0x68, "pc01_vfetch_02", g84_vfetch_sources },
		{ 0x69, "pc01_vfetch_03", g84_vfetch_sources },
		{ 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
		{ 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
		{ 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
		{ 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
		{ 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
		{ 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
		{ 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
		{ 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
		{ 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
		{ 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
		{ 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
		{ 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
		{ 0x76, "pc01_vfetch_10", g84_vfetch_sources },
		{ 0x77, "pc01_vfetch_11", g84_vfetch_sources },
		{ 0x78, "pc01_vfetch_12", g84_vfetch_sources },
		{ 0x79, "pc01_vfetch_13", g84_vfetch_sources },
		{ 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
		{ 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
		{ 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
		{ 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
		{ 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
		{ 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
		{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
		{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
		{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
		{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
		{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
		{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
		{ 0xa4, "pc01_unk00" },
		{ 0xec, "pc01_trailer" },
		{}
	}, &nv40_perfctr_func },
	/* pc02: ROP / texture signals. */
	{ 0xa0, (const struct nvkm_specsig[]) {
		{ 0x30, "pc02_crop_00", g84_crop_sources },
		{ 0x31, "pc02_crop_01", g84_crop_sources },
		{ 0x32, "pc02_crop_02", g84_crop_sources },
		{ 0x33, "pc02_crop_03", g84_crop_sources },
		{ 0x00, "pc02_prop_00", g84_prop_sources },
		{ 0x01, "pc02_prop_01", g84_prop_sources },
		{ 0x02, "pc02_prop_02", g84_prop_sources },
		{ 0x03, "pc02_prop_03", g84_prop_sources },
		{ 0x04, "pc02_prop_04", g84_prop_sources },
		{ 0x05, "pc02_prop_05", g84_prop_sources },
		{ 0x06, "pc02_prop_06", g84_prop_sources },
		{ 0x07, "pc02_prop_07", g84_prop_sources },
		{ 0x48, "pc02_tex_00", g84_tex_sources },
		{ 0x49, "pc02_tex_01", g84_tex_sources },
		{ 0x4a, "pc02_tex_02", g84_tex_sources },
		{ 0x4b, "pc02_tex_03", g84_tex_sources },
		{ 0x1a, "pc02_tex_04", g84_tex_sources },
		{ 0x1b, "pc02_tex_05", g84_tex_sources },
		{ 0x1c, "pc02_tex_06", g84_tex_sources },
		{ 0x44, "pc02_zrop_00", nv50_zrop_sources },
		{ 0x45, "pc02_zrop_01", nv50_zrop_sources },
		{ 0x46, "pc02_zrop_02", nv50_zrop_sources },
		{ 0x47, "pc02_zrop_03", nv50_zrop_sources },
		{ 0x8c, "pc02_trailer" },
		{}
	}, &nv40_perfctr_func },
	/* Remaining domains carry no named signals. */
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
/* Create the PM engine for G84-class chipsets using the spec above. */
int
g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	return nv40_pm_new_(g84_pm, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c |
/*
* Copyright 2015 Samuel Pitoiset
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Samuel Pitoiset
*/
#include "gf100.h"
/* GF108 HUB domains: none defined (empty terminator only). */
static const struct nvkm_specdom
gf108_pm_hub[] = {
	{}
};
/* GF108 PART (memory partition) domain: PBFB/PMFB signals. */
static const struct nvkm_specdom
gf108_pm_part[] = {
	{ 0xe0, (const struct nvkm_specsig[]) {
		{ 0x14, "part00_pbfb_00", gf100_pbfb_sources },
		{ 0x15, "part00_pbfb_01", gf100_pbfb_sources },
		{ 0x20, "part00_pbfb_02", gf100_pbfb_sources },
		{ 0x21, "part00_pbfb_03", gf100_pbfb_sources },
		{ 0x01, "part00_pmfb_00", gf100_pmfb_sources },
		{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
		{ 0x05, "part00_pmfb_02", gf100_pmfb_sources },
		{ 0x07, "part00_pmfb_03", gf100_pmfb_sources },
		{ 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
		{ 0x12, "part00_pmfb_05", gf100_pmfb_sources },
		{ 0x13, "part00_pmfb_06", gf100_pmfb_sources },
		{ 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
		{ 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
		{ 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
		{ 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
		{ 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
		{}
	}, &gf100_perfctr_func },
	{}
};
/* GF108 PM config: GF100 GPC domains, chip-specific hub/part domains. */
static const struct gf100_pm_func
gf108_pm = {
	.doms_gpc = gf100_pm_gpc,
	.doms_hub = gf108_pm_hub,
	.doms_part = gf108_pm_part,
};
/* Create the PM engine for GF108 using the config above. */
int
gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv40.h"
/* PGRAPH zcull source muxes specific to GT215. */
static const struct nvkm_specsrc
gt215_zcull_sources[] = {
	{ 0x402ca4, (const struct nvkm_specmux[]) {
		{ 0x7fff, 0, "unk0" },
		{ 0xff, 24, "unk24" },
		{}
	}, "pgraph_zcull_pm_unka4" },
	{}
};
/* GT215 performance-counter domain spec: per-domain signal count,
 * named signals (with optional source muxes) and counter backend.
 */
static const struct nvkm_specdom
gt215_pm[] = {
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	/* pc01: geometry / front-end signals. */
	{ 0xf0, (const struct nvkm_specsig[]) {
		{ 0xcb, "pc01_gr_idle" },
		{ 0x86, "pc01_strmout_00" },
		{ 0x87, "pc01_strmout_01" },
		{ 0xe0, "pc01_trast_00" },
		{ 0xe1, "pc01_trast_01" },
		{ 0xe2, "pc01_trast_02" },
		{ 0xe3, "pc01_trast_03" },
		{ 0xe6, "pc01_trast_04" },
		{ 0xe7, "pc01_trast_05" },
		{ 0x84, "pc01_vattr_00" },
		{ 0x85, "pc01_vattr_01" },
		{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
		{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
		{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
		{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
		{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
		{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
		{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
		{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
		{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
		{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
		{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
		{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
		{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
		{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
		{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
		{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
		{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
		{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
		{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
		{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
		{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
		{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
		{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
		{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
		{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
		{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
		{ 0x07, "pc01_zcull_00", gt215_zcull_sources },
		{ 0x08, "pc01_zcull_01", gt215_zcull_sources },
		{ 0x09, "pc01_zcull_02", gt215_zcull_sources },
		{ 0x0a, "pc01_zcull_03", gt215_zcull_sources },
		{ 0x0b, "pc01_zcull_04", gt215_zcull_sources },
		{ 0x0c, "pc01_zcull_05", gt215_zcull_sources },
		{ 0xb2, "pc01_unk00" },
		{ 0xec, "pc01_trailer" },
		{}
	}, &nv40_perfctr_func },
	/* pc02: ROP / texture signals. */
	{ 0xe0, (const struct nvkm_specsig[]) {
		{ 0x64, "pc02_crop_00", gt200_crop_sources },
		{ 0x65, "pc02_crop_01", gt200_crop_sources },
		{ 0x66, "pc02_crop_02", gt200_crop_sources },
		{ 0x67, "pc02_crop_03", gt200_crop_sources },
		{ 0x00, "pc02_prop_00", gt200_prop_sources },
		{ 0x01, "pc02_prop_01", gt200_prop_sources },
		{ 0x02, "pc02_prop_02", gt200_prop_sources },
		{ 0x03, "pc02_prop_03", gt200_prop_sources },
		{ 0x04, "pc02_prop_04", gt200_prop_sources },
		{ 0x05, "pc02_prop_05", gt200_prop_sources },
		{ 0x06, "pc02_prop_06", gt200_prop_sources },
		{ 0x07, "pc02_prop_07", gt200_prop_sources },
		{ 0x80, "pc02_tex_00", gt200_tex_sources },
		{ 0x81, "pc02_tex_01", gt200_tex_sources },
		{ 0x82, "pc02_tex_02", gt200_tex_sources },
		{ 0x83, "pc02_tex_03", gt200_tex_sources },
		{ 0x3a, "pc02_tex_04", gt200_tex_sources },
		{ 0x3b, "pc02_tex_05", gt200_tex_sources },
		{ 0x3c, "pc02_tex_06", gt200_tex_sources },
		{ 0x7c, "pc02_zrop_00", nv50_zrop_sources },
		{ 0x7d, "pc02_zrop_01", nv50_zrop_sources },
		{ 0x7e, "pc02_zrop_02", nv50_zrop_sources },
		{ 0x7f, "pc02_zrop_03", nv50_zrop_sources },
		{ 0xcc, "pc02_trailer" },
		{}
	}, &nv40_perfctr_func },
	/* Remaining domains carry no named signals. */
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{ 0x20, (const struct nvkm_specsig[]) {
		{}
	}, &nv40_perfctr_func },
	{}
};
/* Create the PM engine for GT215 using the spec above. */
int
gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
{
	return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
#include "priv.h"
#include "chan.h"
#include "nvsw.h"
#include <nvif/class.h>
#include <nvif/if0004.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
/* NV04 software channel: common channel state plus the software
 * reference counter exposed via NV04_NVSW_GET_REF / method 0x0150.
 */
struct nv04_sw_chan {
	struct nvkm_sw_chan base;
	atomic_t ref;	/* software reference value set by method 0x0150 */
};
/*******************************************************************************
* software object classes
******************************************************************************/
/* NV04_NVSW_GET_REF: return the channel's current software reference
 * value to the client via the v0 args structure.
 */
static int
nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
{
	struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
	union {
		struct nv04_nvsw_get_ref_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		args->v0.ref = atomic_read(&chan->ref);
	}
	return ret;
}
/* Dispatch class methods for the NV04 software object; only
 * NV04_NVSW_GET_REF is implemented.
 */
static int
nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
{
	if (mthd == NV04_NVSW_GET_REF)
		return nv04_nvsw_mthd_get_ref(nvsw, data, size);

	return -EINVAL;
}
/* Vtable for NV04 software objects. */
static const struct nvkm_nvsw_func
nv04_nvsw = {
	.mthd = nv04_nvsw_mthd,
};
/* Constructor for NVIF_CLASS_SW_NV04 objects on a software channel. */
static int
nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
	      void *data, u32 size, struct nvkm_object **pobject)
{
	return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
}
/*******************************************************************************
* software context
******************************************************************************/
/* Handle NV04-specific software methods; method 0x0150 updates the
 * channel's software reference counter.  Returns true when consumed.
 */
static bool
nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
{
	struct nv04_sw_chan *chan = nv04_sw_chan(base);

	if (mthd == 0x0150) {
		atomic_set(&chan->ref, data);
		return true;
	}

	return false;
}
/* Channel vtable for NV04 software channels. */
static const struct nvkm_sw_chan_func
nv04_sw_chan = {
	.mthd = nv04_sw_chan_mthd,
};
static int
nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifo,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv04_sw_chan *chan;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
atomic_set(&chan->ref, 0);
*pobject = &chan->base.object;
return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
}
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
/* Engine description for the NV04 software engine: channel constructor
 * and the single object class it exposes.
 */
static const struct nvkm_sw_func
nv04_sw = {
	.chan_new = nv04_sw_chan_new,
	.sclass = {
		{ nv04_nvsw_new, { -1, -1, NVIF_CLASS_SW_NV04 } },
		{}
	}
};
/* Create the software engine for NV04-class chipsets. */
int
nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw)
{
	return nvkm_sw_new_(&nv04_sw, device, type, inst, psw);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "chan.h"
#include <engine/fifo.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
/* Handle a software method on a channel.  Method 0x0000 is a NOP,
 * 0x0500 signals a page-flip event; anything else is deferred to the
 * chipset-specific handler, if one exists.  Returns true if consumed.
 */
bool
nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
{
	if (mthd == 0x0000)
		return true;

	if (mthd == 0x0500) {
		nvkm_event_ntfy(&chan->event, 0, NVKM_SW_CHAN_EVENT_PAGE_FLIP);
		return true;
	}

	if (chan->func->mthd)
		return chan->func->mthd(chan, subc, mthd, data);

	return false;
}
/* No event-specific hooks are required; defaults suffice. */
static const struct nvkm_event_func
nvkm_sw_chan_event = {
};
/* Destructor for a software channel object. */
static void *
nvkm_sw_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
	struct nvkm_sw *sw = chan->sw;
	unsigned long flags;
	void *data = chan;

	/* Let the chipset-specific layer return its containing
	 * structure for freeing, if it defines a dtor.
	 */
	if (chan->func->dtor)
		data = chan->func->dtor(chan);
	nvkm_event_fini(&chan->event);

	/* Unlink from the engine's channel list. */
	spin_lock_irqsave(&sw->engine.lock, flags);
	list_del(&chan->head);
	spin_unlock_irqrestore(&sw->engine.lock, flags);
	return data;
}
/* Base object vtable for software channels. */
static const struct nvkm_object_func
nvkm_sw_chan = {
	.dtor = nvkm_sw_chan_dtor,
};
/* Common constructor for software channels: wires the chipset vtable,
 * registers the channel with the engine, and initialises its event
 * (one event type — page flip — with a single notification bit).
 */
int
nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
		  struct nvkm_chan *fifo, const struct nvkm_oclass *oclass,
		  struct nvkm_sw_chan *chan)
{
	unsigned long flags;

	nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
	chan->func = func;
	chan->sw = sw;
	chan->fifo = fifo;

	/* Register with the engine's channel list. */
	spin_lock_irqsave(&sw->engine.lock, flags);
	list_add(&chan->head, &sw->chan);
	spin_unlock_irqrestore(&sw->engine.lock, flags);

	return nvkm_event_init(&nvkm_sw_chan_event, &sw->engine.subdev, 1, 1, &chan->event);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.