/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_vmm.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"
#include "nouveau_mem.h"
void
nouveau_vma_unmap(struct nouveau_vma *vma)
{
if (vma->mem) {
nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
vma->mem = NULL;
}
}
int
nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
{
struct nvif_vma tmp = { .addr = vma->addr };
int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
if (ret)
return ret;
vma->mem = mem;
return 0;
}
struct nouveau_vma *
nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
struct nouveau_vma *vma;
list_for_each_entry(vma, &nvbo->vma_list, head) {
if (vma->vmm == vmm)
return vma;
}
return NULL;
}
void
nouveau_vma_del(struct nouveau_vma **pvma)
{
struct nouveau_vma *vma = *pvma;
if (vma && --vma->refs <= 0) {
if (likely(vma->addr != ~0ULL)) {
struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
nvif_vmm_put(&vma->vmm->vmm, &tmp);
}
list_del(&vma->head);
kfree(*pvma);
}
*pvma = NULL;
}
int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
struct nouveau_vma **pvma)
{
struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
struct nouveau_vma *vma;
struct nvif_vma tmp;
int ret;
if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
vma->refs++;
return 0;
}
if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
return -ENOMEM;
vma->vmm = vmm;
vma->refs = 1;
vma->addr = ~0ULL;
vma->mem = NULL;
vma->fence = NULL;
list_add_tail(&vma->head, &nvbo->vma_list);
if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
mem->mem.size, &tmp);
if (ret)
goto done;
vma->addr = tmp.addr;
ret = nouveau_vma_map(vma, mem);
} else {
ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
mem->mem.size, &tmp);
vma->addr = tmp.addr;
}
done:
if (ret)
nouveau_vma_del(pvma);
return ret;
}
void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
nouveau_svmm_fini(&vmm->svmm);
nvif_vmm_dtor(&vmm->vmm);
vmm->cli = NULL;
}
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
if (ret)
return ret;
vmm->cli = cli;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_vmm.c |
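The per-BO VMA list in nouveau_vmm.c is reference-counted: nouveau_vma_new() reuses an existing mapping for the same VMM and just bumps refs, while nouveau_vma_del() only releases the address range once the count reaches zero. Below is a minimal sketch of a hypothetical caller exercising that contract; the function name is an illustration only, not part of the file:
/* Hypothetical illustration of the nouveau_vma refcounting contract. */
static int example_map_bo_twice(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
	struct nouveau_vma *a, *b;
	int ret;
	ret = nouveau_vma_new(nvbo, vmm, &a);	/* allocates, gets an address, maps */
	if (ret)
		return ret;
	ret = nouveau_vma_new(nvbo, vmm, &b);	/* same VMM: refs++, b == a */
	if (ret) {
		nouveau_vma_del(&a);
		return ret;
	}
	nouveau_vma_del(&b);			/* refs drops to 1, mapping stays */
	nouveau_vma_del(&a);			/* refs hits 0, address range is put */
	return 0;
}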
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>
#include <nvif/if0020.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;
static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
return container_of(fence, struct nouveau_fence, base);
}
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
int drop = 0;
dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
rcu_assign_pointer(fence->channel, NULL);
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
drop = 1;
}
dma_fence_put(&fence->base);
return drop;
}
static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
if (fence->ops != &nouveau_fence_ops_legacy &&
fence->ops != &nouveau_fence_ops_uevent)
return NULL;
return from_fence(fence);
}
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
struct nouveau_fence *fence;
unsigned long flags;
spin_lock_irqsave(&fctx->lock, flags);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if (error)
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))
nvif_event_block(&fctx->event);
}
fctx->killed = 1;
spin_unlock_irqrestore(&fctx->lock, flags);
}
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
/*
* Ensure that all accesses to fence->channel complete before freeing
* the channel.
*/
synchronize_rcu();
}
static void
nouveau_fence_context_put(struct kref *fence_ref)
{
kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}
void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence;
int drop = 0;
u32 seq = fctx->read(chan);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if ((int)(seq - fence->base.seqno) < 0)
break;
drop |= nouveau_fence_signal(fence);
}
return drop;
}
static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
unsigned long flags;
int ret = NVIF_EVENT_KEEP;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
struct nouveau_fence *fence;
struct nouveau_channel *chan;
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(chan, fctx))
ret = NVIF_EVENT_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
return ret;
}
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
struct nouveau_cli *cli = (void *)chan->user.client;
struct {
struct nvif_event_v0 base;
struct nvif_chan_event_v0 host;
} args;
int ret;
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;
if (chan == chan->drm->cechan)
strcpy(fctx->name, "copy engine channel");
else if (chan == chan->drm->channel)
strcpy(fctx->name, "generic kernel channel");
else
strcpy(fctx->name, nvxx_client(&cli->base)->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
return;
args.host.version = 0;
args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
&args.base, sizeof(args), &fctx->event);
WARN_ON(ret);
}
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
&fctx->lock, fctx->context, ++fctx->sequence);
else
dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
&fctx->lock, fctx->context, ++fctx->sequence);
kref_get(&fctx->fence_ref);
ret = fctx->emit(fence);
if (!ret) {
dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (unlikely(fctx->killed)) {
spin_unlock_irq(&fctx->lock);
dma_fence_put(&fence->base);
return -ENODEV;
}
if (nouveau_fence_update(chan, fctx))
nvif_event_block(&fctx->event);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
return ret;
}
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
if (fence->base.ops == &nouveau_fence_ops_legacy ||
fence->base.ops == &nouveau_fence_ops_uevent) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
struct nouveau_channel *chan;
unsigned long flags;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (chan && nouveau_fence_update(chan, fctx))
nvif_event_block(&fctx->event);
spin_unlock_irqrestore(&fctx->lock, flags);
}
return dma_fence_is_signaled(&fence->base);
}
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
struct nouveau_fence *fence = from_fence(f);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
unsigned long t = jiffies, timeout = t + wait;
while (!nouveau_fence_done(fence)) {
ktime_t kt;
t = jiffies;
if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
__set_current_state(TASK_RUNNING);
return 0;
}
__set_current_state(intr ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
kt = sleep_time;
schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
sleep_time = NSEC_PER_MSEC;
if (intr && signal_pending(current))
return -ERESTARTSYS;
}
__set_current_state(TASK_RUNNING);
return timeout - t;
}
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
int ret = 0;
while (!nouveau_fence_done(fence)) {
if (time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
__set_current_state(intr ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
return ret;
}
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
long ret;
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
else if (!ret)
return -EBUSY;
else
return 0;
}
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
bool exclusive, bool intr)
{
struct nouveau_fence_chan *fctx = chan->fence;
struct dma_resv *resv = nvbo->bo.base.resv;
int i, ret;
ret = dma_resv_reserve_fences(resv, 1);
if (ret)
return ret;
/* Waiting for the writes first causes performance regressions
* under some circumstances. So manually wait for the reads first.
*/
for (i = 0; i < 2; ++i) {
struct dma_resv_iter cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&cursor, resv,
dma_resv_usage_rw(exclusive),
fence) {
enum dma_resv_usage usage;
struct nouveau_fence *f;
usage = dma_resv_iter_usage(&cursor);
if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
f = nouveau_local_fence(fence, chan->drm);
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
rcu_read_lock();
prev = rcu_dereference(f->channel);
if (prev && (prev == chan ||
fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
continue;
}
ret = dma_fence_wait(fence, intr);
if (ret)
return ret;
}
}
return 0;
}
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
dma_fence_put(&(*pfence)->base);
*pfence = NULL;
}
int
nouveau_fence_create(struct nouveau_fence **pfence,
struct nouveau_channel *chan)
{
struct nouveau_fence *fence;
if (unlikely(!chan->fence))
return -ENODEV;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
fence->channel = chan;
*pfence = fence;
return 0;
}
int
nouveau_fence_new(struct nouveau_fence **pfence,
struct nouveau_channel *chan)
{
int ret = 0;
ret = nouveau_fence_create(pfence, chan);
if (ret)
return ret;
ret = nouveau_fence_emit(*pfence);
if (ret)
nouveau_fence_unref(pfence);
return ret;
}
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
}
static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
return !fctx->dead ? fctx->name : "dead channel";
}
/*
* In an ideal world, read would not assume the channel context is still alive.
* This function may be called from another device, running into free memory as a
* result. The drm node should still be there, so we can derive the index from
* the fence context.
*/
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
struct nouveau_channel *chan;
bool ret = false;
rcu_read_lock();
chan = rcu_dereference(fence->channel);
if (chan)
ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
rcu_read_unlock();
return ret;
}
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
/*
* caller should have a reference on the fence,
* else fence could get freed here
*/
WARN_ON(kref_read(&fence->base.refcount) <= 1);
/*
* This needs uevents to work correctly, but dma_fence_add_callback relies on
* being able to enable signaling. It will still get signaled eventually,
* just not right away.
*/
if (nouveau_fence_is_signaled(f)) {
list_del(&fence->head);
dma_fence_put(&fence->base);
return false;
}
return true;
}
static void nouveau_fence_release(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
dma_fence_free(&fence->base);
}
static const struct dma_fence_ops nouveau_fence_ops_legacy = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_no_signaling,
.signaled = nouveau_fence_is_signaled,
.wait = nouveau_fence_wait_legacy,
.release = nouveau_fence_release
};
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
if (!fctx->notify_ref++)
nvif_event_allow(&fctx->event);
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
else if (!--fctx->notify_ref)
nvif_event_block(&fctx->event);
return ret;
}
static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_driver_name = nouveau_fence_get_get_driver_name,
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
.release = nouveau_fence_release
};
| linux-master | drivers/gpu/drm/nouveau/nouveau_fence.c |
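Both nouveau_fence_update() and nouveau_fence_is_signaled() above compare sequence numbers as (int)(seq - seqno), which keeps the ordering correct across 32-bit counter wraparound as long as outstanding fences never span more than 2^31 values. A standalone sketch of that comparison (example values are made up for illustration):
#include <stdbool.h>
#include <stdint.h>
/* True if the hardware has reached (or passed) the fence's sequence number,
 * even when the 32-bit counter has wrapped around.
 */
static bool seqno_passed(uint32_t hw_seq, uint32_t fence_seq)
{
	return (int32_t)(hw_seq - fence_seq) >= 0;
}
/* seqno_passed(5, 3) is true, seqno_passed(3, 5) is false, and
 * seqno_passed(2, 0xfffffffe) is true because the counter wrapped.
 */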
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
#include <nvif/push006c.h>
#include <nvhw/class/cl006e.h>
int
nv10_fence_emit(struct nouveau_fence *fence)
{
struct nvif_push *push = fence->channel->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
PUSH_KICK(push);
}
return ret;
}
static int
nv10_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
return -ENODEV;
}
u32
nv10_fence_read(struct nouveau_channel *chan)
{
return NVIF_RD32(&chan->user, NV06E, REFERENCE);
}
void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
nouveau_fence_context_del(&fctx->base);
nvif_object_dtor(&fctx->sema);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv10_fence_emit;
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv10_fence_sync;
return 0;
}
void
nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
int
nv10_fence_create(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.dtor = nv10_fence_destroy;
priv->base.context_new = nv10_fence_context_new;
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nv10_fence.c |
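nv10_fence.c shows the backend split used by all nouveau fence implementations: the per-channel hooks (emit, read, sync) are filled in at context_new time and called by the generic code in nouveau_fence.c through chan->fence, while the driver-wide hooks (dtor, context_new, context_del) hang off drm->fence. A condensed sketch of that contract, with placeholder myhw_* names that are assumptions for illustration:
/* Per-channel hooks invoked by nouveau_fence.c through chan->fence. */
static int myhw_fence_emit(struct nouveau_fence *fence);	/* write seqno to HW */
static u32 myhw_fence_read(struct nouveau_channel *chan);	/* read last completed seqno */
static int myhw_fence_sync(struct nouveau_fence *fence,
			   struct nouveau_channel *prev,
			   struct nouveau_channel *chan);	/* inter-channel wait, or -ENODEV */
static int
myhw_fence_context_new(struct nouveau_channel *chan)
{
	struct nv10_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;
	chan->fence = fctx;
	nouveau_fence_context_new(chan, &fctx->base);
	fctx->base.emit = myhw_fence_emit;
	fctx->base.read = myhw_fence_read;
	fctx->base.sync = myhw_fence_sync;
	return 0;
}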
/*
* Copyright (C) 2016 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Martin Peres <[email protected]>
*/
#include <linux/leds.h>
#include "nouveau_led.h"
#include <nvkm/subdev/gpio.h>
static enum led_brightness
nouveau_led_get_brightness(struct led_classdev *led)
{
struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_object *device = &drm->client.device.object;
u32 div, duty;
div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
duty = nvif_rd32(device, 0x61c884) & 0x00ffffff;
if (div > 0)
return duty * LED_FULL / div;
else
return 0;
}
static void
nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
{
struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_object *device = &drm->client.device.object;
u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
u32 freq = 100; /* this is what nvidia uses and it should be good-enough */
u32 div, duty;
div = input_clk / freq;
duty = value * div / LED_FULL;
/* for now, this is safe to directly poke those registers because:
* - A: nvidia never connects the logo LED to any PWM controller other
* than PDISPLAY.SOR[1].PWM.
* - B: nouveau does not touch these registers anywhere else
*/
nvif_wr32(device, 0x61c880, div);
nvif_wr32(device, 0x61c884, 0xc0000000 | duty);
}
int
nouveau_led_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
struct dcb_gpio_func logo_led;
int ret;
if (!gpio)
return 0;
/* check that there is a GPIO controlling the logo LED */
if (nvkm_gpio_find(gpio, 0, DCB_GPIO_LOGO_LED_PWM, 0xff, &logo_led))
return 0;
drm->led = kzalloc(sizeof(*drm->led), GFP_KERNEL);
if (!drm->led)
return -ENOMEM;
drm->led->dev = dev;
drm->led->led.name = "nvidia-logo";
drm->led->led.max_brightness = 255;
drm->led->led.brightness_get = nouveau_led_get_brightness;
drm->led->led.brightness_set = nouveau_led_set_brightness;
ret = led_classdev_register(dev->dev, &drm->led->led);
if (ret) {
kfree(drm->led);
drm->led = NULL;
return ret;
}
return 0;
}
void
nouveau_led_suspend(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->led)
led_classdev_suspend(&drm->led->led);
}
void
nouveau_led_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->led)
led_classdev_resume(&drm->led->led);
}
void
nouveau_led_fini(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->led) {
led_classdev_unregister(&drm->led->led);
kfree(drm->led);
drm->led = NULL;
}
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_led.c |
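The brightness callbacks above drive PDISPLAY.SOR[1].PWM directly: the divider comes from the 27 MHz crystal and a fixed 100 Hz PWM frequency, and the duty cycle scales the requested brightness into that divider. A standalone userspace sketch of the same arithmetic with concrete numbers:
#include <stdint.h>
#include <stdio.h>
#define LED_FULL 255u	/* matches the max_brightness registered above */
int main(void)
{
	uint32_t input_clk = 27000000;		/* 27 MHz crystal */
	uint32_t freq = 100;			/* PWM frequency in Hz */
	uint32_t div = input_clk / freq;	/* 270000 */
	uint32_t value = 128;			/* requested brightness */
	uint32_t duty = value * div / LED_FULL;	/* 135529, roughly 50% duty */
	printf("div=%u duty=%u (%.1f%%)\n", div, duty, 100.0 * duty / div);
	return 0;
}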
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/limits.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/drm_cache.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
#include <core/tegra.h>
static void
nouveau_manager_del(struct ttm_resource_manager *man,
struct ttm_resource *reg)
{
nouveau_mem_del(man, reg);
}
static bool
nouveau_manager_intersects(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
return nouveau_mem_intersects(res, place, size);
}
static bool
nouveau_manager_compatible(struct ttm_resource_manager *man,
struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
return nouveau_mem_compatible(res, place, size);
}
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
ttm_resource_init(bo, place, *res);
ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
if (ret) {
nouveau_mem_del(man, *res);
return ret;
}
return 0;
}
const struct ttm_resource_manager_func nouveau_vram_manager = {
.alloc = nouveau_vram_manager_new,
.free = nouveau_manager_del,
.intersects = nouveau_manager_intersects,
.compatible = nouveau_manager_compatible,
};
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
ttm_resource_init(bo, place, *res);
(*res)->start = 0;
return 0;
}
const struct ttm_resource_manager_func nouveau_gart_manager = {
.alloc = nouveau_gart_manager_new,
.free = nouveau_manager_del,
.intersects = nouveau_manager_intersects,
.compatible = nouveau_manager_compatible,
};
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_mem *mem;
int ret;
ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
mem = nouveau_mem(*res);
ttm_resource_init(bo, place, *res);
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
(long)(*res)->size, &mem->vma[0]);
if (ret) {
nouveau_mem_del(man, *res);
return ret;
}
(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
const struct ttm_resource_manager_func nv04_gart_manager = {
.alloc = nv04_gart_manager_new,
.free = nouveau_manager_del,
.intersects = nouveau_manager_intersects,
.compatible = nouveau_manager_compatible,
};
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
struct nvif_mmu *mmu = &drm->client.mmu;
int typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
kind | NVIF_MEM_COHERENT);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_host[!!kind] = typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_ncoh[!!kind] = typei;
return 0;
}
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return -ENOMEM;
man->func = &nouveau_vram_manager;
ttm_resource_manager_init(man, &drm->ttm.bdev,
drm->gem.vram_available >> PAGE_SHIFT);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
ttm_resource_manager_set_used(man, true);
return 0;
} else {
return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
drm->gem.vram_available >> PAGE_SHIFT);
}
}
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ttm_resource_manager_set_used(man, false);
ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
kfree(man);
} else
ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
}
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
struct ttm_resource_manager *man;
unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
const struct ttm_resource_manager_func *func = NULL;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
func = &nouveau_gart_manager;
else if (!drm->agp.bridge)
func = &nv04_gart_manager;
else
return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
size_pages);
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (!man)
return -ENOMEM;
man->func = func;
man->use_tt = true;
ttm_resource_manager_init(man, &drm->ttm.bdev, size_pages);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
ttm_resource_manager_set_used(man, true);
return 0;
}
static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
drm->agp.bridge)
ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
else {
ttm_resource_manager_set_used(man, false);
ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
kfree(man);
}
}
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
int typei, ret;
ret = nouveau_ttm_init_host(drm, 0);
if (ret)
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
drm->client.device.info.chipset != 0x50) {
ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
if (ret)
return ret;
}
if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
NVIF_MEM_KIND |
NVIF_MEM_COMP |
NVIF_MEM_DISP);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_vram = typei;
} else {
drm->ttm.type_vram = -1;
}
if (pci && pci->agp.bridge) {
drm->agp.bridge = pci->agp.bridge;
drm->agp.base = pci->agp.base;
drm->agp.size = pci->agp.size;
drm->agp.cma = pci->agp.cma;
}
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
drm_need_swiotlb(drm->client.mmu.dmabits),
drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
}
/* VRAM init */
drm->gem.vram_available = drm->client.device.info.ram_user;
arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
device->func->resource_size(device, 1));
ret = nouveau_ttm_init_vram(drm);
if (ret) {
NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
return ret;
}
drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
device->func->resource_size(device, 1));
/* GART init */
if (!drm->agp.bridge) {
drm->gem.gart_available = drm->client.vmm.vmm.limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
ret = nouveau_ttm_init_gtt(drm);
if (ret) {
NV_ERROR(drm, "GART mm init failed, %d\n", ret);
return ret;
}
mutex_init(&drm->ttm.io_reserve_mutex);
INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
return 0;
}
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
struct nvkm_device *device = nvxx_device(&drm->client.device);
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
ttm_device_fini(&drm->ttm.bdev);
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
device->func->resource_size(device, 1));
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_ttm.c |
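For Tesla-and-newer GPUs, nouveau_ttm.c replaces TTM's range manager with driver-private managers for both VRAM and GTT, and the registration always follows the same pattern: allocate, attach the driver's alloc/free ops, size the manager in pages, publish it under the placement type, then mark it usable (teardown runs the same steps in reverse, as in nouveau_ttm_fini_vram()). A condensed sketch of that pattern; the helper name is a placeholder, not part of the file:
static int example_register_manager(struct nouveau_drm *drm, unsigned int mem_type,
				    u64 size_bytes,
				    const struct ttm_resource_manager_func *func)
{
	struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return -ENOMEM;
	man->func = func;
	ttm_resource_manager_init(man, &drm->ttm.bdev, size_bytes >> PAGE_SHIFT);
	ttm_set_driver_manager(&drm->ttm.bdev, mem_type, man);
	ttm_resource_manager_set_used(man, true);
	return 0;
}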
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"
#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
/*
* NV10-NV40 tiling helpers
*/
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
u32 addr, u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
if (tile->pitch)
nvkm_fb_tile_fini(fb, i, tile);
if (pitch)
nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
nvkm_fb_tile_prog(fb, i, tile);
}
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_drm_tile *tile = &drm->tile.reg[i];
spin_lock(&drm->tile.lock);
if (!tile->used &&
(!tile->fence || nouveau_fence_done(tile->fence)))
tile->used = true;
else
tile = NULL;
spin_unlock(&drm->tile.lock);
return tile;
}
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
struct dma_fence *fence)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (tile) {
spin_lock(&drm->tile.lock);
tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
tile->used = false;
spin_unlock(&drm->tile.lock);
}
}
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
for (i = 0; i < fb->tile.regions; i++) {
tile = nv10_bo_get_tile_region(dev, i);
if (pitch && !found) {
found = tile;
continue;
} else if (tile && fb->tile.region[i].pitch) {
/* Kill an unused tile region. */
nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
}
nv10_bo_put_tile_region(dev, tile, NULL);
}
if (found)
nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
return found;
}
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
WARN_ON(nvbo->bo.pin_count > 0);
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
*/
if (bo->base.dev)
drm_gem_object_release(&bo->base);
else
dma_resv_fini(&bo->base._resv);
kfree(nvbo);
}
static inline u64
roundup_64(u64 x, u32 y)
{
x += y - 1;
do_div(x, y);
return x * y;
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->client.device;
if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->mode) {
if (device->info.chipset >= 0x40) {
*align = 65536;
*size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x30) {
*align = 32768;
*size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x20) {
*align = 16384;
*size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x10) {
*align = 16384;
*size = roundup_64(*size, 32 * nvbo->mode);
}
}
} else {
*size = roundup_64(*size, (1 << nvbo->page));
*align = max((1 << nvbo->page), *align);
}
*size = roundup_64(*size, PAGE_SIZE);
}
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags, bool internal)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
int i, pi = -1;
if (!*size) {
NV_WARN(drm, "skipped size %016llx\n", *size);
return ERR_PTR(-EINVAL);
}
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->bo.bdev = &drm->ttm.bdev;
/* This is confusing, and doesn't actually mean we want an uncached
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
* into in nouveau_gem_new().
*/
if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
if (!nouveau_drm_use_coherent_gpu_mapping(drm))
nvbo->force_coherent = true;
}
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
if (!nouveau_cli_uvmm(cli) || internal) {
/* for BO noVM allocs, don't assign kinds */
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
nvbo->kind = (tile_flags & 0x00007f00) >> 8;
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
}
nvbo->mode = tile_mode;
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Select this page size if it's the first that supports
* the potential memory domains, or when it's compatible
* with the requested compression settings.
*/
if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
nvbo->kind = mmu->kind[nvbo->kind];
nvbo->comp = 0;
}
nvbo->page = vmm->page[pi].shift;
} else {
/* reject other tile flags when in VM mode. */
if (tile_mode)
return ERR_PTR(-EINVAL);
if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
return ERR_PTR(-EINVAL);
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
if (pi < 0)
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->page = vmm->page[pi].shift;
}
nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
}
int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
.resv = robj,
};
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
&nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
if (!robj)
ttm_bo_unreserve(&nvbo->bo);
return 0;
}
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct dma_resv *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags, true);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.size = size;
dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node);
/* This must be called before ttm_bo_init_reserved(). Subsequent
* bo_move() callbacks might already iterate the GEMs GPUVA list.
*/
drm_gem_gpuva_init(&nvbo->bo.base);
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
*pnvbo = nvbo;
return 0;
}
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
*n = 0;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
pl[*n].mem_type = TTM_PL_VRAM;
pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_GART) {
pl[*n].mem_type = TTM_PL_TT;
pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
pl[*n].mem_type = TTM_PL_SYSTEM;
pl[(*n)++].flags = 0;
}
}
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u64 vram_size = drm->client.device.info.ram_size;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
nvbo->bo.base.size < vram_size / 4) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
if (nvbo->zeta) {
fpfn = (vram_size / 2) >> PAGE_SHIFT;
lpfn = ~0;
} else {
fpfn = 0;
lpfn = (vram_size / 2) >> PAGE_SHIFT;
}
for (i = 0; i < nvbo->placement.num_placement; ++i) {
nvbo->placements[i].fpfn = fpfn;
nvbo->placements[i].lpfn = lpfn;
}
for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
nvbo->busy_placements[i].fpfn = fpfn;
nvbo->busy_placements[i].lpfn = lpfn;
}
}
}
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
struct ttm_placement *pl = &nvbo->placement;
pl->placement = nvbo->placements;
set_placement_list(nvbo->placements, &pl->num_placement, domain);
pl->busy_placement = nvbo->busy_placements;
set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
domain | busy);
set_placement_range(nvbo, domain);
}
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
bool force = false, evict = false;
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
if (!nvbo->contig) {
nvbo->contig = true;
force = true;
evict = true;
}
}
if (nvbo->bo.pin_count) {
bool error = evict;
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
break;
case TTM_PL_TT:
error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
break;
default:
break;
}
if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
bo->resource->mem_type, domain);
ret = -EBUSY;
}
ttm_bo_pin(&nvbo->bo);
goto out;
}
if (evict) {
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
}
nouveau_bo_placement_set(nvbo, domain, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
ttm_bo_pin(&nvbo->bo);
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available -= bo->base.size;
break;
case TTM_PL_TT:
drm->gem.gart_available -= bo->base.size;
break;
default:
break;
}
out:
if (force && ret)
nvbo->contig = false;
ttm_bo_unreserve(bo);
return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->base.size;
break;
case TTM_PL_TT:
drm->gem.gart_available += bo->base.size;
break;
default:
break;
}
}
ttm_bo_unreserve(bo);
return 0;
}
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
if (!nvbo)
return;
ttm_bo_kunmap(&nvbo->kmap);
}
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
return;
}
/* Don't waste time looping if the object is coherent */
if (nvbo->force_coherent)
return;
i = 0;
while (i < ttm_dma->num_pages) {
struct page *p = ttm_dma->pages[i];
size_t num_pages = 1;
for (j = i + 1; j < ttm_dma->num_pages; ++j) {
if (++p != ttm_dma->pages[j])
break;
++num_pages;
}
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
num_pages * PAGE_SIZE, DMA_TO_DEVICE);
i += num_pages;
}
}
void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
return;
}
/* Don't waste time looping if the object is coherent */
if (nvbo->force_coherent)
return;
i = 0;
while (i < ttm_dma->num_pages) {
struct page *p = ttm_dma->pages[i];
size_t num_pages = 1;
for (j = i + 1; j < ttm_dma->num_pages; ++j) {
if (++p != ttm_dma->pages[j])
break;
++num_pages;
}
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
i += num_pages;
}
}
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
mutex_lock(&drm->ttm.io_reserve_mutex);
list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
mutex_lock(&drm->ttm.io_reserve_mutex);
list_del_init(&nvbo->io_reserve_lru);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
int ret;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
if (ret)
return ret;
nouveau_bo_sync_for_device(nvbo);
return 0;
}
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
mem += index;
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
else
*mem = val;
}
u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
mem += index;
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
else
return *mem;
}
void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
mem += index;
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
else
*mem = val;
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
if (drm->agp.bridge) {
return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
}
#endif
return nouveau_sgdma_create_ttm(bo, page_flags);
}
static int
nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
if (!reg)
return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge)
return ttm_agp_bind(ttm, reg);
#endif
return nouveau_sgdma_bind(bdev, ttm, reg);
}
static void
nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
ttm_agp_unbind(ttm);
return;
}
#endif
nouveau_sgdma_unbind(bdev, ttm);
}
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
NOUVEAU_GEM_DOMAIN_CPU);
break;
default:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
break;
}
*pl = nvbo->placement;
}
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_resource *reg)
{
struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
struct nouveau_mem *new_mem = nouveau_mem(reg);
struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
old_mem->mem.size, &old_mem->vma[0]);
if (ret)
return ret;
ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
new_mem->mem.size, &old_mem->vma[1]);
if (ret)
goto done;
ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
if (ret)
goto done;
ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
if (ret) {
nvif_vmm_put(vmm, &old_mem->vma[1]);
nvif_vmm_put(vmm, &old_mem->vma[0]);
}
return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_fence *fence;
int ret;
/* create temporary vmas for the transfer and attach them to the
* old nvkm_mem node, these will get cleaned up after ttm has
* destroyed the ttm_resource
*/
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_reg);
if (ret)
return ret;
}
if (drm_drv_uses_atomic_modeset(drm->dev))
mutex_lock(&cli->mutex);
else
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret)
goto out_unlock;
ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret)
goto out_unlock;
ret = nouveau_fence_new(&fence, chan);
if (ret)
goto out_unlock;
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
* ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
*
* Without this the operation can timeout and we'll fallback to a
* software copy, which might take several minutes to finish.
*/
nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
new_reg);
nouveau_fence_unref(&fence);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
}
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
static const struct _method_table {
const char *name;
int engine;
s32 oclass;
int (*exec)(struct nouveau_channel *,
struct ttm_buffer_object *,
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc6b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
{ "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
{},
};
const struct _method_table *mthd = _methods;
const char *name = "CPU";
int ret;
do {
struct nouveau_channel *chan;
if (mthd->engine)
chan = drm->cechan;
else
chan = drm->channel;
if (chan == NULL)
continue;
ret = nvif_object_ctor(&chan->user, "ttmBoMove",
mthd->oclass | (mthd->engine << 16),
mthd->oclass, NULL, 0,
&drm->ttm.copy);
if (ret == 0) {
ret = mthd->init(chan, drm->ttm.copy.handle);
if (ret) {
nvif_object_dtor(&drm->ttm.copy);
continue;
}
drm->ttm.move = mthd->exec;
drm->ttm.chan = chan;
name = mthd->name;
break;
}
} while ((++mthd)->exec);
NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma;
long ret;
/* ttm can now (stupidly) pass the driver bos it didn't create... */
if (bo->destroy != nouveau_bo_del_ttm)
return;
nouveau_bo_del_io_reserve_lru(bo);
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
list_for_each_entry(vma, &nvbo->vma_list, head) {
nouveau_vma_map(vma, mem);
}
nouveau_uvmm_bo_map_all(nvbo, mem);
} else {
list_for_each_entry(vma, &nvbo->vma_list, head) {
ret = dma_resv_wait_timeout(bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
false, 15 * HZ);
WARN_ON(ret <= 0);
nouveau_vma_unmap(vma);
}
nouveau_uvmm_bo_unmap_all(nvbo);
}
if (new_reg)
nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
struct nouveau_drm_tile **new_tile)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 offset = new_reg->start << PAGE_SHIFT;
*new_tile = NULL;
if (new_reg->mem_type != TTM_PL_VRAM)
return 0;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
nvbo->mode, nvbo->zeta);
}
return 0;
}
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct nouveau_drm_tile *new_tile,
struct nouveau_drm_tile **old_tile)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct dma_fence *fence;
int ret;
ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
&fence);
if (ret)
dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
false, MAX_SCHEDULE_TIMEOUT);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_reg,
struct ttm_place *hop)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
if (ret)
return ret;
}
nouveau_bo_move_ntfy(bo, new_reg);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
goto out_ntfy;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
goto out_ntfy;
}
/* Fake bo copy. */
if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
!bo->ttm)) {
ttm_bo_move_null(bo, new_reg);
goto out;
}
if (old_reg->mem_type == TTM_PL_SYSTEM &&
new_reg->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_reg);
goto out;
}
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
/* Hardware assisted copy. */
if (drm->ttm.move) {
if ((old_reg->mem_type == TTM_PL_SYSTEM &&
new_reg->mem_type == TTM_PL_VRAM) ||
(old_reg->mem_type == TTM_PL_VRAM &&
new_reg->mem_type == TTM_PL_SYSTEM)) {
hop->fpfn = 0;
hop->lpfn = 0;
hop->mem_type = TTM_PL_TT;
hop->flags = 0;
return -EMULTIHOP;
}
ret = nouveau_bo_move_m2mf(bo, evict, ctx,
new_reg);
} else
ret = -ENODEV;
if (ret) {
/* Fallback to software copy. */
ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
}
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
}
out_ntfy:
if (ret) {
nouveau_bo_move_ntfy(bo, bo->resource);
}
return ret;
}
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
switch (reg->mem_type) {
case TTM_PL_TT:
if (mem->kind)
nvif_object_unmap_handle(&mem->mem.object);
break;
case TTM_PL_VRAM:
nvif_object_unmap_handle(&mem->mem.object);
break;
default:
break;
}
}
}
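/*
 * Resolve a TTM resource to a CPU-accessible bus address.  TT memory is only
 * treated as I/O for AGP or tiled (kind != 0) NV50+ allocations; VRAM goes
 * through BAR1, using an nvif mapping handle on NV50+ memory classes.  On
 * -ENOSPC a BO from the io-reserve LRU list is unmapped and the lookup is
 * retried.
 */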
static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
ret = 0;
goto out;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
reg->bus.offset = (reg->start << PAGE_SHIFT) +
drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
reg->bus.caching = ttm_write_combined;
}
#endif
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
!mem->kind) {
/* untiled */
ret = 0;
break;
}
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
reg->bus.caching = ttm_uncached;
else
reg->bus.caching = ttm_write_combined;
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
union {
struct nv50_mem_map_v0 nv50;
struct gf100_mem_map_v0 gf100;
} args;
u64 handle, length;
u32 argc = 0;
switch (mem->mem.object.oclass) {
case NVIF_CLASS_MEM_NV50:
args.nv50.version = 0;
args.nv50.ro = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
argc = sizeof(args.nv50);
break;
case NVIF_CLASS_MEM_GF100:
args.gf100.version = 0;
args.gf100.ro = 0;
args.gf100.kind = mem->kind;
argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
break;
}
ret = nvif_object_map_handle(&mem->mem.object,
&args, argc,
&handle, &length);
if (ret != 1) {
if (WARN_ON(ret == 0))
ret = -EINVAL;
goto out;
}
reg->bus.offset = handle;
}
ret = 0;
break;
default:
ret = -EINVAL;
}
out:
if (ret == -ENOSPC) {
struct nouveau_bo *nvbo;
nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
typeof(*nvbo),
io_reserve_lru);
if (nvbo) {
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
goto retry;
}
}
mutex_unlock(&drm->ttm.io_reserve_mutex);
return ret;
}
static void
nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
mutex_lock(&drm->ttm.io_reserve_mutex);
nouveau_ttm_io_mem_free_locked(drm, reg);
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
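/*
 * CPU fault notifier: tiled BOs that still live in system memory are moved
 * to GART, and BOs in VRAM beyond the BAR1-mappable window are restricted to
 * it on chips that cannot remap BAR1, then revalidated before the fault is
 * handled.
 */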
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(&drm->client.device);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->resource->mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nvbo->kind)
return 0;
if (bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->resource->start + PFN_UP(bo->resource->size) < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
nvbo->placements[i].fpfn = 0;
nvbo->placements[i].lpfn = mappable;
}
for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
nvbo->busy_placements[i].fpfn = 0;
nvbo->busy_placements[i].lpfn = mappable;
}
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
ret = nouveau_bo_validate(nvbo, false, false);
if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
return VM_FAULT_NOPAGE;
else if (unlikely(ret))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
static int
nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (ttm_tt_is_populated(ttm))
return 0;
if (slave && ttm->sg) {
drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
ttm->num_pages);
return 0;
}
drm = nouveau_bdev(bdev);
return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}
static void
nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct nouveau_drm *drm;
bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (slave)
return;
nouveau_ttm_tt_unbind(bdev, ttm);
drm = nouveau_bdev(bdev);
return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}
static void
nouveau_ttm_tt_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
ttm_agp_destroy(ttm);
return;
}
#endif
nouveau_sgdma_destroy(bdev, ttm);
}
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
struct dma_resv *resv = nvbo->bo.base.resv;
if (!fence)
return;
dma_resv_add_fence(resv, &fence->base, exclusive ?
DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
nouveau_bo_move_ntfy(bo, NULL);
}
struct ttm_device_funcs nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include <nvif/push206e.h>
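/*
 * Buffer copy through the G84 crypt engine (class 0x74c1) in its plain copy
 * mode; source and destination come from the two temporary VMAs set up for
 * the move in struct nouveau_mem.
 */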
int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 7);
if (ret)
return ret;
PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
0x0308, upper_32_bits(mem->vma[0].addr),
0x030c, lower_32_bits(mem->vma[0].addr),
0x0310, upper_32_bits(mem->vma[1].addr),
0x0314, lower_32_bits(mem->vma[1].addr),
0x0318, 0x00000000 /* MODE_COPY, QUERY_NONE */);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo74c1.c |
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_platform.h"
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
func = of_device_get_match_data(&pdev->dev);
drm = nouveau_platform_device_create(func, pdev, &device);
if (IS_ERR(drm))
return PTR_ERR(drm);
ret = drm_dev_register(drm, 0);
if (ret < 0) {
drm_dev_put(drm);
return ret;
}
return 0;
}
static int nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *dev = platform_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
return 0;
}
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
.require_vdd = true,
};
static const struct nvkm_device_tegra_func gm20b_platform_data = {
.iommu_bit = 34,
.require_vdd = true,
.require_ref_clk = true,
};
static const struct nvkm_device_tegra_func gp10b_platform_data = {
.iommu_bit = 36,
/* power provided by generic PM domains */
.require_vdd = false,
};
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
.data = &gk20a_platform_data,
},
{
.compatible = "nvidia,gm20b",
.data = &gm20b_platform_data,
},
{
.compatible = "nvidia,gp10b",
.data = &gp10b_platform_data,
},
{ }
};
MODULE_DEVICE_TABLE(of, nouveau_platform_match);
#endif
struct platform_driver nouveau_platform_driver = {
.driver = {
.name = "nouveau",
.of_match_table = of_match_ptr(nouveau_platform_match),
},
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};
| linux-master | drivers/gpu/drm/nouveau/nouveau_platform.c |
// SPDX-License-Identifier: MIT
/*
* Locking:
*
* The uvmm mutex protects any operations on the GPU VA space provided by the
* DRM GPU VA manager.
*
* The GEM's dma_resv lock protects the GEM's GPUVA list, hence link/unlink of
* a mapping to its backing GEM must be performed under this lock.
*
* Actual map/unmap operations within the fence signalling critical path are
* protected by installing DMA fences to the corresponding GEMs DMA
* reservations, such that BO moves, which themselves walk the GEM's GPUVA
* list in order to map/unmap its entries, can't run concurrently.
*
* Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate
* protection, since there are no accesses other than from BO move callbacks
* and from the fence signalling critical path, which are already protected by
* the corresponding GEMs DMA reservation fence.
*/
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_uvmm.h"
#include <nvif/vmm.h>
#include <nvif/mem.h>
#include <nvif/class.h>
#include <nvif/if000c.h>
#include <nvif/if900d.h>
#define NOUVEAU_VA_SPACE_BITS 47 /* FIXME */
#define NOUVEAU_VA_SPACE_START 0x0
#define NOUVEAU_VA_SPACE_END (1ULL << NOUVEAU_VA_SPACE_BITS)
#define list_last_op(_ops) list_last_entry(_ops, struct bind_job_op, entry)
#define list_prev_op(_op) list_prev_entry(_op, entry)
#define list_for_each_op(_op, _ops) list_for_each_entry(_op, _ops, entry)
#define list_for_each_op_from_reverse(_op, _ops) \
list_for_each_entry_from_reverse(_op, _ops, entry)
#define list_for_each_op_safe(_op, _n, _ops) list_for_each_entry_safe(_op, _n, _ops, entry)
enum vm_bind_op {
OP_MAP = DRM_NOUVEAU_VM_BIND_OP_MAP,
OP_UNMAP = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
OP_MAP_SPARSE,
OP_UNMAP_SPARSE,
};
struct nouveau_uvma_prealloc {
struct nouveau_uvma *map;
struct nouveau_uvma *prev;
struct nouveau_uvma *next;
};
struct bind_job_op {
struct list_head entry;
enum vm_bind_op op;
u32 flags;
struct {
u64 addr;
u64 range;
} va;
struct {
u32 handle;
u64 offset;
struct drm_gem_object *obj;
} gem;
struct nouveau_uvma_region *reg;
struct nouveau_uvma_prealloc new;
struct drm_gpuva_ops *ops;
};
struct uvmm_map_args {
struct nouveau_uvma_region *region;
u64 addr;
u64 range;
u8 kind;
};
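/*
 * Thin wrappers around the raw nvif VMM interface: get/put allocate and free
 * page table backing for a range, sparse_ref/unref enable and disable sparse
 * behaviour for a range, and map/unmap program the PTEs themselves.
 */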
static int
nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
return nvif_vmm_raw_sparse(vmm, addr, range, true);
}
static int
nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
return nvif_vmm_raw_sparse(vmm, addr, range, false);
}
static int
nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
}
static int
nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
}
static int
nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
u64 addr, u64 range, bool sparse)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
}
static int
nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
u64 addr, u64 range,
u64 bo_offset, u8 kind,
struct nouveau_mem *mem)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
union {
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_GF100:
case NVIF_CLASS_VMM_GM200:
case NVIF_CLASS_VMM_GP100:
args.gf100.version = 0;
if (mem->mem.type & NVIF_MEM_VRAM)
args.gf100.vol = 0;
else
args.gf100.vol = 1;
args.gf100.ro = 0;
args.gf100.priv = 0;
args.gf100.kind = kind;
argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
return -ENOSYS;
}
return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
&args, argc,
&mem->mem, bo_offset);
}
static int
nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg)
{
u64 addr = reg->va.addr;
u64 range = reg->va.range;
return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
}
static int
nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
}
static int
nouveau_uvma_map(struct nouveau_uvma *uvma,
struct nouveau_mem *mem)
{
u64 addr = uvma->va.va.addr;
u64 offset = uvma->va.gem.offset;
u64 range = uvma->va.va.range;
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
offset, uvma->kind, mem);
}
static int
nouveau_uvma_unmap(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
bool sparse = !!uvma->region;
if (drm_gpuva_invalidated(&uvma->va))
return 0;
return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
}
static int
nouveau_uvma_alloc(struct nouveau_uvma **puvma)
{
*puvma = kzalloc(sizeof(**puvma), GFP_KERNEL);
if (!*puvma)
return -ENOMEM;
return 0;
}
static void
nouveau_uvma_free(struct nouveau_uvma *uvma)
{
kfree(uvma);
}
static void
nouveau_uvma_gem_get(struct nouveau_uvma *uvma)
{
drm_gem_object_get(uvma->va.gem.obj);
}
static void
nouveau_uvma_gem_put(struct nouveau_uvma *uvma)
{
drm_gem_object_put(uvma->va.gem.obj);
}
static int
nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg)
{
*preg = kzalloc(sizeof(**preg), GFP_KERNEL);
if (!*preg)
return -ENOMEM;
kref_init(&(*preg)->kref);
return 0;
}
static void
nouveau_uvma_region_free(struct kref *kref)
{
struct nouveau_uvma_region *reg =
container_of(kref, struct nouveau_uvma_region, kref);
kfree(reg);
}
static void
nouveau_uvma_region_get(struct nouveau_uvma_region *reg)
{
kref_get(&reg->kref);
}
static void
nouveau_uvma_region_put(struct nouveau_uvma_region *reg)
{
kref_put(&reg->kref, nouveau_uvma_region_free);
}
static int
__nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_region *reg)
{
u64 addr = reg->va.addr;
u64 range = reg->va.range;
u64 last = addr + range - 1;
MA_STATE(mas, &uvmm->region_mt, addr, addr);
if (unlikely(mas_walk(&mas)))
return -EEXIST;
if (unlikely(mas.last < last))
return -EEXIST;
mas.index = addr;
mas.last = last;
mas_store_gfp(&mas, reg, GFP_KERNEL);
reg->uvmm = uvmm;
return 0;
}
static int
nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_region *reg,
u64 addr, u64 range)
{
int ret;
reg->uvmm = uvmm;
reg->va.addr = addr;
reg->va.range = range;
ret = __nouveau_uvma_region_insert(uvmm, reg);
if (ret)
return ret;
return 0;
}
static void
nouveau_uvma_region_remove(struct nouveau_uvma_region *reg)
{
struct nouveau_uvmm *uvmm = reg->uvmm;
MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
mas_erase(&mas);
}
static int
nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nouveau_uvma_region *reg;
int ret;
if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
return -ENOSPC;
ret = nouveau_uvma_region_alloc(&reg);
if (ret)
return ret;
ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
if (ret)
goto err_free_region;
ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
if (ret)
goto err_region_remove;
return 0;
err_region_remove:
nouveau_uvma_region_remove(reg);
err_free_region:
nouveau_uvma_region_put(reg);
return ret;
}
static struct nouveau_uvma_region *
nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
MA_STATE(mas, &uvmm->region_mt, addr, 0);
return mas_find(&mas, addr + range - 1);
}
static struct nouveau_uvma_region *
nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nouveau_uvma_region *reg;
reg = nouveau_uvma_region_find_first(uvmm, addr, range);
if (!reg)
return NULL;
if (reg->va.addr != addr ||
reg->va.range != range)
return NULL;
return reg;
}
static bool
nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
{
struct nouveau_uvmm *uvmm = reg->uvmm;
return drm_gpuva_interval_empty(&uvmm->umgr,
reg->va.addr,
reg->va.range);
}
static int
__nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg)
{
struct nouveau_uvmm *uvmm = reg->uvmm;
u64 addr = reg->va.addr;
u64 range = reg->va.range;
if (!nouveau_uvma_region_empty(reg))
return -EBUSY;
nouveau_uvma_region_remove(reg);
nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
nouveau_uvma_region_put(reg);
return 0;
}
static int
nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
u64 addr, u64 range)
{
struct nouveau_uvma_region *reg;
reg = nouveau_uvma_region_find(uvmm, addr, range);
if (!reg)
return -ENOENT;
return __nouveau_uvma_region_destroy(reg);
}
static void
nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg)
{
init_completion(&reg->complete);
reg->dirty = true;
}
static void
nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
{
complete_all(&reg->complete);
}
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{
nouveau_uvma_gem_put(uvma);
drm_gpuva_remove(&uvma->va);
nouveau_uvma_free(uvma);
}
static void
op_unmap_prepare_unwind(struct drm_gpuva *va)
{
drm_gpuva_insert(va->mgr, va);
}
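/*
 * Undo a partially prepared map/unmap sequence: walk the ops backwards from
 * @last to restore the GPUVA space, then walk forwards again to release any
 * page table ranges that were allocated for map operations (@args is NULL
 * for unmap-only sequences, which never allocate page tables).
 */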
static void
nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops,
struct drm_gpuva_op *last,
struct uvmm_map_args *args)
{
struct drm_gpuva_op *op = last;
u64 vmm_get_start = args ? args->addr : 0;
u64 vmm_get_end = args ? args->addr + args->range : 0;
/* Unwind GPUVA space. */
drm_gpuva_for_each_op_from_reverse(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
op_map_prepare_unwind(new->map);
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
if (r->next)
op_map_prepare_unwind(new->next);
if (r->prev)
op_map_prepare_unwind(new->prev);
op_unmap_prepare_unwind(r->unmap->va);
break;
}
case DRM_GPUVA_OP_UNMAP:
op_unmap_prepare_unwind(op->unmap.va);
break;
default:
break;
}
}
/* Unmap operations don't allocate page tables, hence skip the following
* page table unwind.
*/
if (!args)
return;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP: {
u64 vmm_get_range = vmm_get_end - vmm_get_start;
if (vmm_get_range)
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
vmm_get_range);
break;
}
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
struct drm_gpuva *va = r->unmap->va;
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
if (r->prev)
vmm_get_start = uend;
if (r->next)
vmm_get_end = ustart;
if (r->prev && r->next)
vmm_get_start = vmm_get_end = 0;
break;
}
case DRM_GPUVA_OP_UNMAP: {
struct drm_gpuva_op_unmap *u = &op->unmap;
struct drm_gpuva *va = u->va;
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
/* Nothing to do for mappings we merge with. */
if (uend == vmm_get_start ||
ustart == vmm_get_end)
break;
if (ustart > vmm_get_start) {
u64 vmm_get_range = ustart - vmm_get_start;
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
vmm_get_range);
}
vmm_get_start = uend;
break;
}
default:
break;
}
if (op == last)
break;
}
}
static void
nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops,
u64 addr, u64 range)
{
struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
struct uvmm_map_args args = {
.addr = addr,
.range = range,
};
nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
}
static void
nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
}
static int
op_map_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma **puvma,
struct drm_gpuva_op_map *op,
struct uvmm_map_args *args)
{
struct nouveau_uvma *uvma;
int ret;
ret = nouveau_uvma_alloc(&uvma);
if (ret)
return ret;
uvma->region = args->region;
uvma->kind = args->kind;
drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
/* Keep a reference until this uvma is destroyed. */
nouveau_uvma_gem_get(uvma);
*puvma = uvma;
return 0;
}
static void
op_unmap_prepare(struct drm_gpuva_op_unmap *u)
{
drm_gpuva_unmap(u);
}
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops,
struct uvmm_map_args *args)
{
struct drm_gpuva_op *op;
u64 vmm_get_start = args ? args->addr : 0;
u64 vmm_get_end = args ? args->addr + args->range : 0;
int ret;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP: {
u64 vmm_get_range = vmm_get_end - vmm_get_start;
ret = op_map_prepare(uvmm, &new->map, &op->map, args);
if (ret)
goto unwind;
if (args && vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
if (ret) {
op_map_prepare_unwind(new->map);
goto unwind;
}
}
break;
}
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
struct drm_gpuva *va = r->unmap->va;
struct uvmm_map_args remap_args = {
.kind = uvma_from_va(va)->kind,
.region = uvma_from_va(va)->region,
};
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
op_unmap_prepare(r->unmap);
if (r->prev) {
ret = op_map_prepare(uvmm, &new->prev, r->prev,
&remap_args);
if (ret)
goto unwind;
if (args)
vmm_get_start = uend;
}
if (r->next) {
ret = op_map_prepare(uvmm, &new->next, r->next,
&remap_args);
if (ret) {
if (r->prev)
op_map_prepare_unwind(new->prev);
goto unwind;
}
if (args)
vmm_get_end = ustart;
}
if (args && (r->prev && r->next))
vmm_get_start = vmm_get_end = 0;
break;
}
case DRM_GPUVA_OP_UNMAP: {
struct drm_gpuva_op_unmap *u = &op->unmap;
struct drm_gpuva *va = u->va;
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
op_unmap_prepare(u);
if (!args)
break;
/* Nothing to do for mappings we merge with. */
if (uend == vmm_get_start ||
ustart == vmm_get_end)
break;
if (ustart > vmm_get_start) {
u64 vmm_get_range = ustart - vmm_get_start;
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
if (ret) {
op_unmap_prepare_unwind(va);
goto unwind;
}
}
vmm_get_start = uend;
break;
}
default:
ret = -EINVAL;
goto unwind;
}
}
return 0;
unwind:
if (op != drm_gpuva_first_op(ops))
nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
drm_gpuva_prev_op(op),
args);
return ret;
}
static int
nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct nouveau_uvma_region *region,
struct drm_gpuva_ops *ops,
u64 addr, u64 range, u8 kind)
{
struct uvmm_map_args args = {
.region = region,
.addr = addr,
.range = range,
.kind = kind,
};
return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
}
static int
nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
}
static struct drm_gem_object *
op_gem_obj(struct drm_gpuva_op *op)
{
switch (op->op) {
case DRM_GPUVA_OP_MAP:
return op->map.gem.obj;
case DRM_GPUVA_OP_REMAP:
/* Actually, we're looking for the GEMs backing remap.prev and
* remap.next, but since this is a remap they're identical to
* the GEM backing the unmapped GPUVA.
*/
return op->remap.unmap->va->gem.obj;
case DRM_GPUVA_OP_UNMAP:
return op->unmap.va->gem.obj;
default:
WARN(1, "Unknown operation.\n");
return NULL;
}
}
static void
op_map(struct nouveau_uvma *uvma)
{
struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj);
nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource));
}
static void
op_unmap(struct drm_gpuva_op_unmap *u)
{
struct drm_gpuva *va = u->va;
struct nouveau_uvma *uvma = uvma_from_va(va);
/* nouveau_uvma_unmap() does not unmap if backing BO is evicted. */
if (!u->keep)
nouveau_uvma_unmap(uvma);
}
static void
op_unmap_range(struct drm_gpuva_op_unmap *u,
u64 addr, u64 range)
{
struct nouveau_uvma *uvma = uvma_from_va(u->va);
bool sparse = !!uvma->region;
if (!drm_gpuva_invalidated(u->va))
nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
}
static void
op_remap(struct drm_gpuva_op_remap *r,
struct nouveau_uvma_prealloc *new)
{
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
range = r->next->va.addr - addr;
op_unmap_range(u, addr, range);
}
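/*
 * Second stage of a bind job: perform the actual (re)maps and unmaps that
 * were prepared during job submission.  This runs in the fence signalling
 * critical path (see the locking notes at the top of this file).
 */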
static int
nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
op_map(new->map);
break;
case DRM_GPUVA_OP_REMAP:
op_remap(&op->remap, new);
break;
case DRM_GPUVA_OP_UNMAP:
op_unmap(&op->unmap);
break;
default:
break;
}
}
return 0;
}
static int
nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
return nouveau_uvmm_sm(uvmm, new, ops);
}
static int
nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
return nouveau_uvmm_sm(uvmm, new, ops);
}
static void
nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops, bool unmap)
{
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
struct drm_gpuva_op_map *p = r->prev;
struct drm_gpuva_op_map *n = r->next;
struct drm_gpuva *va = r->unmap->va;
struct nouveau_uvma *uvma = uvma_from_va(va);
if (unmap) {
u64 addr = va->va.addr;
u64 end = addr + va->va.range;
if (p)
addr = p->va.addr + p->va.range;
if (n)
end = n->va.addr;
nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
}
nouveau_uvma_gem_put(uvma);
nouveau_uvma_free(uvma);
break;
}
case DRM_GPUVA_OP_UNMAP: {
struct drm_gpuva_op_unmap *u = &op->unmap;
struct drm_gpuva *va = u->va;
struct nouveau_uvma *uvma = uvma_from_va(va);
if (unmap)
nouveau_uvma_vmm_put(uvma);
nouveau_uvma_gem_put(uvma);
nouveau_uvma_free(uvma);
break;
}
default:
break;
}
}
}
static void
nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
}
static void
nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
struct drm_gpuva_ops *ops)
{
nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
}
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
u64 end = addr + range;
u64 kernel_managed_end = uvmm->kernel_managed_addr +
uvmm->kernel_managed_size;
if (addr & ~PAGE_MASK)
return -EINVAL;
if (range & ~PAGE_MASK)
return -EINVAL;
if (end <= addr)
return -EINVAL;
if (addr < NOUVEAU_VA_SPACE_START ||
end > NOUVEAU_VA_SPACE_END)
return -EINVAL;
if (addr < kernel_managed_end &&
end > uvmm->kernel_managed_addr)
return -EINVAL;
return 0;
}
static int
nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob)
{
*pjob = kzalloc(sizeof(**pjob), GFP_KERNEL);
if (!*pjob)
return -ENOMEM;
kref_init(&(*pjob)->kref);
return 0;
}
static void
nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
nouveau_job_free(&job->base);
kfree(job);
}
static void
nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job)
{
kref_get(&job->kref);
}
static void
nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job)
{
kref_put(&job->kref, nouveau_uvmm_bind_job_free);
}
static int
bind_validate_op(struct nouveau_job *job,
struct bind_job_op *op)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct drm_gem_object *obj = op->gem.obj;
if (op->op == OP_MAP) {
if (op->gem.offset & ~PAGE_MASK)
return -EINVAL;
if (obj->size <= op->gem.offset)
return -EINVAL;
if (op->va.range > (obj->size - op->gem.offset))
return -EINVAL;
}
return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
}
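/*
 * A new sparse mapping must not overlap an unmap that is still queued as
 * part of another bind job on this entity; wait for such jobs to complete
 * before proceeding, then re-check the list.
 */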
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
struct nouveau_uvmm_bind_job *bind_job;
struct nouveau_sched_entity *entity = job->entity;
struct bind_job_op *op;
u64 end = addr + range;
again:
spin_lock(&entity->job.list.lock);
list_for_each_entry(bind_job, &entity->job.list.head, entry) {
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
u64 op_end = op_addr + op->va.range;
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
spin_unlock(&entity->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
}
}
}
}
spin_unlock(&entity->job.list.lock);
}
static int
bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
bool sparse)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvma_region *reg;
u64 reg_addr, reg_end;
u64 end = addr + range;
again:
nouveau_uvmm_lock(uvmm);
reg = nouveau_uvma_region_find_first(uvmm, addr, range);
if (!reg) {
nouveau_uvmm_unlock(uvmm);
return 0;
}
/* Generally, job submits are serialized, hence only
* dirty regions can be modified concurrently.
*/
if (reg->dirty) {
nouveau_uvma_region_get(reg);
nouveau_uvmm_unlock(uvmm);
wait_for_completion(&reg->complete);
nouveau_uvma_region_put(reg);
goto again;
}
nouveau_uvmm_unlock(uvmm);
if (sparse)
return -ENOSPC;
reg_addr = reg->va.addr;
reg_end = reg_addr + reg->va.range;
/* Make sure the mapping is either outside of a
* region or fully enclosed by a region.
*/
if (reg_addr > addr || reg_end < end)
return -ENOSPC;
return 0;
}
static int
bind_validate_region(struct nouveau_job *job)
{
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct bind_job_op *op;
int ret;
list_for_each_op(op, &bind_job->ops) {
u64 op_addr = op->va.addr;
u64 op_range = op->va.range;
bool sparse = false;
switch (op->op) {
case OP_MAP_SPARSE:
sparse = true;
bind_validate_map_sparse(job, op_addr, op_range);
fallthrough;
case OP_MAP:
ret = bind_validate_map_common(job, op_addr, op_range,
sparse);
if (ret)
return ret;
break;
default:
break;
}
}
return 0;
}
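/*
 * Link newly created GPUVAs into, and unlink removed ones from, the GPUVA
 * list of their backing GEM.  Called with the GEMs' dma_resv locks held via
 * drm_exec.
 */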
static void
bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
{
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
drm_gpuva_link(&new->map->va);
break;
case DRM_GPUVA_OP_REMAP:
if (op->remap.prev)
drm_gpuva_link(&new->prev->va);
if (op->remap.next)
drm_gpuva_link(&new->next->va);
drm_gpuva_unlink(op->remap.unmap->va);
break;
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
default:
break;
}
}
}
static int
nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_sched_entity *entity = job->entity;
struct drm_exec *exec = &job->exec;
struct bind_job_op *op;
int ret;
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
op->gem.obj = drm_gem_object_lookup(job->file_priv,
op->gem.handle);
if (!op->gem.obj)
return -ENOENT;
}
ret = bind_validate_op(job, op);
if (ret)
return ret;
}
/* If a sparse region or mapping overlaps a dirty region, we need to
* wait for the region to complete the unbind process. This is due to
* how page table management is currently implemented. A future
* implementation might change this.
*/
ret = bind_validate_region(job);
if (ret)
return ret;
/* Once we start modifying the GPU VA space we need to keep holding the
* uvmm lock until we can't fail anymore. This is because the set of GPU
* VA space changes must appear atomic and we need to be able to unwind
* all of them on failure.
*/
nouveau_uvmm_lock(uvmm);
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
ret = nouveau_uvma_region_create(uvmm,
op->va.addr,
op->va.range);
if (ret)
goto unwind_continue;
break;
case OP_UNMAP_SPARSE:
op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
op->va.range);
if (!op->reg || op->reg->dirty) {
ret = -ENOENT;
goto unwind_continue;
}
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
}
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
op->ops = NULL;
op->reg = NULL;
goto unwind_continue;
}
nouveau_uvma_region_dirty(op->reg);
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
op->va.range);
if (reg) {
u64 reg_addr = reg->va.addr;
u64 reg_end = reg_addr + reg->va.range;
u64 op_addr = op->va.addr;
u64 op_end = op_addr + op->va.range;
if (unlikely(reg->dirty)) {
ret = -EINVAL;
goto unwind_continue;
}
/* Make sure the mapping is either outside of a
* region or fully enclosed by a region.
*/
if (reg_addr > op_addr || reg_end < op_end) {
ret = -ENOSPC;
goto unwind_continue;
}
}
op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
op->va.addr,
op->va.range,
op->gem.obj,
op->gem.offset);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
}
ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
reg, op->ops,
op->va.addr,
op->va.range,
op->flags & 0xff);
if (ret) {
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
op->ops = NULL;
goto unwind_continue;
}
break;
}
case OP_UNMAP:
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
}
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
op->ops = NULL;
goto unwind_continue;
}
break;
default:
ret = -EINVAL;
goto unwind_continue;
}
}
drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES);
drm_exec_until_all_locked(exec) {
list_for_each_op(op, &bind_job->ops) {
struct drm_gpuva_op *va_op;
if (IS_ERR_OR_NULL(op->ops))
continue;
drm_gpuva_for_each_op(va_op, op->ops) {
struct drm_gem_object *obj = op_gem_obj(va_op);
if (unlikely(!obj))
continue;
ret = drm_exec_prepare_obj(exec, obj, 1);
drm_exec_retry_on_contention(exec);
if (ret) {
op = list_last_op(&bind_job->ops);
goto unwind;
}
}
}
}
list_for_each_op(op, &bind_job->ops) {
struct drm_gpuva_op *va_op;
if (IS_ERR_OR_NULL(op->ops))
continue;
drm_gpuva_for_each_op(va_op, op->ops) {
struct drm_gem_object *obj = op_gem_obj(va_op);
if (unlikely(!obj))
continue;
/* Don't validate GEMs backing mappings we're about to
* unmap, it's not worth the effort.
*/
if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
continue;
ret = nouveau_bo_validate(nouveau_gem_object(obj),
true, false);
if (ret) {
op = list_last_op(&bind_job->ops);
goto unwind;
}
}
}
/* Link and unlink GPUVAs while holding the dma_resv lock.
*
* As long as we validate() all GEMs and add fences to all GEMs DMA
* reservations backing map and remap operations we can be sure there
* won't be any concurrent (in)validations during job execution, hence
* we're safe to check drm_gpuva_invalidated() within the fence
* signalling critical path without holding a separate lock.
*
* GPUVAs about to be unmapped are safe as well, since they're unlinked
* already.
*
* GEMs from map and remap operations must be validated before linking
* their corresponding mappings to prevent the actual PT update from
* happening right away in validate() rather than asynchronously as
* intended.
*
* Note that after linking and unlinking the GPUVAs in this loop this
* function cannot fail anymore, hence there is no need for an unwind
* path.
*/
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
bind_link_gpuvas(op->ops, &op->new);
break;
default:
break;
}
}
nouveau_uvmm_unlock(uvmm);
spin_lock(&entity->job.list.lock);
list_add(&bind_job->entry, &entity->job.list.head);
spin_unlock(&entity->job.list.lock);
return 0;
unwind_continue:
op = list_prev_op(op);
unwind:
list_for_each_op_from_reverse(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
nouveau_uvma_region_destroy(uvmm, op->va.addr,
op->va.range);
break;
case OP_UNMAP_SPARSE:
__nouveau_uvma_region_insert(uvmm, op->reg);
nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
op->ops);
break;
case OP_MAP:
nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
op->ops,
op->va.addr,
op->va.range);
break;
case OP_UNMAP:
nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
op->ops);
break;
}
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
op->ops = NULL;
op->reg = NULL;
}
nouveau_uvmm_unlock(uvmm);
drm_exec_fini(exec);
return ret;
}
static void
nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
{
struct drm_exec *exec = &job->exec;
struct drm_gem_object *obj;
unsigned long index;
drm_exec_for_each_locked_object(exec, index, obj)
dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
drm_exec_fini(exec);
}
static struct dma_fence *
nouveau_uvmm_bind_job_run(struct nouveau_job *job)
{
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct bind_job_op *op;
int ret = 0;
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
/* noop */
break;
case OP_MAP:
ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
if (ret)
goto out;
break;
case OP_UNMAP_SPARSE:
fallthrough;
case OP_UNMAP:
ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
if (ret)
goto out;
break;
}
}
out:
if (ret)
NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret);
return ERR_PTR(ret);
}
static void
nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
{
struct nouveau_uvmm_bind_job *bind_job =
container_of(work, struct nouveau_uvmm_bind_job, work);
struct nouveau_job *job = &bind_job->base;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_sched_entity *entity = job->entity;
struct bind_job_op *op, *next;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
/* When nouveau_uvmm_bind_job_submit() fails op->ops and op->reg
* will be NULL, hence skip the cleanup.
*/
switch (op->op) {
case OP_MAP_SPARSE:
/* noop */
break;
case OP_UNMAP_SPARSE:
if (!IS_ERR_OR_NULL(op->ops))
nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
op->ops);
if (op->reg) {
nouveau_uvma_region_sparse_unref(op->reg);
nouveau_uvmm_lock(uvmm);
nouveau_uvma_region_remove(op->reg);
nouveau_uvmm_unlock(uvmm);
nouveau_uvma_region_complete(op->reg);
nouveau_uvma_region_put(op->reg);
}
break;
case OP_MAP:
if (!IS_ERR_OR_NULL(op->ops))
nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
op->ops);
break;
case OP_UNMAP:
if (!IS_ERR_OR_NULL(op->ops))
nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
op->ops);
break;
}
if (!IS_ERR_OR_NULL(op->ops))
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
if (obj)
drm_gem_object_put(obj);
}
spin_lock(&entity->job.list.lock);
list_del(&bind_job->entry);
spin_unlock(&entity->job.list.lock);
complete_all(&bind_job->complete);
wake_up(&entity->job.wq);
/* Remove and free ops after removing the bind job from the job list to
* avoid races against bind_validate_map_sparse().
*/
list_for_each_op_safe(op, next, &bind_job->ops) {
list_del(&op->entry);
kfree(op);
}
nouveau_uvmm_bind_job_put(bind_job);
}
static void
nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
{
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_sched_entity *entity = job->entity;
nouveau_sched_entity_qwork(entity, &bind_job->work);
}
static struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
.free = nouveau_uvmm_bind_job_free_qwork,
};
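/*
 * Translate a userspace struct drm_nouveau_vm_bind_op into the internal
 * bind_job_op representation, folding the SPARSE flag into the opcode.
 */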
static int
bind_job_op_from_uop(struct bind_job_op **pop,
struct drm_nouveau_vm_bind_op *uop)
{
struct bind_job_op *op;
op = *pop = kzalloc(sizeof(*op), GFP_KERNEL);
if (!op)
return -ENOMEM;
switch (uop->op) {
case OP_MAP:
op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
OP_MAP_SPARSE : OP_MAP;
break;
case OP_UNMAP:
op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
OP_UNMAP_SPARSE : OP_UNMAP;
break;
default:
op->op = uop->op;
break;
}
op->flags = uop->flags;
op->va.addr = uop->addr;
op->va.range = uop->range;
op->gem.handle = uop->handle;
op->gem.offset = uop->bo_offset;
return 0;
}
static void
bind_job_ops_free(struct list_head *ops)
{
struct bind_job_op *op, *next;
list_for_each_op_safe(op, next, ops) {
list_del(&op->entry);
kfree(op);
}
}
static int
nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
struct nouveau_uvmm_bind_job_args *__args)
{
struct nouveau_uvmm_bind_job *job;
struct nouveau_job_args args = {};
struct bind_job_op *op;
int i, ret;
ret = nouveau_uvmm_bind_job_alloc(&job);
if (ret)
return ret;
INIT_LIST_HEAD(&job->ops);
INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
if (ret)
goto err_free;
list_add_tail(&op->entry, &job->ops);
}
init_completion(&job->complete);
INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
args.out_sync.count = __args->out_sync.count;
args.out_sync.s = __args->out_sync.s;
args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC);
args.ops = &nouveau_bind_job_ops;
args.resv_usage = DMA_RESV_USAGE_BOOKKEEP;
ret = nouveau_job_init(&job->base, &args);
if (ret)
goto err_free;
*pjob = job;
return 0;
err_free:
bind_job_ops_free(&job->ops);
kfree(job);
*pjob = NULL;
return ret;
}
int
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_vm_init *init = data;
return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
init->kernel_managed_size);
}
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
struct nouveau_uvmm_bind_job *job;
int ret;
ret = nouveau_uvmm_bind_job_init(&job, args);
if (ret)
return ret;
ret = nouveau_job_submit(&job->base);
if (ret)
goto err_job_fini;
return 0;
err_job_fini:
nouveau_job_fini(&job->base);
return ret;
}
static int
nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args,
struct drm_nouveau_vm_bind *req)
{
struct drm_nouveau_sync **s;
u32 inc = req->wait_count;
u64 ins = req->wait_ptr;
u32 outc = req->sig_count;
u64 outs = req->sig_ptr;
u32 opc = req->op_count;
u64 ops = req->op_ptr;
int ret;
args->flags = req->flags;
if (opc) {
args->op.count = opc;
args->op.s = u_memcpya(ops, opc,
sizeof(*args->op.s));
if (IS_ERR(args->op.s))
return PTR_ERR(args->op.s);
}
if (inc) {
s = &args->in_sync.s;
args->in_sync.count = inc;
*s = u_memcpya(ins, inc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_ops;
}
}
if (outc) {
s = &args->out_sync.s;
args->out_sync.count = outc;
*s = u_memcpya(outs, outc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_ins;
}
}
return 0;
err_free_ops:
u_free(args->op.s);
err_free_ins:
u_free(args->in_sync.s);
return ret;
}
static void
nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args)
{
u_free(args->op.s);
u_free(args->in_sync.s);
u_free(args->out_sync.s);
}
int
nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_uvmm_bind_job_args args = {};
struct drm_nouveau_vm_bind *req = data;
int ret = 0;
if (unlikely(!nouveau_cli_uvmm_locked(cli)))
return -ENOSYS;
ret = nouveau_uvmm_vm_bind_ucopy(&args, req);
if (ret)
return ret;
args.sched_entity = &cli->sched_entity;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
if (ret)
goto out_free_args;
out_free_args:
nouveau_uvmm_vm_bind_ufree(&args);
return ret;
}
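/*
 * Helpers called from the BO move notifier: (re)map or unmap every GPUVA
 * backed by @nvbo and update its invalidated state.  The GEM's dma_resv must
 * be held.
 */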
void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
drm_gem_for_each_gpuva(va, obj) {
struct nouveau_uvma *uvma = uvma_from_va(va);
nouveau_uvma_map(uvma, mem);
drm_gpuva_invalidate(va, false);
}
}
void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
drm_gem_for_each_gpuva(va, obj) {
struct nouveau_uvma *uvma = uvma_from_va(va);
nouveau_uvma_unmap(uvma);
drm_gpuva_invalidate(va, true);
}
}
int
nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
u64 kernel_managed_addr, u64 kernel_managed_size)
{
int ret;
u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
mutex_init(&uvmm->mutex);
dma_resv_init(&uvmm->resv);
mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
mutex_lock(&cli->mutex);
if (unlikely(cli->uvmm.disabled)) {
ret = -ENOSYS;
goto out_unlock;
}
if (kernel_managed_end <= kernel_managed_addr) {
ret = -EINVAL;
goto out_unlock;
}
if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
ret = -EINVAL;
goto out_unlock;
}
uvmm->kernel_managed_addr = kernel_managed_addr;
uvmm->kernel_managed_size = kernel_managed_size;
drm_gpuva_manager_init(&uvmm->umgr, cli->name,
NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END,
kernel_managed_addr, kernel_managed_size,
NULL);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
kernel_managed_addr, kernel_managed_size,
NULL, 0, &cli->uvmm.vmm.vmm);
if (ret)
goto out_free_gpuva_mgr;
cli->uvmm.vmm.cli = cli;
mutex_unlock(&cli->mutex);
return 0;
out_free_gpuva_mgr:
drm_gpuva_manager_destroy(&uvmm->umgr);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
}
void
nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
{
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
if (!cli)
return;
rmb(); /* for list_empty to work without lock */
wait_event(entity->job.wq, list_empty(&entity->job.list.head));
nouveau_uvmm_lock(uvmm);
drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
struct nouveau_uvma *uvma = uvma_from_va(va);
struct drm_gem_object *obj = va->gem.obj;
if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
continue;
drm_gpuva_remove(va);
dma_resv_lock(obj->resv, NULL);
drm_gpuva_unlink(va);
dma_resv_unlock(obj->resv);
nouveau_uvma_unmap(uvma);
nouveau_uvma_vmm_put(uvma);
nouveau_uvma_gem_put(uvma);
nouveau_uvma_free(uvma);
}
mas_for_each(&mas, reg, ULONG_MAX) {
mas_erase(&mas);
nouveau_uvma_region_sparse_unref(reg);
nouveau_uvma_region_put(reg);
}
WARN(!mtree_empty(&uvmm->region_mt),
"nouveau_uvma_region tree not empty, potentially leaking memory.");
__mt_destroy(&uvmm->region_mt);
nouveau_uvmm_unlock(uvmm);
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
drm_gpuva_manager_destroy(&uvmm->umgr);
mutex_unlock(&cli->mutex);
dma_resv_fini(&uvmm->resv);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_uvmm.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
/*******************************************************************************
* NVIF client driver - NVKM directly linked
******************************************************************************/
#include <core/client.h>
#include <core/ioctl.h>
#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/event.h>
#include <nvif/ioctl.h>
#include "nouveau_drv.h"
#include "nouveau_usif.h"
static void
nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
{
iounmap(ptr);
}
static void __iomem *
nvkm_client_map(void *priv, u64 handle, u32 size)
{
return ioremap(handle, size);
}
static int
nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack)
{
return nvkm_ioctl(priv, data, size, hack);
}
static int
nvkm_client_resume(void *priv)
{
struct nvkm_client *client = priv;
return nvkm_object_init(&client->object);
}
static int
nvkm_client_suspend(void *priv)
{
struct nvkm_client *client = priv;
return nvkm_object_fini(&client->object, true);
}
static int
nvkm_client_event(u64 token, void *repv, u32 repc)
{
struct nvif_object *object = (void *)(unsigned long)token;
struct nvif_event *event = container_of(object, typeof(*event), object);
if (event->func(event, repv, repc) == NVIF_EVENT_KEEP)
return NVKM_EVENT_KEEP;
return NVKM_EVENT_DROP;
}
static int
nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
const char *dbg, void **ppriv)
{
return nvkm_client_new(name, device, cfg, dbg, nvkm_client_event,
(struct nvkm_client **)ppriv);
}
const struct nvif_driver
nvif_driver_nvkm = {
.name = "nvkm",
.init = nvkm_client_driver_init,
.suspend = nvkm_client_suspend,
.resume = nvkm_client_resume,
.ioctl = nvkm_client_ioctl,
.map = nvkm_client_map,
.unmap = nvkm_client_unmap,
.keep = false,
};
| linux-master | drivers/gpu/drm/nouveau/nouveau_nvif.c |
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/ttm/ttm_tt.h>
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/if500b.h>
#include <nvif/if500d.h>
#include <nvif/if900b.h>
#include <nvif/if900d.h>
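/*
 * Program the PTEs for @mem at the address described by @vma, passing the
 * chipset-specific kind/compression attributes where the VMM class needs
 * them.
 */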
int
nouveau_mem_map(struct nouveau_mem *mem,
struct nvif_vmm *vmm, struct nvif_vma *vma)
{
union {
struct nv50_vmm_map_v0 nv50;
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_NV04:
break;
case NVIF_CLASS_VMM_NV50:
args.nv50.version = 0;
args.nv50.ro = 0;
args.nv50.priv = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
argc = sizeof(args.nv50);
break;
case NVIF_CLASS_VMM_GF100:
case NVIF_CLASS_VMM_GM200:
case NVIF_CLASS_VMM_GP100:
args.gf100.version = 0;
if (mem->mem.type & NVIF_MEM_VRAM)
args.gf100.vol = 0;
else
args.gf100.vol = 1;
args.gf100.ro = 0;
args.gf100.priv = 0;
args.gf100.kind = mem->kind;
argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
return -ENOSYS;
}
return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0);
}
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
mutex_lock(&mem->cli->drm->master.lock);
nvif_mem_dtor(&mem->mem);
mutex_unlock(&mem->cli->drm->master.lock);
}
int
nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_mem_ram_v0 args = {};
u8 type;
int ret;
if (!nouveau_drm_use_coherent_gpu_mapping(drm))
type = drm->ttm.type_ncoh[!!mem->kind];
else
type = drm->ttm.type_host[0];
if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mem->comp = mem->kind = 0;
if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
mem->kind = mmu->kind[mem->kind];
mem->comp = 0;
}
if (tt->sg)
args.sgl = tt->sg->sgl;
else
args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
reg->size,
&args, sizeof(args), &mem->mem);
mutex_unlock(&drm->master.lock);
return ret;
}
int
nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
u64 size = ALIGN(reg->size, 1 << page);
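	/* e.g. with page == 16 (64KiB big pages), a 100KiB resource is
	 * rounded up here to 128KiB so the allocation covers whole pages.
	 */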
int ret;
mutex_lock(&drm->master.lock);
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
}, sizeof(struct gf100_mem_v0),
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
.contig = contig,
}, sizeof(struct nv50_mem_v0),
&mem->mem);
break;
default:
ret = -ENOSYS;
WARN_ON(1);
break;
}
mutex_unlock(&drm->master.lock);
reg->start = mem->mem.addr >> PAGE_SHIFT;
return ret;
}
void
nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
nouveau_mem_fini(mem);
ttm_resource_fini(man, reg);
kfree(mem);
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
struct ttm_resource **res)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
*res = &mem->base;
return 0;
}
bool
nouveau_mem_intersects(struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
u32 num_pages = PFN_UP(size);
/* Don't evict BOs outside of the requested placement range */
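	/* e.g. with place->fpfn == 0x100, a 0x40-page resource starting at
	 * 0x80 ends at 0xc0, entirely below the range, so it is not
	 * considered to intersect.
	 */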
if (place->fpfn >= (res->start + num_pages) ||
(place->lpfn && place->lpfn <= res->start))
return false;
return true;
}
bool
nouveau_mem_compatible(struct ttm_resource *res,
const struct ttm_place *place,
size_t size)
{
u32 num_pages = PFN_UP(size);
if (res->start < place->fpfn ||
(place->lpfn && (res->start + num_pages) > place->lpfn))
return false;
return true;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_mem.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include <nvif/push906f.h>
#include <nvhw/class/cla0b5.h>
int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 10);
if (ret)
return ret;
PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(mem->vma[0].addr)),
OFFSET_IN_LOWER, lower_32_bits(mem->vma[0].addr),
OFFSET_OUT_UPPER,
NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(mem->vma[1].addr)),
OFFSET_OUT_LOWER, lower_32_bits(mem->vma[1].addr),
PITCH_IN, PAGE_SIZE,
PITCH_OUT, PAGE_SIZE,
LINE_LENGTH_IN, PAGE_SIZE,
LINE_COUNT, PFN_UP(new_reg->size));
PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING) |
NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, VIRTUAL) |
NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, VIRTUAL));
return 0;
}
int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
if (ret)
return ret;
PUSH_NVSQ(push, NVA0B5, 0x0000, handle & 0x0000ffff);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_boa0b5.c |
/*
* Copyright 2007 Dave Airlied
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Dave Airlied <[email protected]>
* Ben Skeggs <[email protected]>
* Jeremy Kolb <[email protected]>
*/
#include "nouveau_bo.h"
#include "nouveau_dma.h"
#include "nouveau_drv.h"
#include <nvif/push006c.h>
#include <nvhw/class/cl0039.h>
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_resource *reg)
{
if (reg->mem_type == TTM_PL_TT)
return NvDmaTT;
return chan->vram.handle;
}
int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nvif_push *push = chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
u32 dst_offset = new_reg->start << PAGE_SHIFT;
u32 page_count = PFN_UP(new_reg->size);
int ret;
ret = PUSH_WAIT(push, 3);
if (ret)
return ret;
PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
page_count = PFN_UP(new_reg->size);
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
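		/* e.g. a 2048-page buffer is copied as one 2047-line
		 * transfer followed by a single-line transfer.
		 */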
ret = PUSH_WAIT(push, 11);
if (ret)
return ret;
PUSH_MTHD(push, NV039, OFFSET_IN, src_offset,
OFFSET_OUT, dst_offset,
PITCH_IN, PAGE_SIZE,
PITCH_OUT, PAGE_SIZE,
LINE_LENGTH_IN, PAGE_SIZE,
LINE_COUNT, line_count,
FORMAT,
NVVAL(NV039, FORMAT, IN, 1) |
NVVAL(NV039, FORMAT, OUT, 1),
BUFFER_NOTIFY, NV039_BUFFER_NOTIFY_WRITE_ONLY);
PUSH_MTHD(push, NV039, NO_OPERATION, 0x00000000);
page_count -= line_count;
src_offset += (PAGE_SIZE * line_count);
dst_offset += (PAGE_SIZE * line_count);
}
return 0;
}
int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
struct nvif_push *push = chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 4);
if (ret)
return ret;
PUSH_MTHD(push, NV039, SET_OBJECT, handle);
PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_bo0039.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nv50_display.h"
#include <nvif/push906f.h>
#include <nvhw/class/cl906f.h>
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
struct nvif_push *push = chan->chan.push;
int ret = PUSH_WAIT(push, 6);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
SEMAPHOREB, lower_32_bits(virtual),
SEMAPHOREC, sequence,
SEMAPHORED,
NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, EN) |
NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE),
NON_STALL_INTERRUPT, 0);
PUSH_KICK(push);
}
return ret;
}
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
struct nvif_push *push = chan->chan.push;
int ret = PUSH_WAIT(push, 5);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
SEMAPHOREB, lower_32_bits(virtual),
SEMAPHOREC, sequence,
SEMAPHORED,
NVDEF(NV906F, SEMAPHORED, OPERATION, ACQ_GEQ) |
NVDEF(NV906F, SEMAPHORED, ACQUIRE_SWITCH, ENABLED));
PUSH_KICK(push);
}
return ret;
}
static int
nvc0_fence_context_new(struct nouveau_channel *chan)
{
int ret = nv84_fence_context_new(chan);
if (ret == 0) {
struct nv84_fence_chan *fctx = chan->fence;
fctx->base.emit32 = nvc0_fence_emit32;
fctx->base.sync32 = nvc0_fence_sync32;
}
return ret;
}
int
nvc0_fence_create(struct nouveau_drm *drm)
{
int ret = nv84_fence_create(drm);
if (ret == 0) {
struct nv84_fence_priv *priv = drm->fence;
priv->base.context_new = nvc0_fence_context_new;
}
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvc0_fence.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_vmm.h"
#include "nv50_display.h"
#include <nvif/push206e.h>
#include <nvhw/class/cl826f.h>
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
struct nvif_push *push = chan->chan.push;
int ret = PUSH_WAIT(push, 8);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
PUSH_MTHD(push, NV826F, SEMAPHOREA,
NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
SEMAPHOREB, lower_32_bits(virtual),
SEMAPHOREC, sequence,
SEMAPHORED,
NVDEF(NV826F, SEMAPHORED, OPERATION, RELEASE),
NON_STALLED_INTERRUPT, 0);
PUSH_KICK(push);
}
return ret;
}
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
struct nvif_push *push = chan->chan.push;
int ret = PUSH_WAIT(push, 7);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
PUSH_MTHD(push, NV826F, SEMAPHOREA,
NVVAL(NV826F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(virtual)),
SEMAPHOREB, lower_32_bits(virtual),
SEMAPHOREC, sequence,
SEMAPHORED,
NVDEF(NV826F, SEMAPHORED, OPERATION, ACQ_GEQ));
PUSH_KICK(push);
}
return ret;
}
static inline u32
nv84_fence_chid(struct nouveau_channel *chan)
{
return chan->drm->runl[chan->runlist].chan_id_base + chan->chid;
}
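/* Each channel owns a 16-byte slot in the shared fence buffer, indexed by
 * the global channel id above; e.g. chid 3 uses byte offset 48, which the
 * nouveau_bo_rd32()/wr32() accessors below address as word offset 12.
 */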
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nv84_fence_chan *fctx = chan->fence;
u64 addr = fctx->vma->addr + nv84_fence_chid(chan) * 16;
return fctx->base.emit32(chan, addr, fence->base.seqno);
}
static int
nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nv84_fence_chan *fctx = chan->fence;
u64 addr = fctx->vma->addr + nv84_fence_chid(prev) * 16;
return fctx->base.sync32(chan, addr, fence->base.seqno);
}
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
struct nv84_fence_priv *priv = chan->drm->fence;
return nouveau_bo_rd32(priv->bo, nv84_fence_chid(chan) * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, nv84_fence_chid(chan) * 16 / 4, fctx->base.sequence);
mutex_lock(&priv->mutex);
nouveau_vma_del(&fctx->vma);
mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
}
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nouveau_fence_context_new(chan, &fctx->base);
fctx->base.emit = nv84_fence_emit;
fctx->base.sync = nv84_fence_sync;
fctx->base.read = nv84_fence_read;
fctx->base.emit32 = nv84_fence_emit32;
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
ret = nouveau_vma_new(priv->bo, chan->vmm, &fctx->vma);
mutex_unlock(&priv->mutex);
if (ret)
nv84_fence_context_del(chan);
return ret;
}
static bool
nv84_fence_suspend(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
int i;
priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan_total));
if (priv->suspend) {
for (i = 0; i < drm->chan_total; i++)
priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
}
return priv->suspend != NULL;
}
static void
nv84_fence_resume(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
int i;
if (priv->suspend) {
for (i = 0; i < drm->chan_total; i++)
nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
vfree(priv->suspend);
priv->suspend = NULL;
}
}
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
int
nv84_fence_create(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv;
u32 domain;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.dtor = nv84_fence_destroy;
priv->base.suspend = nv84_fence_suspend;
priv->base.resume = nv84_fence_resume;
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
priv->base.uevent = true;
mutex_init(&priv->mutex);
	/* Use VRAM if there is any; otherwise fall back to system memory */
domain = drm->client.device.info.ram_size != 0 ?
NOUVEAU_GEM_DOMAIN_VRAM :
/*
* fences created in sysmem must be non-cached or we
* will lose CPU/GPU coherency!
*/
NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0,
domain, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, domain, false);
if (ret == 0) {
ret = nouveau_bo_map(priv->bo);
if (ret)
nouveau_bo_unpin(priv->bo);
}
if (ret)
nouveau_bo_ref(NULL, &priv->bo);
}
if (ret)
nv84_fence_destroy(drm);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nv84_fence.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
if (!cli->abi16) {
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
struct nv_device_v0 args = {
.device = ~0ULL,
};
INIT_LIST_HEAD(&abi16->channels);
/* allocate device object targeting client's default
* device (ie. the one that belongs to the fd it
* opened)
*/
if (nvif_device_ctor(&cli->base.object, "abi16Device",
0, NV_DEVICE, &args, sizeof(args),
&abi16->device) == 0)
return cli->abi16;
kfree(cli->abi16);
cli->abi16 = NULL;
}
}
return cli->abi16;
}
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
mutex_lock(&cli->mutex);
if (nouveau_abi16(file_priv))
return cli->abi16;
mutex_unlock(&cli->mutex);
return NULL;
}
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
struct nouveau_cli *cli = (void *)abi16->device.object.client;
mutex_unlock(&cli->mutex);
return ret;
}
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
switch (drm->client.device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
return NVIF_CLASS_SW_NV04;
case NV_DEVICE_INFO_V0_CELSIUS:
case NV_DEVICE_INFO_V0_KELVIN:
case NV_DEVICE_INFO_V0_RANKINE:
case NV_DEVICE_INFO_V0_CURIE:
return NVIF_CLASS_SW_NV10;
case NV_DEVICE_INFO_V0_TESLA:
return NVIF_CLASS_SW_NV50;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
return NVIF_CLASS_SW_GF100;
}
return 0x0000;
}
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
struct nouveau_abi16_ntfy *ntfy)
{
nvif_object_dtor(&ntfy->object);
nvkm_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
kfree(ntfy);
}
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
struct nouveau_abi16_chan *chan)
{
struct nouveau_abi16_ntfy *ntfy, *temp;
	/* When a client exits without waiting for its queued-up jobs to
	 * finish, it might happen that we fault the channel. This is due to
* drm_file_free() calling drm_gem_release() before the postclose()
* callback. Hence, we can't tear down this scheduler entity before
* uvmm mappings are unmapped. Currently, we can't detect this case.
*
* However, this should be rare and harmless, since the channel isn't
* needed anymore.
*/
nouveau_sched_entity_fini(&chan->sched_entity);
/* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
}
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
drm_gem_object_put(&chan->ntfy->bo.base);
}
if (chan->heap.block_size)
nvkm_mm_fini(&chan->heap);
/* destroy channel object, all children will be killed too */
if (chan->chan) {
nvif_object_dtor(&chan->ce);
nouveau_channel_del(&chan->chan);
}
list_del(&chan->head);
kfree(chan);
}
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
struct nouveau_cli *cli = (void *)abi16->device.object.client;
struct nouveau_abi16_chan *chan, *temp;
/* cleanup channels */
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
nouveau_abi16_chan_fini(abi16, chan);
}
/* destroy the device object */
nvif_device_dtor(&abi16->device);
kfree(cli->abi16);
cli->abi16 = NULL;
}
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
switch (device->info.platform) {
case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
case NV_DEVICE_INFO_V0_IGP :
if (!pci_is_pcie(pdev))
getparam->value = 1;
else
getparam->value = 2;
break;
default:
WARN_ON(1);
break;
}
break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = drm->gem.vram_available;
break;
case NOUVEAU_GETPARAM_AGP_SIZE:
getparam->value = drm->gem.gart_available;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = nvif_device_time(device);
break;
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
getparam->value = nvkm_gr_units(gr);
break;
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
return 0;
}
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_alloc *init = data;
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nvif_device *device;
u64 engine, runm;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
	/* If uvmm wasn't initialized until now, disable it completely to prevent
* userspace from mixing up UAPIs.
*
* The client lock is already acquired by nouveau_abi16_get().
*/
__nouveau_cli_disable_uvmm_noinit(cli);
device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
/* hack to allow channel engine type specification on kepler */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle == ~0) {
switch (init->tt_ctxdma_handle) {
case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break;
case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break;
default:
return nouveau_abi16_put(abi16, -ENOSYS);
}
init->fb_ctxdma_handle = 0;
init->tt_ctxdma_handle = 0;
}
}
if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
runm = nvif_fifo_runlist(device, engine);
else
runm = nvif_fifo_runlist_ce(device);
if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
/* allocate "abi16 channel" data and make up a handle for it */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return nouveau_abi16_put(abi16, -ENOMEM);
INIT_LIST_HEAD(&chan->notifiers);
list_add(&chan->head, &abi16->channels);
/* create channel object and initialise dma and fence management */
ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
init->channel = chan->chan->chid;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
init->subchan[1].handle = chan->chan->nvsw.handle;
init->subchan[1].grclass = 0x506e;
init->nr_subchan = 2;
}
	/* Work around the "nvc0" gallium driver using classes it doesn't allocate on
* Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
* channel init, now we know what that stuff actually is.
*
* Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
*
* Userspace was fixed prior to adding Ampere support.
*/
switch (device->info.family) {
case NV_DEVICE_INFO_V0_VOLTA:
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
NULL, 0, &chan->ce);
if (ret)
goto done;
break;
case NV_DEVICE_INFO_V0_TURING:
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
NULL, 0, &chan->ce);
if (ret)
goto done;
break;
default:
break;
}
/* Named memory object area */
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
false);
if (ret)
goto done;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
&chan->ntfy_vma);
if (ret)
goto done;
}
ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
&init->notifier_handle);
if (ret)
goto done;
ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
if (ret)
nouveau_abi16_chan_fini(abi16, chan);
return nouveau_abi16_put(abi16, ret);
}
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
struct nouveau_abi16_chan *chan;
list_for_each_entry(chan, &abi16->channels, head) {
if (chan->chan->chid == channel)
return chan;
}
return NULL;
}
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
union {
struct nvif_ioctl_v0 v0;
} *args = data;
struct nouveau_abi16_chan *chan;
struct nouveau_abi16 *abi16;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
switch (args->v0.type) {
case NVIF_IOCTL_V0_NEW:
case NVIF_IOCTL_V0_MTHD:
case NVIF_IOCTL_V0_SCLASS:
break;
default:
return -EACCES;
}
} else
return ret;
if (!(abi16 = nouveau_abi16(file_priv)))
return -ENOMEM;
if (args->v0.token != ~0ULL) {
if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
return -EINVAL;
args->v0.object = nvif_handle(&chan->chan->user);
args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
return 0;
}
args->v0.object = nvif_handle(&abi16->device.object);
args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
return 0;
}
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
if (unlikely(!abi16))
return -ENOMEM;
chan = nouveau_abi16_chan(abi16, req->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
nouveau_abi16_chan_fini(abi16, chan);
return nouveau_abi16_put(abi16, 0);
}
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
struct nvif_client *client;
struct nvif_sclass *sclass;
s32 oclass = 0;
int ret, i;
if (unlikely(!abi16))
return -ENOMEM;
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, init->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
if (ret < 0)
return nouveau_abi16_put(abi16, ret);
if ((init->class & 0x00ff) == 0x006e) {
/* nvsw: compatibility with older 0x*6e class identifier */
for (i = 0; !oclass && i < ret; i++) {
switch (sclass[i].oclass) {
case NVIF_CLASS_SW_NV04:
case NVIF_CLASS_SW_NV10:
case NVIF_CLASS_SW_NV50:
case NVIF_CLASS_SW_GF100:
oclass = sclass[i].oclass;
break;
default:
break;
}
}
} else
if ((init->class & 0x00ff) == 0x00b1) {
/* msvld: compatibility with incorrect version exposure */
for (i = 0; i < ret; i++) {
if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
oclass = sclass[i].oclass;
break;
}
}
} else
if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
/* mspdec: compatibility with incorrect version exposure */
for (i = 0; i < ret; i++) {
if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
oclass = sclass[i].oclass;
break;
}
}
} else
if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
/* msppp: compatibility with incorrect version exposure */
for (i = 0; i < ret; i++) {
if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
oclass = sclass[i].oclass;
break;
}
}
} else {
oclass = init->class;
}
nvif_object_sclass_put(&sclass);
if (!oclass)
return nouveau_abi16_put(abi16, -EINVAL);
ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
if (!ntfy)
return nouveau_abi16_put(abi16, -ENOMEM);
list_add(&ntfy->head, &chan->notifiers);
client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
oclass, NULL, 0, &ntfy->object);
client->route = NVDRM_OBJECT_NVIF;
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_notifierobj_alloc *info = data;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
struct nvif_device *device = &abi16->device;
struct nvif_client *client;
struct nv_dma_v0 args = {};
int ret;
if (unlikely(!abi16))
return -ENOMEM;
/* completely unnecessary for these chipsets... */
if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
if (!ntfy)
return nouveau_abi16_put(abi16, -ENOMEM);
list_add(&ntfy->head, &chan->notifiers);
ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
&ntfy->node);
if (ret)
goto done;
args.start = ntfy->node->offset;
args.limit = ntfy->node->offset + ntfy->node->length - 1;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start += chan->ntfy_vma->addr;
args.limit += chan->ntfy_vma->addr;
} else
if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start += drm->agp.base + chan->ntfy->offset;
args.limit += drm->agp.base + chan->ntfy->offset;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start += chan->ntfy->offset;
args.limit += chan->ntfy->offset;
}
client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
info->offset = ntfy->node->offset;
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
int ret = -ENOENT;
if (unlikely(!abi16))
return -ENOMEM;
chan = nouveau_abi16_chan(abi16, fini->channel);
if (!chan)
return nouveau_abi16_put(abi16, -EINVAL);
/* synchronize with the user channel and destroy the gpu object */
nouveau_channel_idle(chan->chan);
list_for_each_entry(ntfy, &chan->notifiers, head) {
if (ntfy->object.handle == fini->handle) {
nouveau_abi16_ntfy_fini(chan, ntfy);
ret = 0;
break;
}
}
return nouveau_abi16_put(abi16, ret);
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_abi16.c |
// SPDX-License-Identifier: MIT
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/mxm-wmi.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_edid.h>
#include <acpi/video.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#define NOUVEAU_DSM_LED 0x02
#define NOUVEAU_DSM_LED_STATE 0x00
#define NOUVEAU_DSM_LED_OFF 0x10
#define NOUVEAU_DSM_LED_STAMINA 0x11
#define NOUVEAU_DSM_LED_SPEED 0x12
#define NOUVEAU_DSM_POWER 0x03
#define NOUVEAU_DSM_POWER_STATE 0x00
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
/* result of the optimus caps function */
#define OPTIMUS_ENABLED (1 << 0)
#define OPTIMUS_STATUS_MASK (3 << 3)
#define OPTIMUS_STATUS_OFF (0 << 3)
#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
#define OPTIMUS_CAPS_MASK (7 << 24)
#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
bool optimus_flags_detected;
bool optimus_skip_dsm;
acpi_handle dhandle;
} nouveau_dsm_priv;
bool nouveau_is_optimus(void) {
return nouveau_dsm_priv.optimus_detected;
}
bool nouveau_is_v1_dsm(void) {
return nouveau_dsm_priv.dsm_detected;
}
#ifdef CONFIG_VGA_SWITCHEROO
static const guid_t nouveau_dsm_muid =
GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
static const guid_t nouveau_op_dsm_muid =
GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
int i;
union acpi_object *obj;
char args_buff[4];
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
.buffer.length = 4,
.buffer.pointer = args_buff
};
/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
for (i = 0; i < 4; i++)
args_buff[i] = (arg >> i * 8) & 0xFF;
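	/* e.g. arg = 0x11223344 is packed as args_buff[] = { 0x44, 0x33,
	 * 0x22, 0x11 }, least-significant byte first.
	 */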
*result = 0;
obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
func, &argv4, ACPI_TYPE_BUFFER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
return AE_ERROR;
} else {
if (obj->buffer.length == 4) {
*result |= obj->buffer.pointer[0];
*result |= (obj->buffer.pointer[1] << 8);
*result |= (obj->buffer.pointer[2] << 16);
*result |= (obj->buffer.pointer[3] << 24);
}
ACPI_FREE(obj);
}
return 0;
}
/*
* On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
 * requirements on the fourth parameter, so a private implementation
 * is used here instead of acpi_check_dsm().
*/
static int nouveau_dsm_get_optimus_functions(acpi_handle handle)
{
int result;
/*
* Function 0 returns a Buffer containing available functions.
* The args parameter is ignored for function 0, so just put 0 in it
*/
if (nouveau_optimus_dsm(handle, 0, 0, &result))
return 0;
/*
* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
* If the n-th bit is enabled, function n is supported
*/
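	/* e.g. result = 0x04000001 sets bit 0 and bit 0x1a, advertising
	 * function 0 and NOUVEAU_DSM_OPTIMUS_CAPS (0x1a), which is exactly
	 * what the check below requires.
	 */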
if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS))
return result;
return 0;
}
static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
int ret = 0;
union acpi_object *obj;
union acpi_object argv4 = {
.integer.type = ACPI_TYPE_INTEGER,
.integer.value = arg,
};
obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
func, &argv4, ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
return AE_ERROR;
} else {
if (obj->integer.value == 0x80000002)
ret = -ENODEV;
ACPI_FREE(obj);
}
return ret;
}
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id);
}
static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
{
int arg;
if (state == VGA_SWITCHEROO_ON)
arg = NOUVEAU_DSM_POWER_SPEED;
else
arg = NOUVEAU_DSM_POWER_STAMINA;
nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg);
return 0;
}
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
if (!nouveau_dsm_priv.dsm_detected)
return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
}
static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
if (id == VGA_SWITCHEROO_IGD)
return 0;
/* Optimus laptops have the card already disabled in
* nouveau_switcheroo_set_state */
if (!nouveau_dsm_priv.dsm_detected)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
static enum vga_switcheroo_client_id nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
/* easy option one - intel vendor ID means Integrated */
if (pdev->vendor == PCI_VENDOR_ID_INTEL)
return VGA_SWITCHEROO_IGD;
/* is this device on Bus 0? - this may need improving */
if (pdev->bus->number == 0)
return VGA_SWITCHEROO_IGD;
return VGA_SWITCHEROO_DIS;
}
static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.switchto = nouveau_dsm_switchto,
.power_state = nouveau_dsm_power_state,
.get_client_id = nouveau_dsm_get_client_id,
};
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
bool *has_mux, bool *has_opt,
bool *has_opt_flags, bool *has_pr3)
{
acpi_handle dhandle;
bool supports_mux;
int optimus_funcs;
struct pci_dev *parent_pdev;
if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
return;
*has_pr3 = false;
parent_pdev = pci_upstream_bridge(pdev);
if (parent_pdev) {
if (parent_pdev->bridge_d3)
*has_pr3 = pci_pr3_present(parent_pdev);
else
pci_d3cold_disable(pdev);
}
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return;
if (!acpi_has_method(dhandle, "_DSM"))
return;
supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
1 << NOUVEAU_DSM_POWER);
optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
	/* Does not look like an Nvidia device. */
if (!supports_mux && !optimus_funcs)
return;
*dhandle_out = dhandle;
*has_mux = supports_mux;
*has_opt = !!optimus_funcs;
*has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
if (optimus_funcs) {
uint32_t result;
nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
&result);
dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
}
}
static bool nouveau_dsm_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
acpi_handle dhandle = NULL;
bool has_mux = false;
bool has_optimus = false;
bool has_optimus_flags = false;
bool has_power_resources = false;
int vga_count = 0;
bool guid_valid;
bool ret = false;
/* lookup the MXM GUID */
guid_valid = mxm_wmi_supported();
if (guid_valid)
printk("MXM: GUID detected in BIOS\n");
/* now do DSM detection */
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
&has_optimus_flags, &has_power_resources);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
vga_count++;
nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
&has_optimus_flags, &has_power_resources);
}
/* find the optimus DSM or the old v1 DSM */
if (has_optimus) {
nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
pr_info("VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
if (has_power_resources)
pr_info("nouveau: detected PR support, will not use DSM\n");
nouveau_dsm_priv.optimus_detected = true;
nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags;
nouveau_dsm_priv.optimus_skip_dsm = has_power_resources;
ret = true;
} else if (vga_count == 2 && has_mux && guid_valid) {
nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
pr_info("VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
return ret;
}
void nouveau_register_dsm_handler(void)
{
bool r;
r = nouveau_dsm_detect();
if (!r)
return;
vga_switcheroo_register_handler(&nouveau_dsm_handler, 0);
}
/* Must be called for Optimus models before the card can be turned off */
void nouveau_switcheroo_optimus_dsm(void)
{
u32 result = 0;
if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm)
return;
if (nouveau_dsm_priv.optimus_flags_detected)
nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
0x3, &result);
nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
}
void nouveau_unregister_dsm_handler(void)
{
if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
vga_switcheroo_unregister_handler();
}
#else
void nouveau_register_dsm_handler(void) {}
void nouveau_unregister_dsm_handler(void) {}
void nouveau_switcheroo_optimus_dsm(void) {}
#endif
void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
struct acpi_device *acpidev;
int type, ret;
void *edid;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
type = ACPI_VIDEO_DISPLAY_LCD;
break;
default:
return NULL;
}
acpidev = ACPI_COMPANION(dev->dev);
if (!acpidev)
return NULL;
ret = acpi_video_get_edid(acpidev, type, -1, &edid);
if (ret < 0)
return NULL;
return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
}
bool nouveau_acpi_video_backlight_use_native(void)
{
return acpi_video_backlight_use_native();
}
void nouveau_acpi_video_register_backlight(void)
{
acpi_video_register_backlight();
}
| linux-master | drivers/gpu/drm/nouveau/nouveau_acpi.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/option.h>
#include <core/debug.h>
const char *
nvkm_stropt(const char *optstr, const char *opt, int *arglen)
{
while (optstr && *optstr != '\0') {
int len = strcspn(optstr, ",=");
switch (optstr[len]) {
case '=':
if (!strncasecmpz(optstr, opt, len)) {
optstr += len + 1;
*arglen = strcspn(optstr, ",=");
return *arglen ? optstr : NULL;
}
optstr++;
break;
case ',':
optstr++;
break;
default:
break;
}
optstr += len;
}
return NULL;
}
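/* e.g. nvkm_stropt("debug=info,mmu=trace", "mmu", &len) returns a pointer
 * to the "trace" substring and sets len to 5; nvkm_boolopt() and
 * nvkm_longopt() below interpret such substrings as booleans/integers.
 */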
bool
nvkm_boolopt(const char *optstr, const char *opt, bool value)
{
int arglen;
optstr = nvkm_stropt(optstr, opt, &arglen);
if (optstr) {
if (!strncasecmpz(optstr, "0", arglen) ||
!strncasecmpz(optstr, "no", arglen) ||
!strncasecmpz(optstr, "off", arglen) ||
!strncasecmpz(optstr, "false", arglen))
value = false;
else
if (!strncasecmpz(optstr, "1", arglen) ||
!strncasecmpz(optstr, "yes", arglen) ||
!strncasecmpz(optstr, "on", arglen) ||
!strncasecmpz(optstr, "true", arglen))
value = true;
}
return value;
}
long
nvkm_longopt(const char *optstr, const char *opt, long value)
{
long result = value;
int arglen;
char *s;
optstr = nvkm_stropt(optstr, opt, &arglen);
if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
int ret = kstrtol(s, 0, &value);
if (ret == 0)
result = value;
kfree(s);
}
return result;
}
int
nvkm_dbgopt(const char *optstr, const char *sub)
{
int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
while (optstr) {
int len = strcspn(optstr, ",=");
switch (optstr[len]) {
case '=':
if (strncasecmpz(optstr, sub, len))
mode = 0;
optstr++;
break;
default:
if (mode) {
if (!strncasecmpz(optstr, "fatal", len))
level = NV_DBG_FATAL;
else if (!strncasecmpz(optstr, "error", len))
level = NV_DBG_ERROR;
else if (!strncasecmpz(optstr, "warn", len))
level = NV_DBG_WARN;
else if (!strncasecmpz(optstr, "info", len))
level = NV_DBG_INFO;
else if (!strncasecmpz(optstr, "debug", len))
level = NV_DBG_DEBUG;
else if (!strncasecmpz(optstr, "trace", len))
level = NV_DBG_TRACE;
else if (!strncasecmpz(optstr, "paranoia", len))
level = NV_DBG_PARANOIA;
else if (!strncasecmpz(optstr, "spam", len))
level = NV_DBG_SPAM;
}
if (optstr[len] != '\0') {
optstr++;
mode = 1;
break;
}
return level;
}
optstr += len;
}
return level;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/option.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/client.h>
#include <core/engine.h>
struct nvkm_object *
nvkm_object_search(struct nvkm_client *client, u64 handle,
const struct nvkm_object_func *func)
{
struct nvkm_object *object;
if (handle) {
struct rb_node *node = client->objroot.rb_node;
while (node) {
object = rb_entry(node, typeof(*object), node);
if (handle < object->object)
node = node->rb_left;
else
if (handle > object->object)
node = node->rb_right;
else
goto done;
}
return ERR_PTR(-ENOENT);
} else {
object = &client->object;
}
done:
if (unlikely(func && object->func != func))
return ERR_PTR(-EINVAL);
return object;
}
void
nvkm_object_remove(struct nvkm_object *object)
{
if (!RB_EMPTY_NODE(&object->node))
rb_erase(&object->node, &object->client->objroot);
}
bool
nvkm_object_insert(struct nvkm_object *object)
{
struct rb_node **ptr = &object->client->objroot.rb_node;
struct rb_node *parent = NULL;
while (*ptr) {
struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
parent = *ptr;
if (object->object < this->object)
ptr = &parent->rb_left;
else
if (object->object > this->object)
ptr = &parent->rb_right;
else
return false;
}
rb_link_node(&object->node, parent, ptr);
rb_insert_color(&object->node, &object->client->objroot);
return true;
}
int
nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
if (likely(object->func->mthd))
return object->func->mthd(object, mthd, data, size);
return -ENODEV;
}
int
nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
struct nvkm_event **pevent)
{
if (likely(object->func->ntfy))
return object->func->ntfy(object, mthd, pevent);
return -ENODEV;
}
int
nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
if (likely(object->func->map))
return object->func->map(object, argv, argc, type, addr, size);
return -ENODEV;
}
int
nvkm_object_unmap(struct nvkm_object *object)
{
if (likely(object->func->unmap))
return object->func->unmap(object);
return -ENODEV;
}
int
nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
{
if (likely(object->func->rd08))
return object->func->rd08(object, addr, data);
return -ENODEV;
}
int
nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
{
if (likely(object->func->rd16))
return object->func->rd16(object, addr, data);
return -ENODEV;
}
int
nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
if (likely(object->func->rd32))
return object->func->rd32(object, addr, data);
return -ENODEV;
}
int
nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
if (likely(object->func->wr08))
return object->func->wr08(object, addr, data);
return -ENODEV;
}
int
nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
if (likely(object->func->wr16))
return object->func->wr16(object, addr, data);
return -ENODEV;
}
int
nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
if (likely(object->func->wr32))
return object->func->wr32(object, addr, data);
return -ENODEV;
}
int
nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
if (object->func->bind)
return object->func->bind(object, gpuobj, align, pgpuobj);
return -ENODEV;
}
int
nvkm_object_fini(struct nvkm_object *object, bool suspend)
{
const char *action = suspend ? "suspend" : "fini";
struct nvkm_object *child;
s64 time;
int ret;
nvif_debug(object, "%s children...\n", action);
time = ktime_to_us(ktime_get());
list_for_each_entry_reverse(child, &object->tree, head) {
ret = nvkm_object_fini(child, suspend);
if (ret && suspend)
goto fail_child;
}
nvif_debug(object, "%s running...\n", action);
if (object->func->fini) {
ret = object->func->fini(object, suspend);
if (ret) {
nvif_error(object, "%s failed with %d\n", action, ret);
if (suspend)
goto fail;
}
}
time = ktime_to_us(ktime_get()) - time;
nvif_debug(object, "%s completed in %lldus\n", action, time);
return 0;
fail:
if (object->func->init) {
int rret = object->func->init(object);
if (rret)
nvif_fatal(object, "failed to restart, %d\n", rret);
}
fail_child:
list_for_each_entry_continue_reverse(child, &object->tree, head) {
nvkm_object_init(child);
}
return ret;
}
int
nvkm_object_init(struct nvkm_object *object)
{
struct nvkm_object *child;
s64 time;
int ret;
nvif_debug(object, "init running...\n");
time = ktime_to_us(ktime_get());
if (object->func->init) {
ret = object->func->init(object);
if (ret)
goto fail;
}
nvif_debug(object, "init children...\n");
list_for_each_entry(child, &object->tree, head) {
ret = nvkm_object_init(child);
if (ret)
goto fail_child;
}
time = ktime_to_us(ktime_get()) - time;
nvif_debug(object, "init completed in %lldus\n", time);
return 0;
fail_child:
list_for_each_entry_continue_reverse(child, &object->tree, head)
nvkm_object_fini(child, false);
fail:
nvif_error(object, "init failed with %d\n", ret);
if (object->func->fini)
object->func->fini(object, false);
return ret;
}
void *
nvkm_object_dtor(struct nvkm_object *object)
{
struct nvkm_object *child, *ctemp;
void *data = object;
s64 time;
nvif_debug(object, "destroy children...\n");
time = ktime_to_us(ktime_get());
list_for_each_entry_safe(child, ctemp, &object->tree, head) {
nvkm_object_del(&child);
}
nvif_debug(object, "destroy running...\n");
nvkm_object_unmap(object);
if (object->func->dtor)
data = object->func->dtor(object);
nvkm_engine_unref(&object->engine);
time = ktime_to_us(ktime_get()) - time;
nvif_debug(object, "destroy completed in %lldus...\n", time);
return data;
}
void
nvkm_object_del(struct nvkm_object **pobject)
{
struct nvkm_object *object = *pobject;
if (object && !WARN_ON(!object->func)) {
*pobject = nvkm_object_dtor(object);
nvkm_object_remove(object);
list_del(&object->head);
kfree(*pobject);
*pobject = NULL;
}
}
void
nvkm_object_ctor(const struct nvkm_object_func *func,
const struct nvkm_oclass *oclass, struct nvkm_object *object)
{
object->func = func;
object->client = oclass->client;
object->engine = nvkm_engine_ref(oclass->engine);
object->oclass = oclass->base.oclass;
object->handle = oclass->handle;
object->route = oclass->route;
object->token = oclass->token;
object->object = oclass->object;
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
RB_CLEAR_NODE(&object->node);
WARN_ON(IS_ERR(object->engine));
}
int
nvkm_object_new_(const struct nvkm_object_func *func,
const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
if (size == 0) {
if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(func, oclass, *pobject);
return 0;
}
return -ENOSYS;
}
static const struct nvkm_object_func
nvkm_object_func = {
};
int
nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
const struct nvkm_object_func *func =
oclass->base.func ? oclass->base.func : &nvkm_object_func;
return nvkm_object_new_(func, oclass, data, size, pobject);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/object.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <core/memory.h>
#include <core/mm.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
void
nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
struct nvkm_tags **ptags)
{
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags = *ptags;
if (tags) {
mutex_lock(&fb->tags.mutex);
if (refcount_dec_and_test(&tags->refcount)) {
nvkm_mm_free(&fb->tags.mm, &tags->mn);
kfree(memory->tags);
memory->tags = NULL;
}
mutex_unlock(&fb->tags.mutex);
*ptags = NULL;
}
}
int
nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
struct nvkm_tags **ptags)
{
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags;
mutex_lock(&fb->tags.mutex);
if ((tags = memory->tags)) {
/* If comptags exist for the memory, but a different amount
* than requested, the buffer is being mapped with settings
* that are incompatible with existing mappings.
*/
if (tags->mn && tags->mn->length != nr) {
mutex_unlock(&fb->tags.mutex);
return -EINVAL;
}
refcount_inc(&tags->refcount);
mutex_unlock(&fb->tags.mutex);
*ptags = tags;
return 0;
}
if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
mutex_unlock(&fb->tags.mutex);
return -ENOMEM;
}
if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
if (clr)
clr(device, tags->mn->offset, tags->mn->length);
} else {
/* Failure to allocate HW comptags is not an error, the
* caller should fall back to an uncompressed map.
*
* As memory can be mapped in multiple places, we still
* need to track the allocation failure and ensure that
* any additional mappings remain uncompressed.
*
* This is handled by returning an empty nvkm_tags.
*/
tags->mn = NULL;
}
refcount_set(&tags->refcount, 1);
*ptags = memory->tags = tags;
mutex_unlock(&fb->tags.mutex);
return 0;
}
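/* Illustrative sketch of a (hypothetical) caller - not part of this file:
 * comptags are requested when mapping compressible memory and released again
 * when the last mapping goes away.  A NULL "clr" is fine if the caller will
 * clear the tags itself.
 *
 *	struct nvkm_tags *tags;
 *
 *	if (!nvkm_memory_tags_get(memory, device, blocks, NULL, &tags)) {
 *		if (tags->mn) {
 *			... map compressed, using tags->mn->offset ...
 *		} else {
 *			... no HW comptags available, map uncompressed ...
 *		}
 *		nvkm_memory_tags_put(memory, device, &tags);
 *	}
 */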
void
nvkm_memory_ctor(const struct nvkm_memory_func *func,
struct nvkm_memory *memory)
{
memory->func = func;
kref_init(&memory->kref);
}
static void
nvkm_memory_del(struct kref *kref)
{
struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
if (!WARN_ON(!memory->func)) {
if (memory->func->dtor)
memory = memory->func->dtor(memory);
kfree(memory);
}
}
void
nvkm_memory_unref(struct nvkm_memory **pmemory)
{
struct nvkm_memory *memory = *pmemory;
if (memory) {
kref_put(&memory->kref, nvkm_memory_del);
*pmemory = NULL;
}
}
struct nvkm_memory *
nvkm_memory_ref(struct nvkm_memory *memory)
{
if (memory)
kref_get(&memory->kref);
return memory;
}
int
nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
u64 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
int ret;
if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
return -ENOSYS;
ret = nvkm_instobj_new(imem, size, align, zero, &memory);
if (ret)
return ret;
*pmemory = memory;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/memory.c |
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <core/device.h>
#include <core/firmware.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
int
nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base,
const char *name, int ver, const struct firmware **pfw)
{
char path[64];
int ret;
snprintf(path, sizeof(path), "%s%s", base, name);
ret = nvkm_firmware_get(subdev, path, ver, pfw);
if (ret < 0)
return ret;
return 0;
}
int
nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base,
const char *name, int ver, struct nvkm_blob *blob)
{
const struct firmware *fw;
int ret;
ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw);
if (ret == 0) {
blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
blob->size = fw->size;
nvkm_firmware_put(fw);
if (!blob->data)
return -ENOMEM;
}
return ret;
}
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
* @subdev: subdevice that will use that firmware
* @fwname: name of firmware file to load
* @ver: firmware version to load
* @fw: firmware structure to load to
*
* Use this function to load firmware files in the form nvidia/chip/fwname.bin.
* Firmware files released by NVIDIA will always follow this format.
*/
int
nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
const struct firmware **fw)
{
struct nvkm_device *device = subdev->device;
char f[64];
char cname[16];
int i;
/* Convert device name to lowercase */
strncpy(cname, device->chip->name, sizeof(cname));
cname[sizeof(cname) - 1] = '\0';
i = strlen(cname);
while (i) {
--i;
cname[i] = tolower(cname[i]);
}
if (ver != 0)
snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver);
else
snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
if (!firmware_request_nowarn(fw, f, device->dev)) {
nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n",
f, (*fw)->size);
return 0;
}
nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
return -ENOENT;
}
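/* Illustrative example (not from this file): on a board whose chip name is
 * "GP102", a call such as nvkm_firmware_load_name(subdev, "gr/", "fecs_inst",
 * 0, &fw) resolves to "nvidia/gp102/gr/fecs_inst.bin", while passing a "ver"
 * of 1 would look for "nvidia/gp102/gr/fecs_inst-1.bin" instead.
 */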
/*
* nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
*/
void
nvkm_firmware_put(const struct firmware *fw)
{
release_firmware(fw);
}
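/* The code below wraps a DMA-backed firmware image in a minimal
 * struct nvkm_memory so that it can be handed to nvkm_vmm_map() and mapped
 * into a GPU virtual address space like any other memory object.
 */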
#define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
static int
nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
struct nvkm_vmm_map map = {
.memory = &fw->mem.memory,
.offset = offset,
.sgl = &fw->mem.sgl,
};
if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA))
return -ENOSYS;
return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static u64
nvkm_firmware_mem_size(struct nvkm_memory *memory)
{
return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl);
}
static u64
nvkm_firmware_mem_addr(struct nvkm_memory *memory)
{
return nvkm_firmware_mem(memory)->phys;
}
static u8
nvkm_firmware_mem_page(struct nvkm_memory *memory)
{
return PAGE_SHIFT;
}
static enum nvkm_memory_target
nvkm_firmware_mem_target(struct nvkm_memory *memory)
{
if (nvkm_firmware_mem(memory)->device->func->tegra)
return NVKM_MEM_TARGET_NCOH;
return NVKM_MEM_TARGET_HOST;
}
static void *
nvkm_firmware_mem_dtor(struct nvkm_memory *memory)
{
return NULL;
}
static const struct nvkm_memory_func
nvkm_firmware_mem = {
.dtor = nvkm_firmware_mem_dtor,
.target = nvkm_firmware_mem_target,
.page = nvkm_firmware_mem_page,
.addr = nvkm_firmware_mem_addr,
.size = nvkm_firmware_mem_size,
.map = nvkm_firmware_mem_map,
};
void
nvkm_firmware_dtor(struct nvkm_firmware *fw)
{
struct nvkm_memory *memory = &fw->mem.memory;
if (!fw->img)
return;
switch (fw->func->type) {
case NVKM_FIRMWARE_IMG_RAM:
kfree(fw->img);
break;
case NVKM_FIRMWARE_IMG_DMA:
nvkm_memory_unref(&memory);
dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
break;
default:
WARN_ON(1);
break;
}
fw->img = NULL;
}
int
nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
struct nvkm_device *device, const void *src, int len, struct nvkm_firmware *fw)
{
fw->func = func;
fw->name = name;
fw->device = device;
fw->len = len;
switch (fw->func->type) {
case NVKM_FIRMWARE_IMG_RAM:
fw->img = kmemdup(src, fw->len, GFP_KERNEL);
break;
case NVKM_FIRMWARE_IMG_DMA: {
dma_addr_t addr;
len = ALIGN(fw->len, PAGE_SIZE);
fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
if (fw->img) {
memcpy(fw->img, src, fw->len);
fw->phys = addr;
}
sg_init_one(&fw->mem.sgl, fw->img, len);
sg_dma_address(&fw->mem.sgl) = fw->phys;
sg_dma_len(&fw->mem.sgl) = len;
}
break;
default:
WARN_ON(1);
return -EINVAL;
}
if (!fw->img)
return -ENOMEM;
nvkm_memory_ctor(&nvkm_firmware_mem, &fw->mem.memory);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/firmware.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uevent(p) container_of((p), struct nvkm_uevent, object)
#include <core/event.h>
#include <core/client.h>
#include <nvif/if000e.h>
struct nvkm_uevent {
struct nvkm_object object;
struct nvkm_object *parent;
nvkm_uevent_func func;
bool wait;
struct nvkm_event_ntfy ntfy;
atomic_t allowed;
};
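/* Userspace-visible wrapper around an nvkm_event notifier.  The ALLOW and
 * BLOCK methods below gate delivery, and "allowed" remembers the requested
 * state so it can be reapplied across suspend/resume by init()/fini().
 */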
static int
nvkm_uevent_mthd_block(struct nvkm_uevent *uevent, union nvif_event_block_args *args, u32 argc)
{
if (argc != sizeof(args->vn))
return -ENOSYS;
nvkm_event_ntfy_block(&uevent->ntfy);
atomic_set(&uevent->allowed, 0);
return 0;
}
static int
nvkm_uevent_mthd_allow(struct nvkm_uevent *uevent, union nvif_event_allow_args *args, u32 argc)
{
if (argc != sizeof(args->vn))
return -ENOSYS;
nvkm_event_ntfy_allow(&uevent->ntfy);
atomic_set(&uevent->allowed, 1);
return 0;
}
static int
nvkm_uevent_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_uevent *uevent = nvkm_uevent(object);
switch (mthd) {
case NVIF_EVENT_V0_ALLOW: return nvkm_uevent_mthd_allow(uevent, argv, argc);
case NVIF_EVENT_V0_BLOCK: return nvkm_uevent_mthd_block(uevent, argv, argc);
default:
break;
}
return -EINVAL;
}
static int
nvkm_uevent_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_uevent *uevent = nvkm_uevent(object);
nvkm_event_ntfy_block(&uevent->ntfy);
return 0;
}
static int
nvkm_uevent_init(struct nvkm_object *object)
{
struct nvkm_uevent *uevent = nvkm_uevent(object);
if (atomic_read(&uevent->allowed))
nvkm_event_ntfy_allow(&uevent->ntfy);
return 0;
}
static void *
nvkm_uevent_dtor(struct nvkm_object *object)
{
struct nvkm_uevent *uevent = nvkm_uevent(object);
nvkm_event_ntfy_del(&uevent->ntfy);
return uevent;
}
static const struct nvkm_object_func
nvkm_uevent = {
.dtor = nvkm_uevent_dtor,
.init = nvkm_uevent_init,
.fini = nvkm_uevent_fini,
.mthd = nvkm_uevent_mthd,
};
static int
nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
{
struct nvkm_uevent *uevent = container_of(ntfy, typeof(*uevent), ntfy);
struct nvkm_client *client = uevent->object.client;
if (uevent->func)
return uevent->func(uevent->parent, uevent->object.token, bits);
return client->event(uevent->object.token, NULL, 0);
}
int
nvkm_uevent_add(struct nvkm_uevent *uevent, struct nvkm_event *event, int id, u32 bits,
nvkm_uevent_func func)
{
if (WARN_ON(uevent->func))
return -EBUSY;
nvkm_event_ntfy_add(event, id, bits, uevent->wait, nvkm_uevent_ntfy, &uevent->ntfy);
uevent->func = func;
return 0;
}
int
nvkm_uevent_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
struct nvkm_object *parent = oclass->parent;
struct nvkm_uevent *uevent;
union nvif_event_args *args = argv;
if (argc < sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
if (!(uevent = kzalloc(sizeof(*uevent), GFP_KERNEL)))
return -ENOMEM;
*pobject = &uevent->object;
nvkm_object_ctor(&nvkm_uevent, oclass, &uevent->object);
uevent->parent = parent;
uevent->func = NULL;
uevent->wait = args->v0.wait;
uevent->ntfy.event = NULL;
return parent->func->uevent(parent, &args->v0.data, argc - sizeof(args->v0), uevent);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/uevent.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/subdev.h>
#include <core/device.h>
#include <core/option.h>
#include <subdev/mc.h>
const char *
nvkm_subdev_type[NVKM_SUBDEV_NR] = {
#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr,
#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
#include <core/layout.h>
#undef NVKM_LAYOUT_ONCE
#undef NVKM_LAYOUT_INST
};
void
nvkm_subdev_intr(struct nvkm_subdev *subdev)
{
if (subdev->func->intr)
subdev->func->intr(subdev);
}
int
nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
if (subdev->func->info)
return subdev->func->info(subdev, mthd, data);
return -ENOSYS;
}
int
nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_device *device = subdev->device;
const char *action = suspend ? "suspend" : subdev->use.enabled ? "fini" : "reset";
s64 time;
nvkm_trace(subdev, "%s running...\n", action);
time = ktime_to_us(ktime_get());
if (subdev->func->fini) {
int ret = subdev->func->fini(subdev, suspend);
if (ret) {
nvkm_error(subdev, "%s failed, %d\n", action, ret);
if (suspend)
return ret;
}
}
subdev->use.enabled = false;
nvkm_mc_reset(device, subdev->type, subdev->inst);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
return 0;
}
int
nvkm_subdev_preinit(struct nvkm_subdev *subdev)
{
s64 time;
nvkm_trace(subdev, "preinit running...\n");
time = ktime_to_us(ktime_get());
if (subdev->func->preinit) {
int ret = subdev->func->preinit(subdev);
if (ret) {
nvkm_error(subdev, "preinit failed, %d\n", ret);
return ret;
}
}
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "preinit completed in %lldus\n", time);
return 0;
}
static int
nvkm_subdev_oneinit_(struct nvkm_subdev *subdev)
{
s64 time;
int ret;
if (!subdev->func->oneinit || subdev->oneinit)
return 0;
nvkm_trace(subdev, "one-time init running...\n");
time = ktime_to_us(ktime_get());
ret = subdev->func->oneinit(subdev);
if (ret) {
nvkm_error(subdev, "one-time init failed, %d\n", ret);
return ret;
}
subdev->oneinit = true;
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
return 0;
}
static int
nvkm_subdev_init_(struct nvkm_subdev *subdev)
{
s64 time;
int ret;
if (subdev->use.enabled) {
nvkm_trace(subdev, "init skipped, already running\n");
return 0;
}
nvkm_trace(subdev, "init running...\n");
time = ktime_to_us(ktime_get());
ret = nvkm_subdev_oneinit_(subdev);
if (ret)
return ret;
subdev->use.enabled = true;
if (subdev->func->init) {
ret = subdev->func->init(subdev);
if (ret) {
nvkm_error(subdev, "init failed, %d\n", ret);
return ret;
}
}
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "init completed in %lldus\n", time);
return 0;
}
int
nvkm_subdev_init(struct nvkm_subdev *subdev)
{
int ret;
mutex_lock(&subdev->use.mutex);
if (refcount_read(&subdev->use.refcount) == 0) {
nvkm_trace(subdev, "init skipped, no users\n");
mutex_unlock(&subdev->use.mutex);
return 0;
}
ret = nvkm_subdev_init_(subdev);
mutex_unlock(&subdev->use.mutex);
return ret;
}
int
nvkm_subdev_oneinit(struct nvkm_subdev *subdev)
{
int ret;
mutex_lock(&subdev->use.mutex);
ret = nvkm_subdev_oneinit_(subdev);
mutex_unlock(&subdev->use.mutex);
return ret;
}
void
nvkm_subdev_unref(struct nvkm_subdev *subdev)
{
if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) {
nvkm_subdev_fini(subdev, false);
mutex_unlock(&subdev->use.mutex);
}
}
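/* The first reference powers the subdev up.  The fast path is a lockless
 * refcount_inc_not_zero(); the check is repeated under use.mutex before
 * running init so that racing callers initialise the subdev exactly once.
 */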
int
nvkm_subdev_ref(struct nvkm_subdev *subdev)
{
int ret;
if (subdev && !refcount_inc_not_zero(&subdev->use.refcount)) {
mutex_lock(&subdev->use.mutex);
if (!refcount_inc_not_zero(&subdev->use.refcount)) {
if ((ret = nvkm_subdev_init_(subdev))) {
mutex_unlock(&subdev->use.mutex);
return ret;
}
refcount_set(&subdev->use.refcount, 1);
}
mutex_unlock(&subdev->use.mutex);
}
return 0;
}
void
nvkm_subdev_del(struct nvkm_subdev **psubdev)
{
struct nvkm_subdev *subdev = *psubdev;
s64 time;
if (subdev && !WARN_ON(!subdev->func)) {
nvkm_trace(subdev, "destroy running...\n");
time = ktime_to_us(ktime_get());
list_del(&subdev->head);
if (subdev->func->dtor)
*psubdev = subdev->func->dtor(subdev);
mutex_destroy(&subdev->use.mutex);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "destroy completed in %lldus\n", time);
kfree(*psubdev);
*psubdev = NULL;
}
}
void
nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_subdev *subdev;
list_for_each_entry(subdev, &device->subdev, head) {
if (subdev->type == type && subdev->inst == inst) {
*subdev->pself = NULL;
nvkm_subdev_del(&subdev);
break;
}
}
}
void
__nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
subdev->func = func;
subdev->device = device;
subdev->type = type;
subdev->inst = inst < 0 ? 0 : inst;
if (inst >= 0)
snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst);
else
strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);
refcount_set(&subdev->use.refcount, 1);
list_add_tail(&subdev->head, &device->subdev);
}
int
nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev)
{
if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(func, device, type, inst, *psubdev);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/subdev.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/client.h>
#include <core/device.h>
#include <core/option.h>
#include <nvif/class.h>
#include <nvif/event.h>
#include <nvif/if0000.h>
#include <nvif/unpack.h>
static int
nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_object **pobject)
{
union {
struct nvif_client_v0 v0;
} *args = argv;
struct nvkm_client *client;
int ret = -ENOSYS;
	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.name[sizeof(args->v0.name) - 1] = 0;
ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
NULL, oclass->client->event, &client);
if (ret)
return ret;
} else
return ret;
client->object.client = oclass->client;
client->object.handle = oclass->handle;
client->object.route = oclass->route;
client->object.token = oclass->token;
client->object.object = oclass->object;
client->debug = oclass->client->debug;
*pobject = &client->object;
return 0;
}
static const struct nvkm_sclass
nvkm_uclient_sclass = {
.oclass = NVIF_CLASS_CLIENT,
.minver = 0,
.maxver = 0,
.ctor = nvkm_uclient_new,
};
static const struct nvkm_object_func nvkm_client;
struct nvkm_client *
nvkm_client_search(struct nvkm_client *client, u64 handle)
{
struct nvkm_object *object;
object = nvkm_object_search(client, handle, &nvkm_client);
if (IS_ERR(object))
return (void *)object;
return nvkm_client(object);
}
static int
nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
{
union {
struct nvif_client_devlist_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(&client->object, "client devlist size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
args->v0.version, args->v0.count);
if (size == sizeof(args->v0.device[0]) * args->v0.count) {
ret = nvkm_device_list(args->v0.device, args->v0.count);
if (ret >= 0) {
args->v0.count = ret;
ret = 0;
}
} else {
ret = -EINVAL;
}
}
return ret;
}
static int
nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
struct nvkm_client *client = nvkm_client(object);
switch (mthd) {
case NVIF_CLIENT_V0_DEVLIST:
return nvkm_client_mthd_devlist(client, data, size);
default:
break;
}
return -EINVAL;
}
static int
nvkm_client_child_new(const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
return oclass->base.ctor(oclass, data, size, pobject);
}
static int
nvkm_client_child_get(struct nvkm_object *object, int index,
struct nvkm_oclass *oclass)
{
const struct nvkm_sclass *sclass;
switch (index) {
case 0: sclass = &nvkm_uclient_sclass; break;
case 1: sclass = &nvkm_udevice_sclass; break;
default:
return -EINVAL;
}
oclass->ctor = nvkm_client_child_new;
oclass->base = *sclass;
return 0;
}
static int
nvkm_client_fini(struct nvkm_object *object, bool suspend)
{
return 0;
}
static void *
nvkm_client_dtor(struct nvkm_object *object)
{
return nvkm_client(object);
}
static const struct nvkm_object_func
nvkm_client = {
.dtor = nvkm_client_dtor,
.fini = nvkm_client_fini,
.mthd = nvkm_client_mthd,
.sclass = nvkm_client_child_get,
};
int
nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
int (*event)(u64, void *, u32), struct nvkm_client **pclient)
{
struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass };
struct nvkm_client *client;
if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
return -ENOMEM;
oclass.client = client;
nvkm_object_ctor(&nvkm_client, &oclass, &client->object);
snprintf(client->name, sizeof(client->name), "%s", name);
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
client->event = event;
INIT_LIST_HEAD(&client->umem);
spin_lock_init(&client->lock);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/client.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/engine.h>
#include <core/device.h>
#include <core/option.h>
#include <subdev/fb.h>
bool
nvkm_engine_chsw_load(struct nvkm_engine *engine)
{
if (engine->func->chsw_load)
return engine->func->chsw_load(engine);
return false;
}
int
nvkm_engine_reset(struct nvkm_engine *engine)
{
if (engine->func->reset)
return engine->func->reset(engine);
nvkm_subdev_fini(&engine->subdev, false);
return nvkm_subdev_init(&engine->subdev);
}
void
nvkm_engine_unref(struct nvkm_engine **pengine)
{
struct nvkm_engine *engine = *pengine;
if (engine) {
nvkm_subdev_unref(&engine->subdev);
*pengine = NULL;
}
}
struct nvkm_engine *
nvkm_engine_ref(struct nvkm_engine *engine)
{
int ret;
if (engine) {
ret = nvkm_subdev_ref(&engine->subdev);
if (ret)
return ERR_PTR(ret);
}
return engine;
}
void
nvkm_engine_tile(struct nvkm_engine *engine, int region)
{
struct nvkm_fb *fb = engine->subdev.device->fb;
if (engine->func->tile)
engine->func->tile(engine, region, &fb->tile.region[region]);
}
static void
nvkm_engine_intr(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->intr)
engine->func->intr(engine);
}
static int
nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->info)
return engine->func->info(engine, mthd, data);
return -ENOSYS;
}
static int
nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->fini)
return engine->func->fini(engine, suspend);
return 0;
}
static int
nvkm_engine_init(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
struct nvkm_fb *fb = subdev->device->fb;
int ret = 0, i;
if (engine->func->init)
ret = engine->func->init(engine);
for (i = 0; fb && i < fb->tile.regions; i++)
nvkm_engine_tile(engine, i);
return ret;
}
static int
nvkm_engine_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->oneinit)
return engine->func->oneinit(engine);
return 0;
}
static int
nvkm_engine_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->preinit)
engine->func->preinit(engine);
return 0;
}
static void *
nvkm_engine_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_engine *engine = nvkm_engine(subdev);
if (engine->func->dtor)
return engine->func->dtor(engine);
return engine;
}
const struct nvkm_subdev_func
nvkm_engine = {
.dtor = nvkm_engine_dtor,
.preinit = nvkm_engine_preinit,
.oneinit = nvkm_engine_oneinit,
.init = nvkm_engine_init,
.fini = nvkm_engine_fini,
.info = nvkm_engine_info,
.intr = nvkm_engine_intr,
};
int
nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
{
engine->func = func;
nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
refcount_set(&engine->subdev.use.refcount, 0);
if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
nvkm_debug(&engine->subdev, "disabled\n");
return -ENODEV;
}
spin_lock_init(&engine->lock);
return 0;
}
int
nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, bool enable,
struct nvkm_engine **pengine)
{
if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
return -ENOMEM;
return nvkm_engine_ctor(func, device, type, inst, enable, *pengine);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/engine.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <core/ioctl.h>
#include <core/client.h>
#include <core/engine.h>
#include <core/event.h>
#include <nvif/unpack.h>
#include <nvif/ioctl.h>
static int
nvkm_ioctl_nop(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_nop_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "nop size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
args->v0.version = NVIF_VERSION_LATEST;
}
return ret;
}
#include <nvif/class.h>
static int
nvkm_ioctl_sclass_(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
if ( object->func->uevent &&
!object->func->uevent(object, NULL, 0, NULL) && index-- == 0) {
oclass->ctor = nvkm_uevent_new;
oclass->base.minver = 0;
oclass->base.maxver = 0;
oclass->base.oclass = NVIF_CLASS_EVENT;
return 0;
}
if (object->func->sclass)
return object->func->sclass(object, index, oclass);
return -ENOSYS;
}
static int
nvkm_ioctl_sclass(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_sclass_v0 v0;
} *args = data;
struct nvkm_oclass oclass = { .client = client };
int ret = -ENOSYS, i = 0;
nvif_ioctl(object, "sclass size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "sclass vers %d count %d\n",
args->v0.version, args->v0.count);
if (size != args->v0.count * sizeof(args->v0.oclass[0]))
return -EINVAL;
while (nvkm_ioctl_sclass_(object, i, &oclass) >= 0) {
if (i < args->v0.count) {
args->v0.oclass[i].oclass = oclass.base.oclass;
args->v0.oclass[i].minver = oclass.base.minver;
args->v0.oclass[i].maxver = oclass.base.maxver;
}
i++;
}
args->v0.count = i;
}
return ret;
}
static int
nvkm_ioctl_new(struct nvkm_client *client,
struct nvkm_object *parent, void *data, u32 size)
{
union {
struct nvif_ioctl_new_v0 v0;
} *args = data;
struct nvkm_object *object = NULL;
struct nvkm_oclass oclass;
int ret = -ENOSYS, i = 0;
nvif_ioctl(parent, "new size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(parent, "new vers %d handle %08x class %08x "
"route %02x token %llx object %016llx\n",
args->v0.version, args->v0.handle, args->v0.oclass,
args->v0.route, args->v0.token, args->v0.object);
} else
return ret;
if (!parent->func->sclass && !parent->func->uevent) {
nvif_ioctl(parent, "cannot have children\n");
return -EINVAL;
}
do {
memset(&oclass, 0x00, sizeof(oclass));
oclass.handle = args->v0.handle;
oclass.route = args->v0.route;
oclass.token = args->v0.token;
oclass.object = args->v0.object;
oclass.client = client;
oclass.parent = parent;
ret = nvkm_ioctl_sclass_(parent, i++, &oclass);
if (ret)
return ret;
} while (oclass.base.oclass != args->v0.oclass);
if (oclass.engine) {
oclass.engine = nvkm_engine_ref(oclass.engine);
if (IS_ERR(oclass.engine))
return PTR_ERR(oclass.engine);
}
ret = oclass.ctor(&oclass, data, size, &object);
nvkm_engine_unref(&oclass.engine);
if (ret == 0) {
ret = nvkm_object_init(object);
if (ret == 0) {
list_add_tail(&object->head, &parent->tree);
if (nvkm_object_insert(object)) {
client->data = object;
return 0;
}
ret = -EEXIST;
}
nvkm_object_fini(object, false);
}
nvkm_object_del(&object);
return ret;
}
static int
nvkm_ioctl_del(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_del none;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "delete size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "delete\n");
nvkm_object_fini(object, false);
nvkm_object_del(&object);
}
return ret ? ret : 1;
}
static int
nvkm_ioctl_mthd(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_mthd_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "mthd size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "mthd vers %d mthd %02x\n",
args->v0.version, args->v0.method);
ret = nvkm_object_mthd(object, args->v0.method, data, size);
}
return ret;
}
static int
nvkm_ioctl_rd(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_rd_v0 v0;
} *args = data;
union {
u8 b08;
u16 b16;
u32 b32;
} v;
int ret = -ENOSYS;
nvif_ioctl(object, "rd size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
args->v0.version, args->v0.size, args->v0.addr);
switch (args->v0.size) {
case 1:
ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
args->v0.data = v.b08;
break;
case 2:
ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
args->v0.data = v.b16;
break;
case 4:
ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
args->v0.data = v.b32;
break;
default:
ret = -EINVAL;
break;
}
}
return ret;
}
static int
nvkm_ioctl_wr(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_wr_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "wr size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(object,
"wr vers %d size %d addr %016llx data %08x\n",
args->v0.version, args->v0.size, args->v0.addr,
args->v0.data);
} else
return ret;
switch (args->v0.size) {
case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
default:
break;
}
return -EINVAL;
}
static int
nvkm_ioctl_map(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_map_v0 v0;
} *args = data;
enum nvkm_object_map type;
int ret = -ENOSYS;
nvif_ioctl(object, "map size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "map vers %d\n", args->v0.version);
ret = nvkm_object_map(object, data, size, &type,
&args->v0.handle,
&args->v0.length);
if (type == NVKM_OBJECT_MAP_IO)
args->v0.type = NVIF_IOCTL_MAP_V0_IO;
else
args->v0.type = NVIF_IOCTL_MAP_V0_VA;
}
return ret;
}
static int
nvkm_ioctl_unmap(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
union {
struct nvif_ioctl_unmap none;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "unmap size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "unmap\n");
ret = nvkm_object_unmap(object);
}
return ret;
}
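/* The request "type" field from struct nvif_ioctl_v0 indexes this table
 * directly, so entry order must match the NVIF_IOCTL_V0_* numbering
 * (NOP, SCLASS, NEW, DEL, MTHD, RD, WR, MAP, UNMAP).
 */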
static struct {
int version;
int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32);
}
nvkm_ioctl_v0[] = {
{ 0x00, nvkm_ioctl_nop },
{ 0x00, nvkm_ioctl_sclass },
{ 0x00, nvkm_ioctl_new },
{ 0x00, nvkm_ioctl_del },
{ 0x00, nvkm_ioctl_mthd },
{ 0x00, nvkm_ioctl_rd },
{ 0x00, nvkm_ioctl_wr },
{ 0x00, nvkm_ioctl_map },
{ 0x00, nvkm_ioctl_unmap },
};
static int
nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
void *data, u32 size, u8 owner, u8 *route, u64 *token)
{
struct nvkm_object *object;
int ret;
object = nvkm_object_search(client, handle, NULL);
if (IS_ERR(object)) {
nvif_ioctl(&client->object, "object not found\n");
return PTR_ERR(object);
}
if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
nvif_ioctl(&client->object, "route != owner\n");
return -EACCES;
}
*route = object->route;
*token = object->token;
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
ret = nvkm_ioctl_v0[type].func(client, object, data, size);
}
return ret;
}
int
nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
{
struct nvkm_object *object = &client->object;
union {
struct nvif_ioctl_v0 v0;
} *args = data;
int ret = -ENOSYS;
nvif_ioctl(object, "size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object,
"vers %d type %02x object %016llx owner %02x\n",
args->v0.version, args->v0.type, args->v0.object,
args->v0.owner);
ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
data, size, args->v0.owner,
&args->v0.route, &args->v0.token);
}
if (ret != 1) {
nvif_ioctl(object, "return %d\n", ret);
if (hack) {
*hack = client->data;
client->data = NULL;
}
}
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/ioctl.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/ramht.h>
#include <core/engine.h>
#include <core/object.h>
static u32
nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
{
u32 hash = 0;
while (handle) {
hash ^= (handle & ((1 << ramht->bits) - 1));
handle >>= ramht->bits;
}
hash ^= chid << (ramht->bits - 4);
return hash;
}
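/* The handle is folded down to ramht->bits bits with XOR and the channel id
 * is mixed into the upper bits, so the same handle on different channels
 * tends to land in different slots.  nvkm_ramht_search() and
 * nvkm_ramht_insert() both probe linearly (with wrap-around) from this slot.
 */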
struct nvkm_gpuobj *
nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
{
u32 co, ho;
co = ho = nvkm_ramht_hash(ramht, chid, handle);
do {
if (ramht->data[co].chid == chid) {
if (ramht->data[co].handle == handle)
return ramht->data[co].inst;
}
if (++co >= ramht->size)
co = 0;
} while (co != ho);
return NULL;
}
static int
nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
int chid, int addr, u32 handle, u32 context)
{
struct nvkm_ramht_data *data = &ramht->data[co];
u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
int ret;
nvkm_gpuobj_del(&data->inst);
data->chid = chid;
data->handle = handle;
if (object) {
ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
if (ret) {
if (ret != -ENODEV) {
data->chid = -1;
return ret;
}
data->inst = NULL;
}
if (data->inst) {
if (ramht->device->card_type >= NV_50)
inst = data->inst->node->offset;
else
inst = data->inst->addr;
}
if (addr < 0) context |= inst << -addr;
else context |= inst >> addr;
}
nvkm_kmap(ramht->gpuobj);
nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
nvkm_done(ramht->gpuobj);
return co + 1;
}
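/* A successful update returns the slot number biased by one, so the value can
 * be treated as an opaque cookie: passing zero, or a negative errno from a
 * failed nvkm_ramht_insert(), to nvkm_ramht_remove() is a harmless no-op.
 */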
void
nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
{
if (--cookie >= 0)
nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
}
int
nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
int chid, int addr, u32 handle, u32 context)
{
u32 co, ho;
if (nvkm_ramht_search(ramht, chid, handle))
return -EEXIST;
co = ho = nvkm_ramht_hash(ramht, chid, handle);
do {
if (ramht->data[co].chid < 0) {
return nvkm_ramht_update(ramht, co, object, chid,
addr, handle, context);
}
if (++co >= ramht->size)
co = 0;
} while (co != ho);
return -ENOSPC;
}
void
nvkm_ramht_del(struct nvkm_ramht **pramht)
{
struct nvkm_ramht *ramht = *pramht;
if (ramht) {
nvkm_gpuobj_del(&ramht->gpuobj);
vfree(*pramht);
*pramht = NULL;
}
}
int
nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
{
struct nvkm_ramht *ramht;
int ret, i;
if (!(ramht = *pramht = vzalloc(struct_size(ramht, data, (size >> 3)))))
return -ENOMEM;
ramht->device = device;
ramht->parent = parent;
ramht->size = size >> 3;
ramht->bits = order_base_2(ramht->size);
for (i = 0; i < ramht->size; i++)
ramht->data[i].chid = -1;
ret = nvkm_gpuobj_new(ramht->device, size, align, true,
ramht->parent, &ramht->gpuobj);
if (ret)
nvkm_ramht_del(pramht);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/ramht.c |
/*
* Copyright 2013-2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/event.h>
#include <core/subdev.h>
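/* References are counted per (index, type) bit.  The first reference on a bit
 * calls the event provider's init() hook (e.g. to unmask an interrupt) and
 * dropping the last reference calls fini().
 */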
static void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
assert_spin_locked(&event->refs_lock);
nvkm_trace(event->subdev, "event: decr %08x on %d\n", types, index);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
nvkm_trace(event->subdev, "event: blocking %d on %d\n", type, index);
if (event->func->fini)
event->func->fini(event, 1 << type, index);
}
}
}
static void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
assert_spin_locked(&event->refs_lock);
nvkm_trace(event->subdev, "event: incr %08x on %d\n", types, index);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {
nvkm_trace(event->subdev, "event: allowing %d on %d\n", type, index);
if (event->func->init)
event->func->init(event, 1 << type, index);
}
}
}
static void
nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
{
struct nvkm_event *event = ntfy->event;
unsigned long flags;
nvkm_trace(event->subdev, "event: ntfy state changed\n");
spin_lock_irqsave(&event->refs_lock, flags);
if (atomic_read(&ntfy->allowed) != ntfy->running) {
if (ntfy->running) {
nvkm_event_put(ntfy->event, ntfy->bits, ntfy->id);
ntfy->running = false;
} else {
nvkm_event_get(ntfy->event, ntfy->bits, ntfy->id);
ntfy->running = true;
}
}
spin_unlock_irqrestore(&event->refs_lock, flags);
}
static void
nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
{
spin_lock_irq(&ntfy->event->list_lock);
list_del_init(&ntfy->head);
spin_unlock_irq(&ntfy->event->list_lock);
}
static void
nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
{
spin_lock_irq(&ntfy->event->list_lock);
list_add_tail(&ntfy->head, &ntfy->event->ntfy);
spin_unlock_irq(&ntfy->event->list_lock);
}
static void
nvkm_event_ntfy_block_(struct nvkm_event_ntfy *ntfy, bool wait)
{
struct nvkm_subdev *subdev = ntfy->event->subdev;
nvkm_trace(subdev, "event: ntfy block %08x on %d wait:%d\n", ntfy->bits, ntfy->id, wait);
if (atomic_xchg(&ntfy->allowed, 0) == 1) {
nvkm_event_ntfy_state(ntfy);
if (wait)
nvkm_event_ntfy_remove(ntfy);
}
}
void
nvkm_event_ntfy_block(struct nvkm_event_ntfy *ntfy)
{
if (ntfy->event)
nvkm_event_ntfy_block_(ntfy, ntfy->wait);
}
void
nvkm_event_ntfy_allow(struct nvkm_event_ntfy *ntfy)
{
nvkm_trace(ntfy->event->subdev, "event: ntfy allow %08x on %d\n", ntfy->bits, ntfy->id);
if (atomic_xchg(&ntfy->allowed, 1) == 0) {
nvkm_event_ntfy_state(ntfy);
if (ntfy->wait)
nvkm_event_ntfy_insert(ntfy);
}
}
void
nvkm_event_ntfy_del(struct nvkm_event_ntfy *ntfy)
{
struct nvkm_event *event = ntfy->event;
if (!event)
return;
nvkm_trace(event->subdev, "event: ntfy del %08x on %d\n", ntfy->bits, ntfy->id);
nvkm_event_ntfy_block_(ntfy, false);
nvkm_event_ntfy_remove(ntfy);
ntfy->event = NULL;
}
void
nvkm_event_ntfy_add(struct nvkm_event *event, int id, u32 bits, bool wait, nvkm_event_func func,
struct nvkm_event_ntfy *ntfy)
{
nvkm_trace(event->subdev, "event: ntfy add %08x on %d wait:%d\n", id, bits, wait);
ntfy->event = event;
ntfy->id = id;
ntfy->bits = bits;
ntfy->wait = wait;
ntfy->func = func;
atomic_set(&ntfy->allowed, 0);
ntfy->running = false;
INIT_LIST_HEAD(&ntfy->head);
if (!ntfy->wait)
nvkm_event_ntfy_insert(ntfy);
}
bool
nvkm_event_ntfy_valid(struct nvkm_event *event, int id, u32 bits)
{
return true;
}
void
nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
{
struct nvkm_event_ntfy *ntfy, *ntmp;
unsigned long flags;
if (!event->refs || WARN_ON(id >= event->index_nr))
return;
nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);
spin_lock_irqsave(&event->list_lock, flags);
list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
if (ntfy->id == id && ntfy->bits & bits) {
if (atomic_read(&ntfy->allowed))
ntfy->func(ntfy, ntfy->bits & bits);
}
}
spin_unlock_irqrestore(&event->list_lock, flags);
}
void
nvkm_event_fini(struct nvkm_event *event)
{
if (event->refs) {
kfree(event->refs);
event->refs = NULL;
}
}
int
__nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
int types_nr, int index_nr, struct nvkm_event *event)
{
event->refs = kzalloc(array3_size(index_nr, types_nr, sizeof(*event->refs)), GFP_KERNEL);
if (!event->refs)
return -ENOMEM;
event->func = func;
event->subdev = subdev;
event->types_nr = types_nr;
event->index_nr = index_nr;
INIT_LIST_HEAD(&event->ntfy);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/event.c |
/*
* Copyright (C) 2010 Nouveau Project
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <core/enum.h>
const struct nvkm_enum *
nvkm_enum_find(const struct nvkm_enum *en, u32 value)
{
while (en->name) {
if (en->value == value)
return en;
en++;
}
return NULL;
}
void
nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
{
bool space = false;
while (size >= 1 && bf->name) {
if (value & bf->mask) {
int this = snprintf(data, size, "%s%s",
space ? " " : "", bf->name);
size -= this;
data += this;
space = true;
}
bf++;
}
data[0] = '\0';
}
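/* Illustrative example (hypothetical table, not from this file): given
 * { { 0x00000001, "FAULT" }, { 0x00000004, "ERROR" }, {} } and value 0x5,
 * nvkm_snprintbf() fills the buffer with "FAULT ERROR".
 */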
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/enum.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/mm.h>
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
struct nvkm_mm_node *node;
pr_err("nvkm: %s\n", header);
pr_err("nvkm: node list:\n");
list_for_each_entry(node, &mm->nodes, nl_entry) {
pr_err("nvkm: \t%08x %08x %d\n",
node->offset, node->length, node->type);
}
pr_err("nvkm: free list:\n");
list_for_each_entry(node, &mm->free, fl_entry) {
pr_err("nvkm: \t%08x %08x %d\n",
node->offset, node->length, node->type);
}
}
void
nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
struct nvkm_mm_node *this = *pthis;
if (this) {
struct nvkm_mm_node *prev = node(this, prev);
struct nvkm_mm_node *next = node(this, next);
if (prev && prev->type == NVKM_MM_TYPE_NONE) {
prev->length += this->length;
list_del(&this->nl_entry);
kfree(this); this = prev;
}
if (next && next->type == NVKM_MM_TYPE_NONE) {
next->offset = this->offset;
next->length += this->length;
if (this->type == NVKM_MM_TYPE_NONE)
list_del(&this->fl_entry);
list_del(&this->nl_entry);
kfree(this); this = NULL;
}
if (this && this->type != NVKM_MM_TYPE_NONE) {
list_for_each_entry(prev, &mm->free, fl_entry) {
if (this->offset < prev->offset)
break;
}
list_add_tail(&this->fl_entry, &prev->fl_entry);
this->type = NVKM_MM_TYPE_NONE;
}
}
*pthis = NULL;
}
static struct nvkm_mm_node *
region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
struct nvkm_mm_node *b;
if (a->length == size)
return a;
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (unlikely(b == NULL))
return NULL;
b->offset = a->offset;
b->length = size;
b->heap = a->heap;
b->type = a->type;
a->offset += size;
a->length -= size;
list_add_tail(&b->nl_entry, &a->nl_entry);
if (b->type == NVKM_MM_TYPE_NONE)
list_add_tail(&b->fl_entry, &a->fl_entry);
return b;
}
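/* Allocate from the lowest suitable address of a free region (nvkm_mm_tail()
 * below is the mirror image, carving from the highest address down).  The
 * caller may accept a partial allocation between size_min and size_max;
 * align must be a power of two.
 */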
int
nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
u32 align, struct nvkm_mm_node **pnode)
{
struct nvkm_mm_node *prev, *this, *next;
u32 mask = align - 1;
u32 splitoff;
u32 s, e;
BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
list_for_each_entry(this, &mm->free, fl_entry) {
if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
if (this->heap != heap)
continue;
}
e = this->offset + this->length;
s = this->offset;
prev = node(this, prev);
if (prev && prev->type != type)
s = roundup(s, mm->block_size);
next = node(this, next);
if (next && next->type != type)
e = rounddown(e, mm->block_size);
s = (s + mask) & ~mask;
e &= ~mask;
if (s > e || e - s < size_min)
continue;
splitoff = s - this->offset;
if (splitoff && !region_head(mm, this, splitoff))
return -ENOMEM;
this = region_head(mm, this, min(size_max, e - s));
if (!this)
return -ENOMEM;
this->next = NULL;
this->type = type;
list_del(&this->fl_entry);
*pnode = this;
return 0;
}
return -ENOSPC;
}
static struct nvkm_mm_node *
region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
struct nvkm_mm_node *b;
if (a->length == size)
return a;
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (unlikely(b == NULL))
return NULL;
a->length -= size;
b->offset = a->offset + a->length;
b->length = size;
b->heap = a->heap;
b->type = a->type;
list_add(&b->nl_entry, &a->nl_entry);
if (b->type == NVKM_MM_TYPE_NONE)
list_add(&b->fl_entry, &a->fl_entry);
return b;
}
int
nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
u32 align, struct nvkm_mm_node **pnode)
{
struct nvkm_mm_node *prev, *this, *next;
u32 mask = align - 1;
BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
list_for_each_entry_reverse(this, &mm->free, fl_entry) {
u32 e = this->offset + this->length;
u32 s = this->offset;
u32 c = 0, a;
if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
if (this->heap != heap)
continue;
}
prev = node(this, prev);
if (prev && prev->type != type)
s = roundup(s, mm->block_size);
next = node(this, next);
if (next && next->type != type) {
e = rounddown(e, mm->block_size);
c = next->offset - e;
}
s = (s + mask) & ~mask;
a = e - s;
if (s > e || a < size_min)
continue;
a = min(a, size_max);
s = (e - a) & ~mask;
c += (e - s) - a;
if (c && !region_tail(mm, this, c))
return -ENOMEM;
this = region_tail(mm, this, a);
if (!this)
return -ENOMEM;
this->next = NULL;
this->type = type;
list_del(&this->fl_entry);
*pnode = this;
return 0;
}
return -ENOSPC;
}
int
nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
struct nvkm_mm_node *node, *prev;
u32 next;
if (nvkm_mm_initialised(mm)) {
prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
next = prev->offset + prev->length;
if (next != offset) {
BUG_ON(next > offset);
if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
return -ENOMEM;
node->type = NVKM_MM_TYPE_HOLE;
node->offset = next;
node->length = offset - next;
list_add_tail(&node->nl_entry, &mm->nodes);
}
BUG_ON(block != mm->block_size);
} else {
INIT_LIST_HEAD(&mm->nodes);
INIT_LIST_HEAD(&mm->free);
mm->block_size = block;
mm->heap_nodes = 0;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
if (length) {
node->offset = roundup(offset, mm->block_size);
node->length = rounddown(offset + length, mm->block_size);
node->length -= node->offset;
}
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
node->heap = heap;
mm->heap_nodes++;
return 0;
}
int
nvkm_mm_fini(struct nvkm_mm *mm)
{
struct nvkm_mm_node *node, *temp;
int nodes = 0;
if (!nvkm_mm_initialised(mm))
return 0;
list_for_each_entry(node, &mm->nodes, nl_entry) {
if (node->type != NVKM_MM_TYPE_HOLE) {
if (++nodes > mm->heap_nodes) {
nvkm_mm_dump(mm, "mm not clean!");
return -EBUSY;
}
}
}
list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
list_del(&node->nl_entry);
kfree(node);
}
mm->heap_nodes = 0;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/mm.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <core/engine.h>
#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>
/* fast-path, where backend is able to provide direct pointer to memory */
static u32
nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
{
return ioread32_native(gpuobj->map + offset);
}
static void
nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
iowrite32_native(data, gpuobj->map + offset);
}
/* accessor functions for gpuobjs allocated directly from instmem */
static int
nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
struct nvkm_vmm *vmm, struct nvkm_vma *vma,
void *argv, u32 argc)
{
return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
}
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
return nvkm_ro32(gpuobj->memory, offset);
}
static void
nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
nvkm_wo32(gpuobj->memory, offset, data);
}
static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
static void
nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
{
gpuobj->func = &nvkm_gpuobj_heap;
nvkm_done(gpuobj->memory);
}
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_fast = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
.map = nvkm_gpuobj_heap_map,
};
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap_slow = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_heap_rd32,
.wr32 = nvkm_gpuobj_heap_wr32,
.map = nvkm_gpuobj_heap_map,
};
static void *
nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
{
gpuobj->map = nvkm_kmap(gpuobj->memory);
if (likely(gpuobj->map))
gpuobj->func = &nvkm_gpuobj_heap_fast;
else
gpuobj->func = &nvkm_gpuobj_heap_slow;
return gpuobj->map;
}
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
.acquire = nvkm_gpuobj_heap_acquire,
.map = nvkm_gpuobj_heap_map,
};
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
static int
nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
struct nvkm_vmm *vmm, struct nvkm_vma *vma,
void *argv, u32 argc)
{
return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
vmm, vma, argv, argc);
}
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
}
static void
nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
{
nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
}
static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
static void
nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
{
gpuobj->func = &nvkm_gpuobj_func;
nvkm_done(gpuobj->parent);
}
static const struct nvkm_gpuobj_func
nvkm_gpuobj_fast = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
.map = nvkm_gpuobj_map,
};
static const struct nvkm_gpuobj_func
nvkm_gpuobj_slow = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32,
.wr32 = nvkm_gpuobj_wr32,
.map = nvkm_gpuobj_map,
};
static void *
nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
{
gpuobj->map = nvkm_kmap(gpuobj->parent);
if (likely(gpuobj->map)) {
gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset;
gpuobj->func = &nvkm_gpuobj_fast;
} else {
gpuobj->func = &nvkm_gpuobj_slow;
}
return gpuobj->map;
}
static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
.acquire = nvkm_gpuobj_acquire,
.map = nvkm_gpuobj_map,
};
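/* Construct a gpuobj, either by sub-allocating from a parent gpuobj's heap
 * or, when no parent is given, by allocating fresh instance memory.
 */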
static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
u32 offset;
int ret;
if (parent) {
if (align >= 0) {
ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
max(align, 1), &gpuobj->node);
} else {
ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
-align, &gpuobj->node);
}
if (ret)
return ret;
gpuobj->parent = parent;
gpuobj->func = &nvkm_gpuobj_func;
gpuobj->addr = parent->addr + gpuobj->node->offset;
gpuobj->size = gpuobj->node->length;
if (zero) {
nvkm_kmap(gpuobj);
for (offset = 0; offset < gpuobj->size; offset += 4)
nvkm_wo32(gpuobj, offset, 0x00000000);
nvkm_done(gpuobj);
}
} else {
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
abs(align), zero, &gpuobj->memory);
if (ret)
return ret;
gpuobj->func = &nvkm_gpuobj_heap;
gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
gpuobj->size = nvkm_memory_size(gpuobj->memory);
}
return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
void
nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *gpuobj = *pgpuobj;
if (gpuobj) {
if (gpuobj->parent)
nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
nvkm_mm_fini(&gpuobj->heap);
nvkm_memory_unref(&gpuobj->memory);
kfree(*pgpuobj);
*pgpuobj = NULL;
}
}
int
nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *gpuobj;
int ret;
if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
return -ENOMEM;
ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
if (ret)
nvkm_gpuobj_del(pgpuobj);
return ret;
}
/* the below is basically only here to support sharing the paged dma object
* for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
* anywhere else.
*/
int
nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
{
if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
return -ENOMEM;
(*pgpuobj)->addr = nvkm_memory_addr(memory);
(*pgpuobj)->size = nvkm_memory_size(memory);
return 0;
}
void
nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
u32 length)
{
int i;
for (i = 0; i < length; i += 4)
nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
}
void
nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
u32 length)
{
int i;
for (i = 0; i < length; i += 4)
		((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/intr.h>
#include <core/device.h>
#include <core/subdev.h>
#include <subdev/pci.h>
#include <subdev/top.h>
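/* Translate an interrupt type requested by a subdev into the leaf index and
 * bitmask used by a particular interrupt tree.
 */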
static int
nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
enum nvkm_intr_type type, int *leaf, u32 *mask)
{
struct nvkm_device *device = subdev->device;
if (type < NVKM_INTR_VECTOR_0) {
if (type == NVKM_INTR_SUBDEV) {
const struct nvkm_intr_data *data = intr->data;
struct nvkm_top_device *tdev;
while (data && data->mask) {
if (data->type == NVKM_SUBDEV_TOP) {
list_for_each_entry(tdev, &device->top->device, head) {
if (tdev->intr >= 0 &&
tdev->type == subdev->type &&
tdev->inst == subdev->inst) {
if (data->mask & BIT(tdev->intr)) {
*leaf = data->leaf;
*mask = BIT(tdev->intr);
return 0;
}
}
}
} else
if (data->type == subdev->type && data->inst == subdev->inst) {
*leaf = data->leaf;
*mask = data->mask;
return 0;
}
data++;
}
} else {
return -ENOSYS;
}
} else {
if (type < intr->leaves * sizeof(*intr->stat) * 8) {
*leaf = type / 32;
*mask = BIT(type % 32);
return 0;
}
}
return -EINVAL;
}
static struct nvkm_intr *
nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
{
struct nvkm_intr *intr;
int ret;
list_for_each_entry(intr, &subdev->device->intr.intr, head) {
ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
if (ret == 0)
return intr;
}
return NULL;
}
static void
nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
intr->mask[leaf] |= mask;
if (intr->func->allow) {
if (intr->func->reset)
intr->func->reset(intr, leaf, mask);
intr->func->allow(intr, leaf, mask);
}
}
void
nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
struct nvkm_device *device = subdev->device;
struct nvkm_intr *intr;
unsigned long flags;
int leaf;
u32 mask;
intr = nvkm_intr_find(subdev, type, &leaf, &mask);
if (intr) {
nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
spin_lock_irqsave(&device->intr.lock, flags);
nvkm_intr_allow_locked(intr, leaf, mask);
spin_unlock_irqrestore(&device->intr.lock, flags);
}
}
static void
nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
intr->mask[leaf] &= ~mask;
if (intr->func->block)
intr->func->block(intr, leaf, mask);
}
void
nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
struct nvkm_device *device = subdev->device;
struct nvkm_intr *intr;
unsigned long flags;
int leaf;
u32 mask;
intr = nvkm_intr_find(subdev, type, &leaf, &mask);
if (intr) {
nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
spin_lock_irqsave(&device->intr.lock, flags);
nvkm_intr_block_locked(intr, leaf, mask);
spin_unlock_irqrestore(&device->intr.lock, flags);
}
}
static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
struct nvkm_intr *intr;
list_for_each_entry(intr, &device->intr.intr, head)
intr->func->rearm(intr);
}
static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
struct nvkm_intr *intr;
list_for_each_entry(intr, &device->intr.intr, head)
intr->func->unarm(intr);
}
static irqreturn_t
nvkm_intr(int irq, void *arg)
{
struct nvkm_device *device = arg;
struct nvkm_intr *intr;
struct nvkm_inth *inth;
irqreturn_t ret = IRQ_NONE;
bool pending = false;
int prio, leaf;
/* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
spin_lock(&device->intr.lock);
if (!device->intr.armed)
goto done_unlock;
nvkm_intr_unarm_locked(device);
nvkm_pci_msi_rearm(device);
/* Fetch pending interrupt masks. */
list_for_each_entry(intr, &device->intr.intr, head) {
if (intr->func->pending(intr))
pending = true;
}
if (!pending)
goto done;
/* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
goto done;
/* Execute handlers. */
for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
list_for_each_entry(inth, &device->intr.prio[prio], head) {
struct nvkm_intr *intr = inth->intr;
if (intr->stat[inth->leaf] & inth->mask) {
if (atomic_read(&inth->allowed)) {
if (intr->func->reset)
intr->func->reset(intr, inth->leaf, inth->mask);
if (inth->func(inth) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
}
}
}
/* Nothing handled? Some debugging/protection from IRQ storms is in order... */
if (ret == IRQ_NONE) {
list_for_each_entry(intr, &device->intr.intr, head) {
for (leaf = 0; leaf < intr->leaves; leaf++) {
if (intr->stat[leaf]) {
nvkm_debug(intr->subdev, "intr%d: %08x\n",
leaf, intr->stat[leaf]);
nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
}
}
}
}
done:
/* Re-enable all top-level interrupt sources. */
nvkm_intr_rearm_locked(device);
done_unlock:
spin_unlock(&device->intr.lock);
return ret;
}
int
nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
{
struct nvkm_device *device = subdev->device;
int i;
intr->func = func;
intr->data = data;
intr->subdev = subdev;
intr->leaves = leaves;
intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
if (!intr->stat || !intr->mask) {
		kfree(intr->stat);
		kfree(intr->mask);
return -ENOMEM;
}
if (intr->subdev->debug >= NV_DBG_DEBUG) {
for (i = 0; i < intr->leaves; i++)
intr->mask[i] = ~0;
}
spin_lock_irq(&device->intr.lock);
list_add_tail(&intr->head, &device->intr.intr);
spin_unlock_irq(&device->intr.lock);
return 0;
}
static irqreturn_t
nvkm_intr_subdev(struct nvkm_inth *inth)
{
struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);
nvkm_subdev_intr(subdev);
return IRQ_HANDLED;
}
static void
nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
{
struct nvkm_subdev *subdev;
enum nvkm_intr_prio prio;
int ret;
subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
if (!subdev || !subdev->func->intr)
return;
if (type == NVKM_ENGINE_DISP)
prio = NVKM_INTR_PRIO_VBLANK;
else
prio = NVKM_INTR_PRIO_NORMAL;
ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
if (WARN_ON(ret))
return;
nvkm_inth_allow(&subdev->inth);
}
static void
nvkm_intr_subdev_add(struct nvkm_intr *intr)
{
const struct nvkm_intr_data *data;
struct nvkm_device *device = intr->subdev->device;
struct nvkm_top_device *tdev;
for (data = intr->data; data && data->mask; data++) {
if (data->legacy) {
if (data->type == NVKM_SUBDEV_TOP) {
list_for_each_entry(tdev, &device->top->device, head) {
if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
continue;
nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
}
} else {
nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
}
}
}
}
void
nvkm_intr_rearm(struct nvkm_device *device)
{
struct nvkm_intr *intr;
int i;
if (unlikely(!device->intr.legacy_done)) {
list_for_each_entry(intr, &device->intr.intr, head)
nvkm_intr_subdev_add(intr);
device->intr.legacy_done = true;
}
spin_lock_irq(&device->intr.lock);
list_for_each_entry(intr, &device->intr.intr, head) {
for (i = 0; intr->func->block && i < intr->leaves; i++) {
intr->func->block(intr, i, ~0);
intr->func->allow(intr, i, intr->mask[i]);
}
}
nvkm_intr_rearm_locked(device);
device->intr.armed = true;
spin_unlock_irq(&device->intr.lock);
}
void
nvkm_intr_unarm(struct nvkm_device *device)
{
spin_lock_irq(&device->intr.lock);
nvkm_intr_unarm_locked(device);
device->intr.armed = false;
spin_unlock_irq(&device->intr.lock);
}
int
nvkm_intr_install(struct nvkm_device *device)
{
int ret;
device->intr.irq = device->func->irq(device);
if (device->intr.irq < 0)
return device->intr.irq;
ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
if (ret)
return ret;
device->intr.alloc = true;
return 0;
}
void
nvkm_intr_dtor(struct nvkm_device *device)
{
struct nvkm_intr *intr, *intt;
list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
list_del(&intr->head);
kfree(intr->mask);
kfree(intr->stat);
}
if (device->intr.alloc)
free_irq(device->intr.irq, device);
}
void
nvkm_intr_ctor(struct nvkm_device *device)
{
int i;
INIT_LIST_HEAD(&device->intr.intr);
for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
INIT_LIST_HEAD(&device->intr.prio[i]);
spin_lock_init(&device->intr.lock);
device->intr.armed = false;
}
void
nvkm_inth_block(struct nvkm_inth *inth)
{
if (unlikely(!inth->intr))
return;
atomic_set(&inth->allowed, 0);
}
void
nvkm_inth_allow(struct nvkm_inth *inth)
{
struct nvkm_intr *intr = inth->intr;
unsigned long flags;
if (unlikely(!inth->intr))
return;
spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
if (!atomic_xchg(&inth->allowed, 1)) {
if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
}
spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
}
int
nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
{
struct nvkm_device *device = subdev->device;
int ret;
if (WARN_ON(inth->mask))
return -EBUSY;
ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
if (ret)
return ret;
nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
inth->leaf, inth->mask, subdev->name);
inth->intr = intr;
inth->func = func;
atomic_set(&inth->allowed, 0);
list_add_tail(&inth->head, &device->intr.prio[prio]);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/intr.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include <core/oproxy.h>
static int
nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
}
static int
nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
struct nvkm_event **pevent)
{
return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
}
static int
nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
}
static int
nvkm_oproxy_unmap(struct nvkm_object *object)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
if (unlikely(!oproxy->object))
return 0;
return nvkm_object_unmap(oproxy->object);
}
static int
nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
{
return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
{
return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
{
return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
{
return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
}
static int
nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
return nvkm_object_bind(nvkm_oproxy(object)->object,
parent, align, pgpuobj);
}
static int
nvkm_oproxy_sclass(struct nvkm_object *object, int index,
struct nvkm_oclass *oclass)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
oclass->parent = oproxy->object;
if (!oproxy->object->func->sclass)
return -ENODEV;
return oproxy->object->func->sclass(oproxy->object, index, oclass);
}
static int
nvkm_oproxy_uevent(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_uevent *uevent)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
if (!oproxy->object->func->uevent)
return -ENOSYS;
return oproxy->object->func->uevent(oproxy->object, argv, argc, uevent);
}
static int
nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
int ret;
if (oproxy->func->fini[0]) {
ret = oproxy->func->fini[0](oproxy, suspend);
if (ret && suspend)
return ret;
}
if (oproxy->object->func->fini) {
ret = oproxy->object->func->fini(oproxy->object, suspend);
if (ret && suspend)
return ret;
}
if (oproxy->func->fini[1]) {
ret = oproxy->func->fini[1](oproxy, suspend);
if (ret && suspend)
return ret;
}
return 0;
}
static int
nvkm_oproxy_init(struct nvkm_object *object)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
int ret;
if (oproxy->func->init[0]) {
ret = oproxy->func->init[0](oproxy);
if (ret)
return ret;
}
if (oproxy->object->func->init) {
ret = oproxy->object->func->init(oproxy->object);
if (ret)
return ret;
}
if (oproxy->func->init[1]) {
ret = oproxy->func->init[1](oproxy);
if (ret)
return ret;
}
return 0;
}
static void *
nvkm_oproxy_dtor(struct nvkm_object *object)
{
struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
if (oproxy->func->dtor[0])
oproxy->func->dtor[0](oproxy);
nvkm_object_del(&oproxy->object);
if (oproxy->func->dtor[1])
oproxy->func->dtor[1](oproxy);
return oproxy;
}
static const struct nvkm_object_func
nvkm_oproxy_func = {
.dtor = nvkm_oproxy_dtor,
.init = nvkm_oproxy_init,
.fini = nvkm_oproxy_fini,
.mthd = nvkm_oproxy_mthd,
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
.unmap = nvkm_oproxy_unmap,
.rd08 = nvkm_oproxy_rd08,
.rd16 = nvkm_oproxy_rd16,
.rd32 = nvkm_oproxy_rd32,
.wr08 = nvkm_oproxy_wr08,
.wr16 = nvkm_oproxy_wr16,
.wr32 = nvkm_oproxy_wr32,
.bind = nvkm_oproxy_bind,
.sclass = nvkm_oproxy_sclass,
.uevent = nvkm_oproxy_uevent,
};
void
nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
{
nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
oproxy->func = func;
}
int
nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
{
if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
return -ENOMEM;
nvkm_oproxy_ctor(func, oclass, *poproxy);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/core/oproxy.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
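/* Walk the device info table at 0x022700, building an nvkm_top_device
 * entry for each engine/subdev discovered.
 */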
static int
gk104_top_parse(struct nvkm_top *top)
{
struct nvkm_subdev *subdev = &top->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top_device *info = NULL;
u32 data, type, inst;
int i;
for (i = 0; i < 64; i++) {
if (!info) {
if (!(info = nvkm_top_device_new(top)))
return -ENOMEM;
type = ~0;
inst = 0;
}
data = nvkm_rd32(device, 0x022700 + (i * 0x04));
nvkm_trace(subdev, "%02x: %08x\n", i, data);
switch (data & 0x00000003) {
case 0x00000000: /* NOT_VALID */
continue;
case 0x00000001: /* DATA */
inst = (data & 0x3c000000) >> 26;
info->addr = (data & 0x00fff000);
if (data & 0x00000004)
info->fault = (data & 0x000003f8) >> 3;
break;
case 0x00000002: /* ENUM */
if (data & 0x00000020)
info->engine = (data & 0x3c000000) >> 26;
if (data & 0x00000010)
info->runlist = (data & 0x01e00000) >> 21;
if (data & 0x00000008)
info->intr = (data & 0x000f8000) >> 15;
if (data & 0x00000004)
info->reset = (data & 0x00003e00) >> 9;
break;
case 0x00000003: /* ENGINE_TYPE */
type = (data & 0x7ffffffc) >> 2;
break;
}
if (data & 0x80000000)
continue;
/* Translate engine type to NVKM engine identifier. */
#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
switch (type) {
case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
case 0x00000001: O_(NVKM_ENGINE_CE , 0); break;
case 0x00000002: O_(NVKM_ENGINE_CE , 1); break;
case 0x00000003: O_(NVKM_ENGINE_CE , 2); break;
case 0x00000008: O_(NVKM_ENGINE_MSPDEC, 0); break;
case 0x00000009: O_(NVKM_ENGINE_MSPPP , 0); break;
case 0x0000000a: O_(NVKM_ENGINE_MSVLD , 0); break;
case 0x0000000b: O_(NVKM_ENGINE_MSENC , 0); break;
case 0x0000000c: O_(NVKM_ENGINE_VIC , 0); break;
case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
case 0x0000000f: O_(NVKM_ENGINE_NVENC , 1); break;
case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
default:
break;
}
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
"engine %2d runlist %2d intr %2d "
"reset %2d\n", type, inst,
info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
info->addr, info->fault, info->engine, info->runlist,
info->intr, info->reset);
info = NULL;
}
return 0;
}
static const struct nvkm_top_func
gk104_top = {
.parse = gk104_top_parse,
};
int
gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_top **ptop)
{
return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
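/* GA100 moved the device info table to 0x022800; entries span multiple
 * words, with the table length advertised by 0x0224fc.
 */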
static int
ga100_top_parse(struct nvkm_top *top)
{
struct nvkm_subdev *subdev = &top->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top_device *info = NULL;
u32 data, type, inst;
int i, n, size = nvkm_rd32(device, 0x0224fc) >> 20;
for (i = 0, n = 0; i < size; i++) {
if (!info) {
if (!(info = nvkm_top_device_new(top)))
return -ENOMEM;
type = ~0;
inst = 0;
}
data = nvkm_rd32(device, 0x022800 + (i * 0x04));
nvkm_trace(subdev, "%02x: %08x\n", i, data);
if (!data && n == 0)
continue;
switch (n++) {
case 0:
type = (data & 0x3f000000) >> 24;
inst = (data & 0x000f0000) >> 16;
info->fault = (data & 0x0000007f);
break;
case 1:
info->addr = (data & 0x00fff000);
info->reset = (data & 0x0000001f);
break;
case 2:
info->runlist = (data & 0x00fffc00);
info->engine = (data & 0x00000003);
break;
default:
break;
}
if (data & 0x80000000)
continue;
n = 0;
/* Translate engine type to NVKM engine identifier. */
#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0)
#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0)
switch (type) {
case 0x00000000: O_(NVKM_ENGINE_GR , 0); break;
case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break;
case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break;
case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break;
case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
case 0x00000013: I_(NVKM_ENGINE_CE , inst); break;
case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break;
case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break;
case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break;
case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break;
default:
break;
}
nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
"runlist %6x engine %2d reset %2d\n", type, inst,
info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
info->engine, info->reset);
info = NULL;
}
return 0;
}
static const struct nvkm_top_func
ga100_top = {
.parse = ga100_top_parse,
};
int
ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_top **ptop)
{
return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
struct nvkm_top_device *
nvkm_top_device_new(struct nvkm_top *top)
{
struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
if (info) {
info->type = NVKM_SUBDEV_NR;
info->inst = -1;
info->addr = 0;
info->fault = -1;
info->engine = -1;
info->runlist = -1;
info->reset = -1;
info->intr = -1;
list_add_tail(&info->head, &top->device);
}
return info;
}
u32
nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
if (info->type == type && info->inst == inst)
return info->addr;
}
}
return 0;
}
u32
nvkm_top_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
if (info->type == type && info->inst == inst && info->reset >= 0)
return BIT(info->reset);
}
}
return 0;
}
u32
nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
list_for_each_entry(info, &top->device, head) {
if (info->type == type && info->inst == inst && info->intr >= 0)
return BIT(info->intr);
}
}
return 0;
}
int
nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
if (info->type == type && info->inst == inst && info->fault >= 0)
return info->fault;
}
return -ENOENT;
}
struct nvkm_subdev *
nvkm_top_fault(struct nvkm_device *device, int fault)
{
struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
if (info->fault == fault)
return nvkm_device_subdev(device, info->type, info->inst);
}
return NULL;
}
int
nvkm_top_parse(struct nvkm_device *device)
{
struct nvkm_top *top = device->top;
if (!top || !list_empty(&top->device))
return 0;
return top->func->parse(top);
}
static void *
nvkm_top_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_top *top = nvkm_top(subdev);
struct nvkm_top_device *info, *temp;
list_for_each_entry_safe(info, temp, &top->device, head) {
list_del(&info->head);
kfree(info);
}
return top;
}
static const struct nvkm_subdev_func
nvkm_top = {
.dtor = nvkm_top_dtor,
};
int
nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop)
{
struct nvkm_top *top;
if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_top, device, type, inst, &top->subdev);
top->func = func;
INIT_LIST_HEAD(&top->device);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static void
tu102_vfn_intr_reset(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1000 + (leaf * 4), mask);
}
static void
tu102_vfn_intr_allow(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1200 + (leaf * 4), mask);
}
static void
tu102_vfn_intr_block(struct nvkm_intr *intr, int leaf, u32 mask)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1400 + (leaf * 4), mask);
}
static void
tu102_vfn_intr_rearm(struct nvkm_intr *intr)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1608, 0x0000000f);
}
static void
tu102_vfn_intr_unarm(struct nvkm_intr *intr)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
nvkm_wr32(vfn->subdev.device, vfn->addr.priv + 0x1610, 0x0000000f);
}
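/* Latch pending interrupt status for each leaf whose top-level summary bit
 * is set.
 */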
static bool
tu102_vfn_intr_pending(struct nvkm_intr *intr)
{
struct nvkm_vfn *vfn = container_of(intr, typeof(*vfn), intr);
struct nvkm_device *device = vfn->subdev.device;
u32 intr_top = nvkm_rd32(device, vfn->addr.priv + 0x1600);
int pending = 0, leaf;
for (leaf = 0; leaf < 8; leaf++) {
if (intr_top & BIT(leaf / 2)) {
intr->stat[leaf] = nvkm_rd32(device, vfn->addr.priv + 0x1000 + (leaf * 4));
if (intr->stat[leaf])
pending++;
} else {
intr->stat[leaf] = 0;
}
}
return pending != 0;
}
const struct nvkm_intr_func
tu102_vfn_intr = {
.pending = tu102_vfn_intr_pending,
.unarm = tu102_vfn_intr_unarm,
.rearm = tu102_vfn_intr_rearm,
.block = tu102_vfn_intr_block,
.allow = tu102_vfn_intr_allow,
.reset = tu102_vfn_intr_reset,
};
static const struct nvkm_vfn_func
tu102_vfn = {
.intr = &tu102_vfn_intr,
.user = { 0x030000, 0x010000, { -1, -1, TURING_USERMODE_A } },
};
int
tu102_vfn_new(struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
{
return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_intr_data
ga100_vfn_intrs[] = {
{ NVKM_ENGINE_DISP , 0, 4, 0x04000000, true },
{ NVKM_SUBDEV_GPIO , 0, 4, 0x00200000, true },
{ NVKM_SUBDEV_I2C , 0, 4, 0x00200000, true },
{ NVKM_SUBDEV_PRIVRING, 0, 4, 0x40000000, true },
{}
};
static const struct nvkm_vfn_func
ga100_vfn = {
.intr = &tu102_vfn_intr,
.intrs = ga100_vfn_intrs,
.user = { 0x030000, 0x010000, { -1, -1, AMPERE_USERMODE_A } },
};
int
ga100_vfn_new(struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
{
return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static void *
nvkm_vfn_dtor(struct nvkm_subdev *subdev)
{
return nvkm_vfn(subdev);
}
static const struct nvkm_subdev_func
nvkm_vfn = {
.dtor = nvkm_vfn_dtor,
};
int
nvkm_vfn_new_(const struct nvkm_vfn_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_vfn **pvfn)
{
struct nvkm_vfn *vfn;
int ret;
if (!(vfn = *pvfn = kzalloc(sizeof(*vfn), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_vfn, device, type, inst, &vfn->subdev);
vfn->func = func;
vfn->addr.priv = addr;
vfn->addr.user = vfn->addr.priv + func->user.addr;
if (vfn->func->intr) {
ret = nvkm_intr_add(vfn->func->intr, vfn->func->intrs,
&vfn->subdev, 8, &vfn->intr);
if (ret)
return ret;
}
vfn->user.ctor = nvkm_uvfn_new;
vfn->user.base = func->user.base;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/vfn/base.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <nvif/class.h>
static const struct nvkm_vfn_func
gv100_vfn = {
.user = { 0x810000, 0x010000, { -1, -1, VOLTA_USERMODE_A } },
};
int
gv100_vfn_new(struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
{
return nvkm_vfn_new_(&gv100_vfn, device, type, inst, 0, pvfn);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/vfn/gv100.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define nvkm_uvfn(p) container_of((p), struct nvkm_uvfn, object)
#include "priv.h"
#include <core/object.h>
struct nvkm_uvfn {
struct nvkm_object object;
struct nvkm_vfn *vfn;
};
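/* Expose the VFN user-mode register window to userspace by reporting its
 * BAR0 offset, size and mapping type.
 */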
static int
nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
struct nvkm_device *device = vfn->subdev.device;
*addr = device->func->resource_addr(device, 0) + vfn->addr.user;
*size = vfn->func->user.size;
*type = NVKM_OBJECT_MAP_IO;
return 0;
}
static const struct nvkm_object_func
nvkm_uvfn = {
.map = nvkm_uvfn_map,
};
int
nvkm_uvfn_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
struct nvkm_uvfn *uvfn;
if (argc != 0)
return -ENOSYS;
if (!(uvfn = kzalloc(sizeof(*uvfn), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_uvfn, oclass, &uvfn->object);
uvfn->vfn = device->vfn;
*pobject = &uvfn->object;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
int
g92_pcie_version_supported(struct nvkm_pci *pci)
{
if ((nvkm_pci_rd32(pci, 0x460) & 0x200) == 0x200)
return 2;
return 1;
}
static const struct nvkm_pci_func
g92_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
.pcie.set_link = g84_pcie_set_link,
.pcie.max_speed = g84_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = g84_pcie_set_version,
.pcie.version = g84_pcie_version,
.pcie.version_supported = g92_pcie_version_supported,
};
int
g92_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&g92_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
u32
nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
struct nvkm_device *device = pci->subdev.device;
return nvkm_rd32(device, 0x088000 + addr);
}
void
nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_wr08(device, 0x088000 + addr, data);
}
void
nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_wr32(device, 0x088000 + addr, data);
}
void
nv40_pci_msi_rearm(struct nvkm_pci *pci)
{
nvkm_pci_wr08(pci, 0x0068, 0xff);
}
static const struct nvkm_pci_func
nv40_pci_func = {
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
};
int
nv40_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&nv40_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static u32
nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
struct nvkm_device *device = pci->subdev.device;
return nvkm_rd32(device, 0x001800 + addr);
}
static void
nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_wr08(device, 0x001800 + addr, data);
}
static void
nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_wr32(device, 0x001800 + addr, data);
}
static const struct nvkm_pci_func
nv04_pci_func = {
.rd32 = nv04_pci_rd32,
.wr08 = nv04_pci_wr08,
.wr32 = nv04_pci_wr32,
};
int
nv04_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&nv04_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c |
/*
* Copyright 2015 Karol Herbst <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst <[email protected]>
*/
#include "priv.h"
static int
gk104_pcie_version_supported(struct nvkm_pci *pci)
{
return (nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x4) == 0x4 ? 2 : 1;
}
static void
gk104_pcie_set_cap_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
{
struct nvkm_device *device = pci->subdev.device;
switch (speed) {
case NVKM_PCIE_SPEED_2_5:
gf100_pcie_set_cap_speed(pci, false);
nvkm_mask(device, 0x8c1c0, 0x30000, 0x10000);
break;
case NVKM_PCIE_SPEED_5_0:
gf100_pcie_set_cap_speed(pci, true);
nvkm_mask(device, 0x8c1c0, 0x30000, 0x20000);
break;
case NVKM_PCIE_SPEED_8_0:
gf100_pcie_set_cap_speed(pci, true);
nvkm_mask(device, 0x8c1c0, 0x30000, 0x30000);
break;
}
}
static enum nvkm_pcie_speed
gk104_pcie_cap_speed(struct nvkm_pci *pci)
{
int speed = gf100_pcie_cap_speed(pci);
if (speed == 0)
return NVKM_PCIE_SPEED_2_5;
if (speed >= 1) {
int speed2 = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x30000;
switch (speed2) {
case 0x00000:
case 0x10000:
return NVKM_PCIE_SPEED_2_5;
case 0x20000:
return NVKM_PCIE_SPEED_5_0;
case 0x30000:
return NVKM_PCIE_SPEED_8_0;
}
}
return -EINVAL;
}
static void
gk104_pcie_set_lnkctl_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
{
u8 reg_v = 0;
switch (speed) {
case NVKM_PCIE_SPEED_2_5:
reg_v = 1;
break;
case NVKM_PCIE_SPEED_5_0:
reg_v = 2;
break;
case NVKM_PCIE_SPEED_8_0:
reg_v = 3;
break;
}
nvkm_pci_mask(pci, 0xa8, 0x3, reg_v);
}
static enum nvkm_pcie_speed
gk104_pcie_lnkctl_speed(struct nvkm_pci *pci)
{
u8 reg_v = nvkm_pci_rd32(pci, 0xa8) & 0x3;
switch (reg_v) {
case 0:
case 1:
return NVKM_PCIE_SPEED_2_5;
case 2:
return NVKM_PCIE_SPEED_5_0;
case 3:
return NVKM_PCIE_SPEED_8_0;
}
return -1;
}
static enum nvkm_pcie_speed
gk104_pcie_max_speed(struct nvkm_pci *pci)
{
u32 max_speed = nvkm_rd32(pci->subdev.device, 0x8c1c0) & 0x300000;
switch (max_speed) {
case 0x000000:
return NVKM_PCIE_SPEED_8_0;
case 0x100000:
return NVKM_PCIE_SPEED_5_0;
case 0x200000:
return NVKM_PCIE_SPEED_2_5;
}
return NVKM_PCIE_SPEED_2_5;
}
static void
gk104_pcie_set_link_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
{
struct nvkm_device *device = pci->subdev.device;
u32 mask_value;
switch (speed) {
case NVKM_PCIE_SPEED_8_0:
mask_value = 0x00000;
break;
case NVKM_PCIE_SPEED_5_0:
mask_value = 0x40000;
break;
case NVKM_PCIE_SPEED_2_5:
default:
mask_value = 0x80000;
break;
}
nvkm_mask(device, 0x8c040, 0xc0000, mask_value);
nvkm_mask(device, 0x8c040, 0x1, 0x1);
}
static int
gk104_pcie_init(struct nvkm_pci *pci)
{
enum nvkm_pcie_speed lnkctl_speed, max_speed, cap_speed;
struct nvkm_subdev *subdev = &pci->subdev;
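/* cap/lnkctl adjustments below are skipped unless the card reports PCIe version 2 */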
if (gf100_pcie_version(pci) < 2)
return 0;
lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
max_speed = gk104_pcie_max_speed(pci);
cap_speed = gk104_pcie_cap_speed(pci);
if (cap_speed != max_speed) {
nvkm_trace(subdev, "adjusting cap to max speed\n");
gk104_pcie_set_cap_speed(pci, max_speed);
cap_speed = gk104_pcie_cap_speed(pci);
if (cap_speed != max_speed)
nvkm_warn(subdev, "failed to adjust cap speed\n");
}
if (lnkctl_speed != max_speed) {
nvkm_debug(subdev, "adjusting lnkctl to max speed\n");
gk104_pcie_set_lnkctl_speed(pci, max_speed);
lnkctl_speed = gk104_pcie_lnkctl_speed(pci);
if (lnkctl_speed != max_speed)
nvkm_error(subdev, "failed to adjust lnkctl speed\n");
}
return 0;
}
static int
gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
struct nvkm_subdev *subdev = &pci->subdev;
enum nvkm_pcie_speed lnk_ctl_speed = gk104_pcie_lnkctl_speed(pci);
enum nvkm_pcie_speed lnk_cap_speed = gk104_pcie_cap_speed(pci);
if (speed > lnk_cap_speed) {
speed = lnk_cap_speed;
nvkm_warn(subdev, "dropping requested speed due too low cap"
" speed\n");
}
if (speed > lnk_ctl_speed) {
speed = lnk_ctl_speed;
nvkm_warn(subdev, "dropping requested speed due too low"
" lnkctl speed\n");
}
gk104_pcie_set_link_speed(pci, speed);
return 0;
}
static const struct nvkm_pci_func
gk104_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gk104_pcie_init,
.pcie.set_link = gk104_pcie_set_link,
.pcie.max_speed = gk104_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = gf100_pcie_set_version,
.pcie.version = gf100_pcie_version,
.pcie.version_supported = gk104_pcie_version_supported,
};
int
gk104_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&gk104_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static void
gf100_pci_msi_rearm(struct nvkm_pci *pci)
{
nvkm_pci_wr08(pci, 0x0704, 0xff);
}
void
gf100_pcie_set_version(struct nvkm_pci *pci, u8 ver)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_mask(device, 0x02241c, 0x1, ver > 1 ? 1 : 0);
}
int
gf100_pcie_version(struct nvkm_pci *pci)
{
struct nvkm_device *device = pci->subdev.device;
return (nvkm_rd32(device, 0x02241c) & 0x1) + 1;
}
void
gf100_pcie_set_cap_speed(struct nvkm_pci *pci, bool full_speed)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_mask(device, 0x02241c, 0x80, full_speed ? 0x80 : 0x0);
}
int
gf100_pcie_cap_speed(struct nvkm_pci *pci)
{
struct nvkm_device *device = pci->subdev.device;
u8 punits_pci_cap_speed = nvkm_rd32(device, 0x02241c) & 0x80;
if (punits_pci_cap_speed == 0x80)
return 1;
return 0;
}
int
gf100_pcie_init(struct nvkm_pci *pci)
{
bool full_speed = g84_pcie_cur_speed(pci) == NVKM_PCIE_SPEED_5_0;
gf100_pcie_set_cap_speed(pci, full_speed);
return 0;
}
int
gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
gf100_pcie_set_cap_speed(pci, speed == NVKM_PCIE_SPEED_5_0);
g84_pcie_set_link_speed(pci, speed);
return 0;
}
static const struct nvkm_pci_func
gf100_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = gf100_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
.pcie.set_link = gf100_pcie_set_link,
.pcie.max_speed = g84_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = gf100_pcie_set_version,
.pcie.version = gf100_pcie_version,
.pcie.version_supported = g92_pcie_version_supported,
};
int
gf100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&gf100_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include "agp.h"
#include <core/option.h>
#include <core/pci.h>
void
nvkm_pci_msi_rearm(struct nvkm_device *device)
{
struct nvkm_pci *pci = device->pci;
if (pci && pci->msi)
pci->func->msi_rearm(pci);
}
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
return pci->func->rd32(pci, addr);
}
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
pci->func->wr08(pci, addr, data);
}
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
pci->func->wr32(pci, addr, data);
}
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
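/* read-modify-write helper; returns the register's previous value */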
u32 data = pci->func->rd32(pci, addr);
pci->func->wr32(pci, addr, (data & ~mask) | value);
return data;
}
void
nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
{
u32 data = nvkm_pci_rd32(pci, 0x0050);
if (shadow)
data |= 0x00000001;
else
data &= ~0x00000001;
nvkm_pci_wr32(pci, 0x0050, data);
}
static int
nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
if (pci->agp.bridge)
nvkm_agp_fini(pci);
return 0;
}
static int
nvkm_pci_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
if (pci->agp.bridge)
nvkm_agp_preinit(pci);
return 0;
}
static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
int ret;
if (pci_is_pcie(pci->pdev)) {
ret = nvkm_pcie_oneinit(pci);
if (ret)
return ret;
}
return 0;
}
static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
int ret;
if (pci->agp.bridge) {
ret = nvkm_agp_init(pci);
if (ret)
return ret;
} else if (pci_is_pcie(pci->pdev)) {
nvkm_pcie_init(pci);
}
if (pci->func->init)
pci->func->init(pci);
/* Ensure MSI interrupts are armed, for the case where there are
* already interrupts pending (for whatever reason) at load time.
*/
if (pci->msi)
pci->func->msi_rearm(pci);
return 0;
}
static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
nvkm_agp_dtor(pci);
if (pci->msi)
pci_disable_msi(pci->pdev);
return nvkm_pci(subdev);
}
static const struct nvkm_subdev_func
nvkm_pci_func = {
.dtor = nvkm_pci_dtor,
.oneinit = nvkm_pci_oneinit,
.preinit = nvkm_pci_preinit,
.init = nvkm_pci_init,
.fini = nvkm_pci_fini,
};
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci)
{
struct nvkm_pci *pci;
if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
pci->func = func;
pci->pdev = device->func->pci(device)->pdev;
pci->pcie.speed = -1;
pci->pcie.width = -1;
if (device->type == NVKM_DEVICE_AGP)
nvkm_agp_ctor(pci);
switch (pci->pdev->device & 0x0ff0) {
case 0x00f0:
case 0x02e0:
/* BR02? NFI how these would be handled yet exactly */
break;
default:
switch (device->chipset) {
case 0xaa:
/* reported broken; nv also disables it */
break;
default:
pci->msi = true;
break;
}
}
#ifdef __BIG_ENDIAN
pci->msi = false;
#endif
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
if (pci->msi)
nvkm_debug(&pci->subdev, "MSI enabled\n");
} else {
pci->msi = false;
}
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static const struct nvkm_pci_func
nv4c_pci_func = {
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
};
int
nv4c_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&nv4c_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static const struct nvkm_pci_func
g94_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = g84_pcie_init,
.pcie.set_link = g84_pcie_set_link,
.pcie.max_speed = g84_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = g84_pcie_set_version,
.pcie.version = g84_pcie_version,
.pcie.version_supported = g92_pcie_version_supported,
};
int
g94_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&g94_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include <core/pci.h>
/* MSI re-arm through the PRI appears to be broken on NV46/NV50/G84/G86/G92,
* so we access it via alternate PCI config space mechanisms.
*/
void
nv46_pci_msi_rearm(struct nvkm_pci *pci)
{
struct nvkm_device *device = pci->subdev.device;
struct pci_dev *pdev = device->func->pci(device)->pdev;
pci_write_config_byte(pdev, 0x68, 0xff);
}
static const struct nvkm_pci_func
nv46_pci_func = {
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
};
int
nv46_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&nv46_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c |
/*
* Copyright 2015 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "agp.h"
#ifdef __NVKM_PCI_AGP_H__
#include <core/option.h>
struct nvkm_device_agp_quirk {
u16 hostbridge_vendor;
u16 hostbridge_device;
u16 chip_vendor;
u16 chip_device;
int mode;
};
static const struct nvkm_device_agp_quirk
nvkm_device_agp_quirks[] = {
/* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
/* SiS 761 does not support AGP cards, use PCI mode */
{ PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 },
{},
};
void
nvkm_agp_fini(struct nvkm_pci *pci)
{
if (pci->agp.acquired) {
agp_backend_release(pci->agp.bridge);
pci->agp.acquired = false;
}
}
/* Ensure AGP controller is in a consistent state in case we need to
* execute the VBIOS DEVINIT scripts.
*/
void
nvkm_agp_preinit(struct nvkm_pci *pci)
{
struct nvkm_device *device = pci->subdev.device;
u32 mode = nvkm_pci_rd32(pci, 0x004c);
u32 save[2];
/* First of all, disable fast writes, otherwise if it's already
* enabled in the AGP bridge and we disable the card's AGP
* controller we might be locking ourselves out of it.
*/
if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
agp_enable(pci->agp.bridge, mode);
}
/* clear busmaster bit, and disable AGP */
save[0] = nvkm_pci_rd32(pci, 0x0004);
nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
nvkm_pci_wr32(pci, 0x004c, 0x00000000);
/* reset PGRAPH, PFIFO and PTIMER */
save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
nvkm_mask(device, 0x000200, 0x00011100, save[1]);
/* and restore busmaster bit (gives effect of resetting AGP) */
nvkm_pci_wr32(pci, 0x0004, save[0]);
}
int
nvkm_agp_init(struct nvkm_pci *pci)
{
if (!agp_backend_acquire(pci->pdev)) {
nvkm_error(&pci->subdev, "failed to acquire agp\n");
return -ENODEV;
}
agp_enable(pci->agp.bridge, pci->agp.mode);
pci->agp.acquired = true;
return 0;
}
void
nvkm_agp_dtor(struct nvkm_pci *pci)
{
arch_phys_wc_del(pci->agp.mtrr);
}
void
nvkm_agp_ctor(struct nvkm_pci *pci)
{
const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
struct nvkm_subdev *subdev = &pci->subdev;
struct nvkm_device *device = subdev->device;
struct agp_kern_info info;
int mode = -1;
#ifdef __powerpc__
/* Disable AGP by default on all PowerPC machines for now -- At
* least some UniNorth-2 AGP bridges are known to be broken:
* DMA from the host to the card works just fine, but writeback
* from the card to the host goes straight to memory
* untranslated bypassing that GATT somehow, making them quite
* painful to deal with...
*/
mode = 0;
#endif
mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
/* acquire bridge temporarily, so that we can copy its info */
if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
nvkm_warn(subdev, "failed to acquire agp\n");
return;
}
agp_copy_info(pci->agp.bridge, &info);
agp_backend_release(pci->agp.bridge);
pci->agp.mode = info.mode;
pci->agp.base = info.aper_base;
pci->agp.size = info.aper_size * 1024 * 1024;
pci->agp.cma = info.cant_use_aperture;
pci->agp.mtrr = -1;
/* determine if bridge + chipset combination needs a workaround */
while (quirk->hostbridge_vendor) {
if (info.device->vendor == quirk->hostbridge_vendor &&
info.device->device == quirk->hostbridge_device &&
(quirk->chip_vendor == (u16)PCI_ANY_ID ||
pci->pdev->vendor == quirk->chip_vendor) &&
(quirk->chip_device == (u16)PCI_ANY_ID ||
pci->pdev->device == quirk->chip_device)) {
nvkm_info(subdev, "forcing default agp mode to %dX, "
"use NvAGP=<mode> to override\n",
quirk->mode);
mode = quirk->mode;
break;
}
quirk++;
}
/* apply quirk / user-specified mode */
if (mode >= 1) {
if (pci->agp.mode & 0x00000008)
mode /= 4; /* AGPv3 */
pci->agp.mode &= ~0x00000007;
pci->agp.mode |= (mode & 0x7);
} else
if (mode == 0) {
pci->agp.bridge = NULL;
return;
}
/* fast writes appear to be broken on nv18; they make the card
* lock up randomly.
*/
if (device->chipset == 0x18)
pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
}
#endif
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include <core/pci.h>
static int
g84_pcie_version_supported(struct nvkm_pci *pci)
{
/* g84 and g86 report wrong information about what they support */
return 1;
}
int
g84_pcie_version(struct nvkm_pci *pci)
{
struct nvkm_device *device = pci->subdev.device;
return (nvkm_rd32(device, 0x00154c) & 0x1) + 1;
}
void
g84_pcie_set_version(struct nvkm_pci *pci, u8 ver)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_mask(device, 0x00154c, 0x1, (ver >= 2 ? 0x1 : 0x0));
}
static void
g84_pcie_set_cap_speed(struct nvkm_pci *pci, bool full_speed)
{
struct nvkm_device *device = pci->subdev.device;
nvkm_mask(device, 0x00154c, 0x80, full_speed ? 0x80 : 0x0);
}
enum nvkm_pcie_speed
g84_pcie_cur_speed(struct nvkm_pci *pci)
{
u32 reg_v = nvkm_pci_rd32(pci, 0x88) & 0x30000;
switch (reg_v) {
case 0x30000:
return NVKM_PCIE_SPEED_8_0;
case 0x20000:
return NVKM_PCIE_SPEED_5_0;
case 0x10000:
default:
return NVKM_PCIE_SPEED_2_5;
}
}
enum nvkm_pcie_speed
g84_pcie_max_speed(struct nvkm_pci *pci)
{
u32 reg_v = nvkm_pci_rd32(pci, 0x460) & 0x3300;
if (reg_v == 0x2200)
return NVKM_PCIE_SPEED_5_0;
return NVKM_PCIE_SPEED_2_5;
}
void
g84_pcie_set_link_speed(struct nvkm_pci *pci, enum nvkm_pcie_speed speed)
{
u32 mask_value;
if (speed == NVKM_PCIE_SPEED_5_0)
mask_value = 0x20;
else
mask_value = 0x10;
nvkm_pci_mask(pci, 0x460, 0x30, mask_value);
nvkm_pci_mask(pci, 0x460, 0x1, 0x1);
}
int
g84_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
g84_pcie_set_cap_speed(pci, speed == NVKM_PCIE_SPEED_5_0);
g84_pcie_set_link_speed(pci, speed);
return 0;
}
void
g84_pci_init(struct nvkm_pci *pci)
{
/* The following only concerns PCIe cards. */
if (!pci_is_pcie(pci->pdev))
return;
/* Tag field is 8-bit long, regardless of EXT_TAG.
* However, if EXT_TAG is disabled, only the lower 5 bits of the tag
* field should be used, limiting the number of requests to 32.
*
* Apparently, 0x041c stores some limit on the number of requests
* possible, so if EXT_TAG is disabled, limit the number of requests to
* 32.
*
* Fixes fdo#86537
*/
if (nvkm_pci_rd32(pci, 0x007c) & 0x00000020)
nvkm_pci_mask(pci, 0x0080, 0x00000100, 0x00000100);
else
nvkm_pci_mask(pci, 0x041c, 0x00000060, 0x00000000);
}
int
g84_pcie_init(struct nvkm_pci *pci)
{
bool full_speed = g84_pcie_cur_speed(pci) == NVKM_PCIE_SPEED_5_0;
g84_pcie_set_cap_speed(pci, full_speed);
return 0;
}
static const struct nvkm_pci_func
g84_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
.pcie.set_link = g84_pcie_set_link,
.pcie.max_speed = g84_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = g84_pcie_set_version,
.pcie.version = g84_pcie_version,
.pcie.version_supported = g84_pcie_version_supported,
};
int
g84_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&g84_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c |
/*
* Copyright 2015 Karol Herbst <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst <[email protected]>
*/
#include "priv.h"
static const struct nvkm_pci_func
gf106_pci_func = {
.init = g84_pci_init,
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
.pcie.set_link = gf100_pcie_set_link,
.pcie.max_speed = g84_pcie_max_speed,
.pcie.cur_speed = g84_pcie_cur_speed,
.pcie.set_version = gf100_pcie_set_version,
.pcie.version = gf100_pcie_version,
.pcie.version_supported = g92_pcie_version_supported,
};
int
gf106_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&gf106_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c |
/*
* Copyright 2015 Karol Herbst <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst <[email protected]>
*/
#include "priv.h"
static char *nvkm_pcie_speeds[] = {
"2.5GT/s",
"5.0GT/s",
"8.0GT/s",
};
static enum nvkm_pcie_speed
nvkm_pcie_speed(enum pci_bus_speed speed)
{
switch (speed) {
case PCIE_SPEED_2_5GT:
return NVKM_PCIE_SPEED_2_5;
case PCIE_SPEED_5_0GT:
return NVKM_PCIE_SPEED_5_0;
case PCIE_SPEED_8_0GT:
return NVKM_PCIE_SPEED_8_0;
default:
/* XXX 0x16 is 8_0, assume 0x17 will be 16_0 for now */
if (speed == 0x17)
return NVKM_PCIE_SPEED_8_0;
return -1;
}
}
static int
nvkm_pcie_get_version(struct nvkm_pci *pci)
{
if (!pci->func->pcie.version)
return -ENOSYS;
return pci->func->pcie.version(pci);
}
static int
nvkm_pcie_get_max_version(struct nvkm_pci *pci)
{
if (!pci->func->pcie.version_supported)
return -ENOSYS;
return pci->func->pcie.version_supported(pci);
}
static int
nvkm_pcie_set_version(struct nvkm_pci *pci, int version)
{
if (!pci->func->pcie.set_version)
return -ENOSYS;
nvkm_trace(&pci->subdev, "set to version %i\n", version);
pci->func->pcie.set_version(pci, version);
return nvkm_pcie_get_version(pci);
}
int
nvkm_pcie_oneinit(struct nvkm_pci *pci)
{
if (pci->func->pcie.max_speed)
nvkm_debug(&pci->subdev, "pcie max speed: %s\n",
nvkm_pcie_speeds[pci->func->pcie.max_speed(pci)]);
return 0;
}
int
nvkm_pcie_init(struct nvkm_pci *pci)
{
struct nvkm_subdev *subdev = &pci->subdev;
int ret;
/* raise pcie version first */
ret = nvkm_pcie_get_version(pci);
if (ret > 0) {
int max_version = nvkm_pcie_get_max_version(pci);
if (max_version > 0 && max_version > ret)
ret = nvkm_pcie_set_version(pci, max_version);
if (ret < max_version)
nvkm_error(subdev, "couldn't raise version: %i\n", ret);
}
if (pci->func->pcie.init)
pci->func->pcie.init(pci);
if (pci->pcie.speed != -1)
nvkm_pcie_set_link(pci, pci->pcie.speed, pci->pcie.width);
return 0;
}
int
nvkm_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
struct nvkm_subdev *subdev;
enum nvkm_pcie_speed cur_speed, max_speed;
int ret;
if (!pci || !pci_is_pcie(pci->pdev))
return 0;
if (!pci->func->pcie.set_link)
return -ENOSYS;
subdev = &pci->subdev;
nvkm_trace(subdev, "requested %s\n", nvkm_pcie_speeds[speed]);
if (pci->func->pcie.version(pci) < 2) {
nvkm_error(subdev, "setting link failed due to low version\n");
return -ENODEV;
}
cur_speed = pci->func->pcie.cur_speed(pci);
max_speed = min(nvkm_pcie_speed(pci->pdev->bus->max_bus_speed),
pci->func->pcie.max_speed(pci));
nvkm_trace(subdev, "current speed: %s\n", nvkm_pcie_speeds[cur_speed]);
if (speed > max_speed) {
nvkm_debug(subdev, "%s not supported by bus or card, dropping"
"requested speed to %s", nvkm_pcie_speeds[speed],
nvkm_pcie_speeds[max_speed]);
speed = max_speed;
}
pci->pcie.speed = speed;
pci->pcie.width = width;
if (speed == cur_speed) {
nvkm_debug(subdev, "requested matches current speed\n");
return speed;
}
nvkm_debug(subdev, "set link to %s x%i\n",
nvkm_pcie_speeds[speed], width);
ret = pci->func->pcie.set_link(pci, speed, width);
if (ret < 0)
nvkm_error(subdev, "setting link failed: %i\n", ret);
return ret;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/pcie.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
static void
gp100_pci_msi_rearm(struct nvkm_pci *pci)
{
nvkm_pci_wr32(pci, 0x0704, 0x00000000);
}
static const struct nvkm_pci_func
gp100_pci_func = {
.rd32 = nv40_pci_rd32,
.wr08 = nv40_pci_wr08,
.wr32 = nv40_pci_wr32,
.msi_rearm = gp100_pci_msi_rearm,
};
int
gp100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_pci **ppci)
{
return nvkm_pci_new_(&gp100_pci_func, device, type, inst, ppci);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <subdev/fb.h>
#include <subdev/timer.h>
void
gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_wr32(device, 0x17e270, start);
nvkm_wr32(device, 0x17e274, limit);
nvkm_mask(device, 0x17e26c, 0x00000000, 0x00000004);
}
void
gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
int c, s;
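/* wait (up to 2s per LTS) for bit 2 of each LTS's status register to clear */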
for (c = 0; c < ltc->ltc_nr; c++) {
for (s = 0; s < ltc->lts_nr; s++) {
const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
nvkm_wait_msec(device, 2000, addr,
0x00000004, 0x00000000);
}
}
}
void
gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_mask(device, 0x17e338, 0x0000000f, i);
nvkm_wr32(device, 0x17e33c, color[0]);
nvkm_wr32(device, 0x17e340, color[1]);
nvkm_wr32(device, 0x17e344, color[2]);
nvkm_wr32(device, 0x17e348, color[3]);
}
void
gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_mask(device, 0x17e338, 0x0000000f, i);
nvkm_wr32(device, 0x17e34c, depth);
}
void
gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
u32 intr = nvkm_rd32(device, base + 0x00c);
u16 stat = intr & 0x0000ffff;
char msg[128];
if (stat) {
nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
}
nvkm_wr32(device, base + 0x00c, intr);
}
void
gm107_ltc_intr(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 mask;
mask = nvkm_rd32(device, 0x00017c);
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
static int
gm107_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
const u32 parts = nvkm_rd32(device, 0x022438);
const u32 mask = nvkm_rd32(device, 0x021c14);
const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
int i;
for (i = 0; i < parts; i++) {
if (!(mask & (1 << i)))
ltc->ltc_nr++;
}
ltc->lts_nr = slice;
return gf100_ltc_oneinit_tag_ram(ltc);
}
static void
gm107_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
nvkm_wr32(device, 0x17e278, ltc->tag_base);
nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
static const struct nvkm_ltc_func
gm107_ltc = {
.oneinit = gm107_ltc_oneinit,
.init = gm107_ltc_init,
.intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gm107_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gm107_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static void
gk104_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
static const struct nvkm_ltc_func
gk104_ltc = {
.oneinit = gf100_ltc_oneinit,
.init = gk104_ltc_init,
.intr = gf100_ltc_intr,
.cbc_clear = gf100_ltc_cbc_clear,
.cbc_wait = gf100_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gf100_ltc_zbc_clear_color,
.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gk104_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gk104_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/memory.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
void
gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_wr32(device, 0x17e8cc, start);
nvkm_wr32(device, 0x17e8d0, limit);
nvkm_wr32(device, 0x17e8c8, 0x00000004);
}
void
gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
int c, s;
for (c = 0; c < ltc->ltc_nr; c++) {
for (s = 0; s < ltc->lts_nr; s++) {
const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, addr))
break;
);
}
}
}
void
gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_mask(device, 0x17ea44, 0x0000000f, i);
nvkm_wr32(device, 0x17ea48, color[0]);
nvkm_wr32(device, 0x17ea4c, color[1]);
nvkm_wr32(device, 0x17ea50, color[2]);
nvkm_wr32(device, 0x17ea54, color[3]);
}
void
gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_mask(device, 0x17ea44, 0x0000000f, i);
nvkm_wr32(device, 0x17ea58, depth);
}
const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
{ 0x00000004, "IDLE_ERROR_TSTG" },
{ 0x00000008, "IDLE_ERROR_DSTG" },
{ 0x00000010, "EVICTED_CB" },
{ 0x00000020, "ILLEGAL_COMPSTAT" },
{ 0x00000040, "BLOCKLINEAR_CB" },
{ 0x00000100, "ECC_SEC_ERROR" },
{ 0x00000200, "ECC_DED_ERROR" },
{ 0x00000400, "DEBUG" },
{ 0x00000800, "ATOMIC_TO_Z" },
{ 0x00001000, "ILLEGAL_ATOMIC" },
{ 0x00002000, "BLKACTIVITY_ERR" },
{}
};
static void
gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
u32 intr = nvkm_rd32(device, base + 0x020);
u32 stat = intr & 0x0000ffff;
char msg[128];
if (stat) {
nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
}
nvkm_wr32(device, base + 0x020, intr);
}
void
gf100_ltc_intr(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 mask;
mask = nvkm_rd32(device, 0x00017c);
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
gf100_ltc_lts_intr(ltc, c, s);
mask &= ~(1 << c);
}
}
void
gf100_ltc_invalidate(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
s64 taken;
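/* kick the invalidate via 0x70004 and wait up to 2s for it to complete */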
nvkm_wr32(device, 0x70004, 0x00000001);
taken = nvkm_wait_msec(device, 2000, 0x70004, 0x00000003, 0x00000000);
if (taken > 0)
nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
}
void
gf100_ltc_flush(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
s64 taken;
nvkm_wr32(device, 0x70010, 0x00000001);
taken = nvkm_wait_msec(device, 2000, 0x70010, 0x00000003, 0x00000000);
if (taken > 0)
nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
}
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
struct nvkm_fb *fb = device->fb;
struct nvkm_ram *ram = fb->ram;
u32 bits = (nvkm_rd32(device, 0x100c80) & 0x00001000) ? 16 : 17;
u32 tag_size, tag_margin, tag_align;
int ret;
/* No VRAM, no tags for now. */
if (!ram) {
ltc->num_tags = 0;
goto mm_init;
}
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
ltc->num_tags = (ram->size >> 17) / 4;
if (ltc->num_tags > (1 << bits))
ltc->num_tags = 1 << bits; /* we have 16/17 bits in PTE */
ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
tag_align = ltc->ltc_nr * 0x800;
tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
/* 4 part 4 sub: 0x2000 bytes for 56 tags */
/* 3 part 4 sub: 0x6000 bytes for 168 tags */
/*
* About 147 bytes per tag. Let's be safe and allocate x2, which makes
* 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
*
* For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
*/
tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
tag_size += tag_align;
ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, tag_size,
true, true, &ltc->tag_ram);
if (ret) {
ltc->num_tags = 0;
} else {
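/* round the tag base up to tag_align and store it in units of tag_align */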
u64 tag_base = nvkm_memory_addr(ltc->tag_ram) + tag_margin;
tag_base += tag_align - 1;
do_div(tag_base, tag_align);
ltc->tag_base = tag_base;
}
mm_init:
nvkm_mm_fini(&fb->tags.mm);
return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
}
int
gf100_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
const u32 parts = nvkm_rd32(device, 0x022438);
const u32 mask = nvkm_rd32(device, 0x022554);
const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
int i;
for (i = 0; i < parts; i++) {
if (!(mask & (1 << i)))
ltc->ltc_nr++;
}
ltc->lts_nr = slice;
return gf100_ltc_oneinit_tag_ram(ltc);
}
static void
gf100_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}
static const struct nvkm_ltc_func
gf100_ltc = {
.oneinit = gf100_ltc_oneinit,
.init = gf100_ltc_init,
.intr = gf100_ltc_intr,
.cbc_clear = gf100_ltc_cbc_clear,
.cbc_wait = gf100_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gf100_ltc_zbc_clear_color,
.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c |
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include <subdev/fb.h>
#include <subdev/timer.h>
static int
gm200_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
return gf100_ltc_oneinit_tag_ram(ltc);
}
static void
gm200_ltc_init(struct nvkm_ltc *ltc)
{
nvkm_wr32(ltc->subdev.device, 0x17e278, ltc->tag_base);
}
static const struct nvkm_ltc_func
gm200_ltc = {
.oneinit = gm200_ltc_oneinit,
.init = gm200_ltc_init,
.intr = gm107_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gm200_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gm200_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
void
gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
{
struct nvkm_device *device = ltc->subdev.device;
nvkm_mask(device, 0x17e338, 0x0000000f, i);
nvkm_wr32(device, 0x17e204, stencil);
}
static const struct nvkm_ltc_func
gp102_ltc = {
.oneinit = gp100_ltc_oneinit,
.init = gp100_ltc_init,
.intr = gp100_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c |
/*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
*/
#include "priv.h"
#include <core/memory.h>
void
nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
{
struct nvkm_ltc *ltc = device->ltc;
const u32 limit = first + count - 1;
BUG_ON((first > limit) || (limit >= ltc->num_tags));
mutex_lock(&ltc->mutex);
ltc->func->cbc_clear(ltc, first, limit);
ltc->func->cbc_wait(ltc);
mutex_unlock(&ltc->mutex);
}
int
nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
{
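/* cache the clear color so nvkm_ltc_init() can reprogram it, then write it to the LTCs */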
memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
ltc->func->zbc_clear_color(ltc, index, color);
return index;
}
int
nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
{
ltc->zbc_depth[index] = depth;
ltc->func->zbc_clear_depth(ltc, index, depth);
return index;
}
int
nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *ltc, int index, const u32 stencil)
{
ltc->zbc_stencil[index] = stencil;
ltc->func->zbc_clear_stencil(ltc, index, stencil);
return index;
}
void
nvkm_ltc_invalidate(struct nvkm_ltc *ltc)
{
if (ltc->func->invalidate)
ltc->func->invalidate(ltc);
}
void
nvkm_ltc_flush(struct nvkm_ltc *ltc)
{
if (ltc->func->flush)
ltc->func->flush(ltc);
}
static void
nvkm_ltc_intr(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
ltc->func->intr(ltc);
}
static int
nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
return ltc->func->oneinit(ltc);
}
static int
nvkm_ltc_init(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
int i;
for (i = ltc->zbc_color_min; i <= ltc->zbc_color_max; i++)
ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
for (i = ltc->zbc_depth_min; i <= ltc->zbc_depth_max; i++) {
ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
if (ltc->func->zbc_clear_stencil)
ltc->func->zbc_clear_stencil(ltc, i, ltc->zbc_stencil[i]);
}
ltc->func->init(ltc);
return 0;
}
static void *
nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
nvkm_memory_unref(&ltc->tag_ram);
mutex_destroy(&ltc->mutex);
return ltc;
}
static const struct nvkm_subdev_func
nvkm_ltc = {
.dtor = nvkm_ltc_dtor,
.oneinit = nvkm_ltc_oneinit,
.init = nvkm_ltc_init,
.intr = nvkm_ltc_intr,
};
int
nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc)
{
struct nvkm_ltc *ltc;
if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_ltc, device, type, inst, &ltc->subdev);
ltc->func = func;
mutex_init(&ltc->mutex);
ltc->zbc_color_min = 1; /* reserve 0 for disabled */
ltc->zbc_color_max = min(func->zbc_color, NVKM_LTC_MAX_ZBC_COLOR_CNT) - 1;
ltc->zbc_depth_min = 1; /* reserve 0 for disabled */
ltc->zbc_depth_max = min(func->zbc_depth, NVKM_LTC_MAX_ZBC_DEPTH_CNT) - 1;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c |
/*
* Copyright (c) 2019 NVIDIA Corporation.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Thierry Reding
*/
#include "priv.h"
static void
gp10b_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
struct iommu_fwspec *spec;
nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
nvkm_wr32(device, 0x100800, ltc->ltc_nr);
spec = dev_iommu_fwspec_get(device->dev);
if (spec) {
u32 sid = spec->ids[0] & 0xffff;
/* stream ID */
nvkm_wr32(device, 0x160000, sid << 2);
}
}
static const struct nvkm_ltc_func
gp10b_ltc = {
.oneinit = gp100_ltc_oneinit,
.init = gp10b_ltc_init,
.intr = gp100_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gp10b_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gp10b_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static void
ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
struct nvkm_device *device = ltc->subdev.device;
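/* select ZBC color-table entry 'i', then write the four clear-color
* components; the index/data register split mirrors the gm107 variant.
*/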
nvkm_mask(device, 0x17e338, 0x0000001f, i);
nvkm_wr32(device, 0x17e33c, color[0]);
nvkm_wr32(device, 0x17e340, color[1]);
nvkm_wr32(device, 0x17e344, color[2]);
nvkm_wr32(device, 0x17e348, color[3]);
}
static const struct nvkm_ltc_func
ga102_ltc = {
.oneinit = gp100_ltc_oneinit,
.init = gp100_ltc_init,
.intr = gp100_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 31,
.zbc_depth = 16,
.zbc_clear_color = ga102_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.zbc_clear_stencil = gp102_ltc_zbc_clear_stencil,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c |
/*
* Copyright 2016 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
void
gp100_ltc_intr(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
u32 mask;
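/* 0x0001c0 appears to hold one pending-interrupt bit per LTC unit;
* service every LTS slice within each flagged unit.
*/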
mask = nvkm_rd32(device, 0x0001c0);
while (mask) {
u32 s, c = __ffs(mask);
for (s = 0; s < ltc->lts_nr; s++)
gm107_ltc_intr_lts(ltc, c, s);
mask &= ~(1 << c);
}
}
int
gp100_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
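/* number of LTC units, and LTS slices per LTC; the register meanings are
* inferred from how ltc_nr/lts_nr are used elsewhere.
*/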
ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
/*XXX: tagram allocation - TBD */
return 0;
}
void
gp100_ltc_init(struct nvkm_ltc *ltc)
{
/*XXX: PMU LS call to setup tagram address */
}
static const struct nvkm_ltc_func
gp100_ltc = {
.oneinit = gp100_ltc_oneinit,
.init = gp100_ltc_init,
.intr = gp100_ltc_intr,
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
.zbc_color = 16,
.zbc_depth = 16,
.zbc_clear_color = gm107_ltc_zbc_clear_color,
.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
.invalidate = gf100_ltc_invalidate,
.flush = gf100_ltc_flush,
};
int
gp100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_ltc **pltc)
{
return nvkm_ltc_new_(&gp100_ltc, device, type, inst, pltc);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static void
g98_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0);
}
static const struct nvkm_devinit_func
g98_devinit = {
.preinit = nv50_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = g98_devinit_disable,
};
int
g98_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&g98_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nv04.h"
#include "fbmem.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
#include <subdev/vga.h>
static void
nv04_devinit_meminit(struct nvkm_devinit *init)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
u32 patt = 0xdeadbeef;
struct io_mapping *fb;
int i;
/* Map the framebuffer aperture */
fb = fbmem_init(device);
if (!fb) {
nvkm_error(subdev, "failed to map fb\n");
return;
}
/* Sequencer and refresh off */
nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
for (i = 0; i < 4; i++)
fbmem_poke(fb, 4 * i, patt);
fbmem_poke(fb, 0x400000, patt + 1);
if (fbmem_peek(fb, 0) == patt + 1) {
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_TYPE,
NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
nvkm_mask(device, NV04_PFB_DEBUG_0,
NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
for (i = 0; i < 4; i++)
fbmem_poke(fb, 4 * i, patt);
if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
} else
if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128 |
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
} else
if (fbmem_peek(fb, 0) != patt) {
if (fbmem_readback(fb, 0x800000, patt))
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
else
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
} else
if (!fbmem_readback(fb, 0x800000, patt)) {
nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
}
/* Refresh on, sequencer on */
nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
fbmem_fini(fb);
}
static int
powerctrl_1_shift(int chip_version, int reg)
{
int shift = -4;
if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
return shift;
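/* each case below adds 4 and falls through, so the final shift depends on
* which of the 0x6805xx pll registers was passed in.
*/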
switch (reg) {
case 0x680520:
shift += 4; fallthrough;
case 0x680508:
shift += 4; fallthrough;
case 0x680504:
shift += 4; fallthrough;
case 0x680500:
shift += 4;
}
/*
* the shift for vpll regs is only used for nv3x chips with a single
* stage pll
*/
if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
chip_version == 0x36 || chip_version >= 0x40))
shift = -4;
return shift;
}
void
setPLL_single(struct nvkm_devinit *init, u32 reg,
struct nvkm_pll_vals *pv)
{
struct nvkm_device *device = init->subdev.device;
int chip_version = device->bios->version.chip;
uint32_t oldpll = nvkm_rd32(device, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
uint32_t saved_powerctrl_1 = 0;
int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
if (oldpll == pll)
return; /* already set */
if (shift_powerctrl_1 >= 0) {
saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
nvkm_wr32(device, 0x001584,
(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
1 << shift_powerctrl_1);
}
if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
/* upclock -- write new post divider first */
nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
else
/* downclock -- write new NM first */
nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
if ((chip_version < 0x17 || chip_version == 0x1a) &&
chip_version != 0x11)
/* wait a bit on older chips */
msleep(64);
nvkm_rd32(device, reg);
/* then write the other half as well */
nvkm_wr32(device, reg, pll);
if (shift_powerctrl_1 >= 0)
nvkm_wr32(device, 0x001584, saved_powerctrl_1);
}
static uint32_t
new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
{
bool head_a = (reg1 == 0x680508);
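/* set or clear what appears to be the per-head single-stage-pll bit */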
if (ss) /* single stage pll mode */
ramdac580 |= head_a ? 0x00000100 : 0x10000000;
else
ramdac580 &= head_a ? 0xfffffeff : 0xefffffff;
return ramdac580;
}
void
setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
struct nvkm_pll_vals *pv)
{
struct nvkm_device *device = init->subdev.device;
int chip_version = device->bios->version.chip;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
uint32_t oldpll1 = nvkm_rd32(device, reg1);
uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
uint32_t oldramdac580 = 0, ramdac580 = 0;
bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
/* model specific additions to generic pll1 and pll2 set up above */
if (nv3035) {
pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
(pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
pll2 = 0;
}
if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
oldramdac580 = nvkm_rd32(device, 0x680580);
ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
if (oldramdac580 != ramdac580)
oldpll1 = ~0; /* force mismatch */
if (single_stage)
/* magic value used by nvidia in single stage mode */
pll2 |= 0x011f;
}
if (chip_version > 0x70)
/* magic bits set by the blob (but not the bios) on g71-73 */
pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
if (oldpll1 == pll1 && oldpll2 == pll2)
return; /* already set */
if (shift_powerctrl_1 >= 0) {
saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
nvkm_wr32(device, 0x001584,
(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
1 << shift_powerctrl_1);
}
if (chip_version >= 0x40) {
int shift_c040 = 14;
switch (reg1) {
case 0x680504:
shift_c040 += 2; fallthrough;
case 0x680500:
shift_c040 += 2; fallthrough;
case 0x680520:
shift_c040 += 2; fallthrough;
case 0x680508:
shift_c040 += 2;
}
savedc040 = nvkm_rd32(device, 0xc040);
if (shift_c040 != 14)
nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
}
if (oldramdac580 != ramdac580)
nvkm_wr32(device, 0x680580, ramdac580);
if (!nv3035)
nvkm_wr32(device, reg2, pll2);
nvkm_wr32(device, reg1, pll1);
if (shift_powerctrl_1 >= 0)
nvkm_wr32(device, 0x001584, saved_powerctrl_1);
if (chip_version >= 0x40)
nvkm_wr32(device, 0xc040, savedc040);
}
void
setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
struct nvkm_pll_vals *pv)
{
/* When setting PLLs, there is a merry game of disabling and enabling
* various bits of hardware during the process. This function is a
* synthesis of six nv4x traces, nearly each card doing a subtly
* different thing. With luck all the necessary bits for each card are
* combined herein. Without luck it deviates from each card's formula
* so as to not work on any :)
*/
struct nvkm_device *device = init->subdev.device;
uint32_t Preg = NMNMreg - 4;
bool mpll = Preg == 0x4020;
uint32_t oldPval = nvkm_rd32(device, Preg);
uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
0xc << 28 | pv->log2P << 16;
uint32_t saved4600 = 0;
/* some cards have different maskc040s */
uint32_t maskc040 = ~(3 << 14), savedc040;
bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
return;
if (Preg == 0x4000)
maskc040 = ~0x333;
if (Preg == 0x4058)
maskc040 = ~(0xc << 24);
if (mpll) {
struct nvbios_pll info;
uint8_t Pval2;
if (nvbios_pll_parse(device->bios, Preg, &info))
return;
Pval2 = pv->log2P + info.bias_p;
if (Pval2 > info.max_p)
Pval2 = info.max_p;
Pval |= 1 << 28 | Pval2 << 20;
saved4600 = nvkm_rd32(device, 0x4600);
nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
}
if (single_stage)
Pval |= mpll ? 1 << 12 : 1 << 8;
nvkm_wr32(device, Preg, oldPval | 1 << 28);
nvkm_wr32(device, Preg, Pval & ~(4 << 28));
if (mpll) {
Pval |= 8 << 20;
nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
}
savedc040 = nvkm_rd32(device, 0xc040);
nvkm_wr32(device, 0xc040, savedc040 & maskc040);
nvkm_wr32(device, NMNMreg, NMNM);
if (NMNMreg == 0x4024)
nvkm_wr32(device, 0x403c, NMNM);
nvkm_wr32(device, Preg, Pval);
if (mpll) {
Pval &= ~(8 << 20);
nvkm_wr32(device, 0x4020, Pval);
nvkm_wr32(device, 0x4038, Pval);
nvkm_wr32(device, 0x4600, saved4600);
}
nvkm_wr32(device, 0xc040, savedc040);
if (mpll) {
nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
}
}
int
nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &devinit->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_pll_vals pv;
struct nvbios_pll info;
int cv = bios->version.chip;
int N1, M1, N2, M2, P;
int ret;
ret = nvbios_pll_parse(bios, type > 0x405c ? type : type - 4, &info);
if (ret)
return ret;
ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
if (!ret)
return -EINVAL;
pv.refclk = info.refclk;
pv.N1 = N1;
pv.M1 = M1;
pv.N2 = N2;
pv.M2 = M2;
pv.log2P = P;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
if (type > 0x405c)
setPLL_double_highregs(devinit, type, &pv);
else
setPLL_double_lowregs(devinit, type, &pv);
} else
setPLL_single(devinit, type, &pv);
return 0;
}
int
nv04_devinit_post(struct nvkm_devinit *init, bool execute)
{
return nvbios_post(&init->subdev, execute);
}
void
nv04_devinit_preinit(struct nvkm_devinit *base)
{
struct nv04_devinit *init = nv04_devinit(base);
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_device *device = subdev->device;
/* make i2c busses accessible */
nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
/* unslave crtcs */
if (init->owner < 0)
init->owner = nvkm_rdvgaowner(device);
nvkm_wrvgaowner(device, 0);
if (!init->base.post) {
u32 htotal = nvkm_rdvgac(device, 0, 0x06);
htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
if (!htotal) {
nvkm_debug(subdev, "adaptor not initialised\n");
init->base.post = true;
}
}
}
void *
nv04_devinit_dtor(struct nvkm_devinit *base)
{
struct nv04_devinit *init = nv04_devinit(base);
/* restore vga owner saved at first init */
nvkm_wrvgaowner(init->base.subdev.device, init->owner);
return init;
}
int
nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv04_devinit *init;
if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
return -ENOMEM;
*pinit = &init->base;
nvkm_devinit_ctor(func, device, type, inst, &init->base);
init->owner = -1;
return 0;
}
static const struct nvkm_devinit_func
nv04_devinit = {
.dtor = nv04_devinit_dtor,
.preinit = nv04_devinit_preinit,
.post = nv04_devinit_post,
.meminit = nv04_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
};
int
nv04_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv04_devinit_new_(&nv04_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
if (r021c00 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r021c00 & 0x00000004)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
static const struct nvkm_devinit_func
gm107_devinit = {
.preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
gm107_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&gm107_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
int
gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int N, fN, M, P;
int ret;
ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
nvkm_wr32(device, info.reg + 0x10, fN << 16);
break;
default:
nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
return ret;
}
static void
gf100_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r022500 = nvkm_rd32(device, 0x022500);
if (r022500 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (r022500 & 0x00000002) {
nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (r022500 & 0x00000004)
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (r022500 & 0x00000008)
nvkm_subdev_disable(device, NVKM_ENGINE_MSENC, 0);
if (r022500 & 0x00000100)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r022500 & 0x00000200)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
}
void
gf100_devinit_preinit(struct nvkm_devinit *base)
{
struct nv50_devinit *init = nv50_devinit(base);
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_device *device = subdev->device;
/*
* This bit is set by devinit, and flips back to 0 on suspend. We
* can use it as a reliable way to know whether we should run devinit.
*/
base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
}
static const struct nvkm_devinit_func
gf100_devinit = {
.preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gf100_devinit_disable,
};
int
gf100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&gf100_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nv04.h"
#include "fbmem.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static void
nv10_devinit_meminit(struct nvkm_devinit *init)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
static const int mem_width[] = { 0x10, 0x00, 0x20 };
int mem_width_count;
uint32_t patt = 0xdeadbeef;
struct io_mapping *fb;
int i, j, k;
if (device->card_type >= NV_11 && device->chipset >= 0x17)
mem_width_count = 3;
else
mem_width_count = 2;
/* Map the framebuffer aperture */
fb = fbmem_init(device);
if (!fb) {
nvkm_error(subdev, "failed to map fb\n");
return;
}
nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
/* Probe memory bus width */
for (i = 0; i < mem_width_count; i++) {
nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
for (j = 0; j < 4; j++) {
for (k = 0; k < 4; k++)
fbmem_poke(fb, 0x1c, 0);
fbmem_poke(fb, 0x1c, patt);
fbmem_poke(fb, 0x3c, 0);
if (fbmem_peek(fb, 0x1c) == patt)
goto mem_width_found;
}
}
mem_width_found:
patt <<= 1;
/* Probe amount of installed memory */
for (i = 0; i < 4; i++) {
int off = nvkm_rd32(device, 0x10020c) - 0x100000;
fbmem_poke(fb, off, patt);
fbmem_poke(fb, 0, 0);
fbmem_peek(fb, 0);
fbmem_peek(fb, 0);
fbmem_peek(fb, 0);
fbmem_peek(fb, 0);
if (fbmem_peek(fb, off) == patt)
goto amount_found;
}
/* IC missing - disable the upper half memory space. */
nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
amount_found:
fbmem_fini(fb);
}
static const struct nvkm_devinit_func
nv10_devinit = {
.dtor = nv04_devinit_dtor,
.preinit = nv04_devinit_preinit,
.post = nv04_devinit_post,
.meminit = nv10_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
};
int
nv10_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv04_devinit_new_(&nv10_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
static int
tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int head = type - PLL_VPLL0;
int N, fN, M, P;
int ret;
ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
(N << 8) |
(M << 0));
/*XXX*/
nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
break;
default:
nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
return ret;
}
static int
tu102_devinit_wait(struct nvkm_device *device)
{
unsigned timeout = 50 + 2000;
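/* poll every 1-2ms (so roughly 2-4 seconds overall) for what looks like
* the firmware's devinit-complete handshake.
*/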
do {
if (nvkm_rd32(device, 0x118128) & 0x00000001) {
if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
return 0;
}
usleep_range(1000, 2000);
} while (timeout--);
return -ETIMEDOUT;
}
int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
int ret;
ret = tu102_devinit_wait(init->base.subdev.device);
if (ret)
return ret;
gm200_devinit_preos(init, post);
return 0;
}
static const struct nvkm_devinit_func
tu102_devinit = {
.init = nv50_devinit_init,
.post = tu102_devinit_post,
.pll_set = tu102_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
#include <subdev/vga.h>
int
nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct nvbios_pll info;
int N1, M1, N2, M2, P;
int ret;
ret = nvbios_pll_parse(bios, type, &info);
if (ret) {
nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
return ret;
}
ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
if (!ret) {
nvkm_error(subdev, "failed pll calculation\n");
return -EINVAL;
}
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
nvkm_wr32(device, info.reg + 0, 0x10000611);
nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P << 28) |
(M2 << 16) | N2);
break;
case PLL_MEMORY:
nvkm_mask(device, info.reg + 0, 0x01ff0000,
(P << 22) |
(info.bias_p << 19) |
(P << 16));
nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
break;
default:
nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
break;
}
return 0;
}
static void
nv50_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
if (!(r001540 & 0x40000000))
nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
}
void
nv50_devinit_preinit(struct nvkm_devinit *base)
{
struct nvkm_subdev *subdev = &base->subdev;
struct nvkm_device *device = subdev->device;
/* our heuristics can't detect whether the board has had its
* devinit scripts executed or not if the display engine is
* missing; assume it's a secondary gpu which requires post
*/
if (!base->post) {
nvkm_devinit_disable(base);
if (!device->disp)
base->post = true;
}
/* magic to detect whether or not x86 vbios code has executed
* the devinit scripts to initialise the board
*/
if (!base->post) {
if (!nvkm_rdvgac(device, 0, 0x00) &&
!nvkm_rdvgac(device, 0, 0x1a)) {
nvkm_debug(subdev, "adaptor not initialised\n");
base->post = true;
}
}
}
void
nv50_devinit_init(struct nvkm_devinit *base)
{
struct nv50_devinit *init = nv50_devinit(base);
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct nvbios_outp info;
struct dcb_output outp;
u8 ver = 0xff, hdr, cnt, len;
int i = 0;
/* if we ran the init tables, we have to execute the first script
* pointer of each dcb entry's display encoder table in order
* to properly initialise each encoder.
*/
while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
&ver, &hdr, &cnt, &len, &info)) {
nvbios_init(subdev, info.script[0],
init.outp = &outp;
init.or = ffs(outp.or) - 1;
init.link = outp.sorconf.link == 2;
);
}
i++;
}
}
int
nv50_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit)
{
struct nv50_devinit *init;
if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
return -ENOMEM;
*pinit = &init->base;
nvkm_devinit_ctor(func, device, type, inst, &init->base);
return 0;
}
static const struct nvkm_devinit_func
nv50_devinit = {
.preinit = nv50_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = nv50_devinit_disable,
};
int
nv50_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&nv50_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c |
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
static int
ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int head = type - PLL_VPLL0;
int N, fN, M, P;
int ret;
ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
break;
default:
nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
return ret;
}
static const struct nvkm_devinit_func
ga100_devinit = {
.init = nv50_devinit_init,
.post = tu102_devinit_post,
.pll_set = ga100_devinit_pll_set,
};
int
ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/pmu.h>
#include <subdev/pmu.h>
#include <subdev/timer.h>
static void
pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
{
struct nvkm_device *device = init->base.subdev.device;
struct nvkm_bios *bios = device->bios;
int i;
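/* stream the image into the PMU falcon's instruction memory through what
* appear to be auto-incrementing IMEM ports: a block tag is written every
* 0x100 bytes, and the final block is zero-padded below.
*/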
nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
for (i = 0; i < len; i += 4) {
if ((i & 0xff) == 0)
nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
}
while (i & 0xff) {
nvkm_wr32(device, 0x10a184, 0x00000000);
i += 4;
}
}
static void
pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
{
struct nvkm_device *device = init->base.subdev.device;
struct nvkm_bios *bios = device->bios;
int i;
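/* same pattern for data memory: select the DMEM offset, then stream words */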
nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
for (i = 0; i < len; i += 4)
nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
}
static u32
pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
{
struct nvkm_device *device = init->base.subdev.device;
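/* read the DMEM word at 'argp', treat it as a pointer, and return the word
* at offset 'argi' from it (interpretation inferred from the access pattern).
*/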
nvkm_wr32(device, 0x10a1c0, argp);
nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
return nvkm_rd32(device, 0x10a1c4);
}
static void
pmu_exec(struct nv50_devinit *init, u32 init_addr)
{
struct nvkm_device *device = init->base.subdev.device;
nvkm_wr32(device, 0x10a104, init_addr);
nvkm_wr32(device, 0x10a10c, 0x00000000);
nvkm_wr32(device, 0x10a100, 0x00000002);
}
static int
pmu_load(struct nv50_devinit *init, u8 type, bool post,
u32 *init_addr_pmu, u32 *args_addr_pmu)
{
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pmuR pmu;
int ret;
if (!nvbios_pmuRm(bios, type, &pmu))
return -EINVAL;
if (!post || !subdev->device->pmu)
return 0;
ret = nvkm_falcon_reset(&subdev->device->pmu->falcon);
if (ret)
return ret;
pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
if (init_addr_pmu) {
*init_addr_pmu = pmu.init_addr_pmu;
*args_addr_pmu = pmu.args_addr_pmu;
return 0;
}
return pmu_exec(init, pmu.init_addr_pmu), 0;
}
void
gm200_devinit_preos(struct nv50_devinit *init, bool post)
{
/* Optional: Execute PRE_OS application on PMU, which should at
* least take care of fans until a full PMU has been loaded.
*/
pmu_load(init, 0x01, post, NULL, NULL);
}
int
gm200_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct bit_entry bit_I;
u32 exec, args;
int ret;
if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
bit_I.length < 0x1c) {
nvkm_error(subdev, "VBIOS PMU init data not found\n");
return -EINVAL;
}
/* Upload DEVINIT application from VBIOS onto PMU. */
ret = pmu_load(init, 0x04, post, &exec, &args);
if (ret) {
nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
return ret;
}
/* Upload tables required by opcodes in boot scripts. */
if (post) {
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
pmu_data(init, pmu, img, len);
}
/* Upload boot scripts. */
if (post) {
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
pmu_data(init, pmu, img, len);
}
/* Execute DEVINIT. */
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x10a040) & 0x00002000)
break;
) < 0)
return -ETIMEDOUT;
}
gm200_devinit_preos(init, post);
return 0;
}
static const struct nvkm_devinit_func
gm200_devinit = {
.preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = gm200_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
gm200_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&gm200_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include <core/option.h>
#include <subdev/vga.h>
u32
nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
{
if (init->func->mmio)
addr = init->func->mmio(init, addr);
return addr;
}
int
nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
{
return init->func->pll_set(init, type, khz);
}
void
nvkm_devinit_meminit(struct nvkm_devinit *init)
{
if (init->func->meminit)
init->func->meminit(init);
}
u64
nvkm_devinit_disable(struct nvkm_devinit *init)
{
if (init && init->func->disable)
init->func->disable(init);
return 0;
}
int
nvkm_devinit_post(struct nvkm_devinit *init)
{
int ret = 0;
if (init && init->func->post)
ret = init->func->post(init, init->post);
nvkm_devinit_disable(init);
return ret;
}
static int
nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_devinit *init = nvkm_devinit(subdev);
/* force full reinit on resume */
if (suspend)
init->post = true;
return 0;
}
static int
nvkm_devinit_preinit(struct nvkm_subdev *subdev)
{
struct nvkm_devinit *init = nvkm_devinit(subdev);
if (init->func->preinit)
init->func->preinit(init);
/* Override the post flag during the first call if NvForcePost is set */
if (init->force_post) {
init->post = init->force_post;
init->force_post = false;
}
/* unlock the extended vga crtc regs */
nvkm_lockvgac(subdev->device, false);
return 0;
}
static int
nvkm_devinit_init(struct nvkm_subdev *subdev)
{
struct nvkm_devinit *init = nvkm_devinit(subdev);
if (init->func->init)
init->func->init(init);
return 0;
}
static void *
nvkm_devinit_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_devinit *init = nvkm_devinit(subdev);
void *data = init;
if (init->func->dtor)
data = init->func->dtor(init);
/* lock crtc regs */
nvkm_lockvgac(subdev->device, true);
return data;
}
static const struct nvkm_subdev_func
nvkm_devinit = {
.dtor = nvkm_devinit_dtor,
.preinit = nvkm_devinit_preinit,
.init = nvkm_devinit_init,
.fini = nvkm_devinit_fini,
};
void
nvkm_devinit_ctor(const struct nvkm_devinit_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_devinit *init)
{
nvkm_subdev_ctor(&nvkm_devinit, device, type, inst, &init->subdev);
init->func = func;
init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static void
mcp89_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0);
if (!(r00154c & 0x00000200))
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
}
static const struct nvkm_devinit_func
mcp89_devinit = {
.preinit = nv50_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = gt215_devinit_pll_set,
.disable = mcp89_devinit_disable,
};
int
mcp89_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&mcp89_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c |
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
static int
gv100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int head = type - PLL_VPLL0;
int N, fN, M, P;
int ret;
ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
case PLL_VPLL2:
case PLL_VPLL3:
nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
(N << 8) |
(M << 0));
break;
default:
nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
return ret;
}
static const struct nvkm_devinit_func
gv100_devinit = {
.preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = gm200_devinit_post,
.pll_set = gv100_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
gv100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&gv100_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static void
g84_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_VP, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
}
if (!(r00154c & 0x00000004))
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
if (!(r00154c & 0x00000040))
nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
}
static const struct nvkm_devinit_func
g84_devinit = {
.preinit = nv50_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = nv50_devinit_pll_set,
.disable = g84_devinit_disable,
};
int
g84_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&g84_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
int
gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvbios_pll info;
int N, fN, M, P;
int ret;
ret = nvbios_pll_parse(device->bios, type, &info);
if (ret)
return ret;
ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
if (ret < 0)
return ret;
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
nvkm_wr32(device, info.reg + 0, 0x50000610);
nvkm_mask(device, info.reg + 4, 0x003fffff,
(P << 16) | (M << 8) | N);
nvkm_wr32(device, info.reg + 8, fN);
break;
default:
nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
ret = -EINVAL;
break;
}
return ret;
}
static void
gt215_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0);
nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0);
}
if (!(r00154c & 0x00000004))
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
if (!(r00154c & 0x00000020))
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000200))
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
}
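/* { first register, last register, register stride } triples, zero-terminated;
* the stride is used below to derive which partition a register belongs to.
*/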
static u32
gt215_devinit_mmio_part[] = {
0x100720, 0x1008bc, 4,
0x100a20, 0x100adc, 4,
0x100d80, 0x100ddc, 4,
0x110000, 0x110f9c, 4,
0x111000, 0x11103c, 8,
0x111080, 0x1110fc, 4,
0x111120, 0x1111fc, 4,
0x111300, 0x1114bc, 4,
0,
};
static u32
gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
{
struct nv50_devinit *init = nv50_devinit(base);
struct nvkm_device *device = init->base.subdev.device;
u32 *mmio = gt215_devinit_mmio_part;
/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
* instructions which touch registers that may not even exist on
* some configurations (Quadro 400), which causes the register
* interface to screw up for some amount of time after attempting to
* write to one of these, and results in all sorts of things going
* horribly wrong.
*
* the binary driver avoids touching these registers at all, however,
* the video bios doesn't care and does what the scripts say. it's
* presumed that the io-port access to init registers isn't affected
* by the screw-up bug mentioned above.
*
* really, a new opcode should've been invented to handle these
* requirements, but whatever, it's too late for that now.
*/
while (mmio[0]) {
if (addr >= mmio[0] && addr <= mmio[1]) {
u32 part = (addr / mmio[2]) & 7;
if (!init->r001540)
init->r001540 = nvkm_rd32(device, 0x001540);
if (part >= hweight8((init->r001540 >> 16) & 0xff))
return ~0;
return addr;
}
mmio += 3;
}
return addr;
}
static const struct nvkm_devinit_func
gt215_devinit = {
.preinit = nv50_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.mmio = gt215_devinit_mmio,
.pll_set = gt215_devinit_pll_set,
.disable = gt215_devinit_disable,
};
int
gt215_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv50_devinit_new_(&gt215_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nv04.h"
#include "fbmem.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static void
nv20_devinit_meminit(struct nvkm_devinit *init)
{
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
uint32_t amount, off;
struct io_mapping *fb;
/* Map the framebuffer aperture */
fb = fbmem_init(device);
if (!fb) {
nvkm_error(subdev, "failed to map fb\n");
return;
}
nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
/* Allow full addressing */
nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
amount = nvkm_rd32(device, 0x10020c);
for (off = amount; off > 0x2000000; off -= 0x2000000)
fbmem_poke(fb, off - 4, off);
amount = nvkm_rd32(device, 0x10020c);
if (amount != fbmem_peek(fb, amount - 4))
/* IC missing - disable the upper half memory space. */
nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
fbmem_fini(fb);
}
static const struct nvkm_devinit_func
nv20_devinit = {
.dtor = nv04_devinit_dtor,
.preinit = nv04_devinit_preinit,
.post = nv04_devinit_post,
.meminit = nv20_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
};
int
nv20_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv04_devinit_new_(&nv20_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "nv04.h"
#include "fbmem.h"
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/init.h>
#include <subdev/vga.h>
static void
nv05_devinit_meminit(struct nvkm_devinit *init)
{
static const u8 default_config_tab[][2] = {
{ 0x24, 0x00 },
{ 0x28, 0x00 },
{ 0x24, 0x01 },
{ 0x1f, 0x00 },
{ 0x0f, 0x00 },
{ 0x17, 0x00 },
{ 0x06, 0x00 },
{ 0x00, 0x00 }
};
struct nvkm_subdev *subdev = &init->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
struct io_mapping *fb;
u32 patt = 0xdeadbeef;
u16 data;
u8 strap, ramcfg[2];
int i, v;
/* Map the framebuffer aperture */
fb = fbmem_init(device);
if (!fb) {
nvkm_error(subdev, "failed to map fb\n");
return;
}
strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
if ((data = bmp_mem_init_table(bios))) {
ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
} else {
ramcfg[0] = default_config_tab[strap][0];
ramcfg[1] = default_config_tab[strap][1];
}
/* Sequencer off */
nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
goto out;
nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
/* If present load the hardcoded scrambling table */
if (data) {
for (i = 0, data += 0x10; i < 8; i++, data += 4) {
u32 scramble = nvbios_rd32(bios, data);
nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
}
}
/* Set memory type/width/length defaults depending on the straps */
nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
if (ramcfg[1] & 0x80)
nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
/* Probe memory bus width */
for (i = 0; i < 4; i++)
fbmem_poke(fb, 4 * i, patt);
if (fbmem_peek(fb, 0xc) != patt)
nvkm_mask(device, NV04_PFB_BOOT_0,
NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
/* Probe memory length */
v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
(!fbmem_readback(fb, 0x1000000, ++patt) ||
!fbmem_readback(fb, 0, ++patt)))
nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
!fbmem_readback(fb, 0x800000, ++patt))
nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
if (!fbmem_readback(fb, 0x400000, ++patt))
nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
out:
/* Sequencer on */
nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
fbmem_fini(fb);
}
static const struct nvkm_devinit_func
nv05_devinit = {
.dtor = nv04_devinit_dtor,
.preinit = nv04_devinit_preinit,
.post = nv04_devinit_post,
.meminit = nv05_devinit_meminit,
.pll_set = nv04_devinit_pll_set,
};
int
nv05_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv04_devinit_new_(&nv05_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static const struct nvkm_devinit_func
nv1a_devinit = {
.dtor = nv04_devinit_dtor,
.preinit = nv04_devinit_preinit,
.post = nv04_devinit_post,
.pll_set = nv04_devinit_pll_set,
};
int
nv1a_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_devinit **pinit)
{
return nv04_devinit_new_(&nv1a_devinit, device, type, inst, pinit);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c |
/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mxms.h"
#include <subdev/bios.h>
#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/mxm.h>
struct context {
u32 *outp;
struct mxms_odev desc;
};
static bool
mxm_match_tmds_partner(struct nvkm_mxm *mxm, u8 *data, void *info)
{
struct context *ctx = info;
struct mxms_odev desc;
mxms_output_device(mxm, data, &desc);
if (desc.outp_type == 2 &&
desc.dig_conn == ctx->desc.dig_conn)
return false;
return true;
}
static bool
mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
{
struct nvkm_bios *bios = mxm->subdev.device->bios;
struct context *ctx = info;
u64 desc = *(u64 *)data;
mxms_output_device(mxm, data, &ctx->desc);
/* match dcb encoder type to mxm-ods device type */
if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
return true;
/* digital output, have some extra stuff to match here, there's a
* table in the vbios that provides a mapping from the mxm digital
* connection enum values to SOR/link
*/
if ((desc & 0x00000000000000f0) >= 0x20) {
/* check against sor index */
u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
return true;
/* check dcb entry has a compatible link field */
link = (link & 0x30) >> 4;
if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
return true;
}
/* mark this descriptor accounted for by setting invalid device type,
 * except of course some manufacturers don't follow specs properly and
* we need to avoid killing off the TMDS function on DP connectors
* if MXM-SIS is missing an entry for it.
*/
data[0] &= ~0xf0;
if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
data[0] |= 0x20; /* modify descriptor to match TMDS now */
} else {
data[0] |= 0xf0;
}
return false;
}
static int
mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
{
struct nvkm_mxm *mxm = data;
struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
u8 type, i2cidx, link, ver, len;
u8 *conn;
/* look for an output device structure that matches this dcb entry.
* if one isn't found, disable it.
*/
if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
idx, ctx.outp[0], ctx.outp[1]);
ctx.outp[0] |= 0x0000000f;
return 0;
}
/* modify the output's ddc/aux port, there's a pointer to a table
* with the mapping from mxm ddc/aux port to dcb i2c_index in the
* vbios mxm table
*/
i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
i2cidx = (i2cidx & 0x0f) << 4;
else
i2cidx = (i2cidx & 0xf0);
if (i2cidx != 0xf0) {
ctx.outp[0] &= ~0x000000f0;
ctx.outp[0] |= i2cidx;
}
/* override dcb sorconf.link, based on what mxm data says */
switch (ctx.desc.outp_type) {
case 0x00: /* Analog CRT */
case 0x01: /* Analog TV/HDTV */
break;
default:
link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
ctx.outp[1] &= ~0x00000030;
ctx.outp[1] |= link;
break;
}
/* we may need to fixup various other vbios tables based on what
* the descriptor says the connector type should be.
*
* in a lot of cases, the vbios tables will claim DVI-I is possible,
* and the mxm data says the connector is really HDMI. another
* common example is DP->eDP.
*/
conn = bios->data;
conn += nvbios_connEe(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
type = conn[0];
switch (ctx.desc.conn_type) {
case 0x01: /* LVDS */
ctx.outp[1] |= 0x00000004; /* use_power_scripts */
/* XXX: modify default link width in LVDS table */
break;
case 0x02: /* HDMI */
type = DCB_CONNECTOR_HDMI_1;
break;
case 0x03: /* DVI-D */
type = DCB_CONNECTOR_DVI_D;
break;
case 0x0e: /* eDP, falls through to DPint */
ctx.outp[1] |= 0x00010000;
fallthrough;
case 0x07: /* DP internal, wtf is this?? HP8670w */
ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
type = DCB_CONNECTOR_eDP;
break;
default:
break;
}
if (mxms_version(mxm) >= 0x0300)
conn[0] = type;
return 0;
}
static bool
mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
{
struct nvkm_subdev *subdev = &mxm->subdev;
u64 desc = *(u64 *)data;
if ((desc & 0xf0) != 0xf0)
nvkm_info(subdev, "unmatched output device %016llx\n", desc);
return true;
}
static void
mxm_dcb_sanitise(struct nvkm_mxm *mxm)
{
struct nvkm_subdev *subdev = &mxm->subdev;
struct nvkm_bios *bios = subdev->device->bios;
u8 ver, hdr, cnt, len;
u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
if (dcb == 0x0000 || (ver != 0x40 && ver != 0x41)) {
nvkm_warn(subdev, "unsupported DCB version\n");
return;
}
dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
}
int
nv50_mxm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_subdev **pmxm)
{
struct nvkm_mxm *mxm;
int ret;
ret = nvkm_mxm_new_(device, type, inst, &mxm);
if (mxm)
*pmxm = &mxm->subdev;
if (ret)
return ret;
if (mxm->action & MXM_SANITISE_DCB)
mxm_dcb_sanitise(mxm);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c |
/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mxms.h"
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/mxm.h>
#include <subdev/i2c.h>
static bool
mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
u8 offset, u8 size, u8 *data)
{
struct i2c_msg msgs[] = {
{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
};
return i2c_transfer(&bus->i2c, msgs, 2) == 2;
}
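/* Shadow the MXM-SIS structure from an EEPROM on the bus the VBIOS maps
 * to LVDS_DDC.  The 6-byte header is fetched into a small stack buffer
 * first, purely so the full structure size can be computed before
 * allocating and re-reading the whole thing.  The 0x54/0x56 slave
 * addresses probed below are presumably the standard MXM-SIS EEPROM
 * addresses.
 */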
static bool
mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_device *device = mxm->subdev.device;
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c = device->i2c;
struct nvkm_i2c_bus *bus = NULL;
u8 i2cidx, mxms[6], addr, size;
i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
if (i2cidx < 0x0f)
bus = nvkm_i2c_bus_find(i2c, i2cidx);
if (!bus)
return false;
addr = 0x54;
if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
addr = 0x56;
if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
return false;
}
mxm->mxms = mxms;
size = mxms_headerlen(mxm) + mxms_structlen(mxm);
mxm->mxms = kmalloc(size, GFP_KERNEL);
if (mxm->mxms &&
mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
return true;
kfree(mxm->mxms);
mxm->mxms = NULL;
return false;
}
#if defined(CONFIG_ACPI)
static bool
mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_subdev *subdev = &mxm->subdev;
struct nvkm_device *device = subdev->device;
static guid_t muid =
GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
u32 mxms_args[] = { 0x00000000 };
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
.buffer.length = sizeof(mxms_args),
.buffer.pointer = (char *)mxms_args,
};
union acpi_object *obj;
acpi_handle handle;
int rev;
handle = ACPI_HANDLE(device->dev);
if (!handle)
return false;
/*
* spec says this can be zero to mean "highest revision", but
* of course there's at least one bios out there which fails
 * unless you pass in exactly the version it supports.
*/
rev = (version & 0xf0) << 4 | (version & 0x0f);
obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
if (!obj) {
nvkm_debug(subdev, "DSM MXMS failed\n");
return false;
}
if (obj->type == ACPI_TYPE_BUFFER) {
mxm->mxms = kmemdup(obj->buffer.pointer,
obj->buffer.length, GFP_KERNEL);
} else if (obj->type == ACPI_TYPE_INTEGER) {
nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
obj->integer.value);
}
ACPI_FREE(obj);
return mxm->mxms != NULL;
}
#endif
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
static u8
wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_subdev *subdev = &mxm->subdev;
u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
if (ACPI_FAILURE(status)) {
nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
return 0x00;
}
obj = retn.pointer;
if (obj->type == ACPI_TYPE_INTEGER) {
version = obj->integer.value;
nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
(version >> 4), version & 0x0f);
} else {
version = 0;
nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
}
kfree(obj);
return version;
}
static bool
mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
{
struct nvkm_subdev *subdev = &mxm->subdev;
u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
if (!wmi_has_guid(WMI_WMMX_GUID)) {
nvkm_debug(subdev, "WMMX GUID not found\n");
return false;
}
mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00);
if (!mxms_args[1])
mxms_args[1] = wmi_wmmx_mxmi(mxm, version);
if (!mxms_args[1])
return false;
status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
if (ACPI_FAILURE(status)) {
nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
return false;
}
obj = retn.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
mxm->mxms = kmemdup(obj->buffer.pointer,
obj->buffer.length, GFP_KERNEL);
}
kfree(obj);
return mxm->mxms != NULL;
}
#endif
static struct mxm_shadow_h {
const char *name;
bool (*exec)(struct nvkm_mxm *, u8 version);
} _mxm_shadow[] = {
{ "ROM", mxm_shadow_rom },
#if defined(CONFIG_ACPI)
{ "DSM", mxm_shadow_dsm },
#endif
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
{ "WMI", mxm_shadow_wmi },
#endif
{}
};
static int
mxm_shadow(struct nvkm_mxm *mxm, u8 version)
{
struct mxm_shadow_h *shadow = _mxm_shadow;
do {
nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
if (shadow->exec(mxm, version)) {
if (mxms_valid(mxm))
return 0;
kfree(mxm->mxms);
mxm->mxms = NULL;
}
} while ((++shadow)->name);
return -ENOENT;
}
static const struct nvkm_subdev_func
nvkm_mxm = {
};
int
nvkm_mxm_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_mxm **pmxm)
{
struct nvkm_bios *bios = device->bios;
struct nvkm_mxm *mxm;
u8 ver, len;
u16 data;
if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_mxm, device, type, inst, &mxm->subdev);
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nvbios_rd08(bios, data))) {
nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
return 0;
}
nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
nvkm_debug(&mxm->subdev, "module flags: %02x\n",
nvbios_rd08(bios, data + 0x01));
nvkm_debug(&mxm->subdev, "config flags: %02x\n",
nvbios_rd08(bios, data + 0x02));
if (mxm_shadow(mxm, ver)) {
nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
#if 0
/* we should, perhaps, fall back to some kind of limited
* mode here if the x86 vbios hasn't already done the
* work for us (so we prevent loading with completely
* whacked vbios tables).
*/
return -EINVAL;
#else
return 0;
#endif
}
nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
mxms_foreach(mxm, 0, NULL, NULL);
if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
mxm->action |= MXM_SANITISE_DCB;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "mxms.h"
#define ROM16(x) get_unaligned_le16(&(x))
#define ROM32(x) get_unaligned_le32(&(x))
static u8 *
mxms_data(struct nvkm_mxm *mxm)
{
return mxm->mxms;
}
u16
mxms_version(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
u16 version = (mxms[4] << 8) | mxms[5];
switch (version) {
case 0x0200:
case 0x0201:
case 0x0300:
return version;
default:
break;
}
nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
return 0x0000;
}
u16
mxms_headerlen(struct nvkm_mxm *mxm)
{
return 8;
}
u16
mxms_structlen(struct nvkm_mxm *mxm)
{
return *(u16 *)&mxms_data(mxm)[6];
}
bool
mxms_checksum(struct nvkm_mxm *mxm)
{
u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
u8 *mxms = mxms_data(mxm), sum = 0;
while (size--)
sum += *mxms++;
if (sum) {
nvkm_debug(&mxm->subdev, "checksum invalid\n");
return false;
}
return true;
}
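/* The MXMS checksum is a plain 8-bit sum over the header plus structure:
 * the stored checksum byte is chosen so that the total wraps to zero.
 * Hypothetical example: if the other bytes sum to 0x1fc, a checksum byte
 * of 0x04 makes the low 8 bits of the total come out as zero, and
 * mxms_checksum() above accepts the image.
 */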
bool
mxms_valid(struct nvkm_mxm *mxm)
{
u8 *mxms = mxms_data(mxm);
if (*(u32 *)mxms != 0x5f4d584d) {
nvkm_debug(&mxm->subdev, "signature invalid\n");
return false;
}
if (!mxms_version(mxm) || !mxms_checksum(mxm))
return false;
return true;
}
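/* Walk the descriptors that follow the MXMS header.  Each descriptor
 * starts with a type in its low nibble; the header length, and for
 * table-like descriptors the record length and entry count, depend on
 * that type (see the switch below).  "types" is a bitmask selecting
 * which descriptor types get passed to the exec() callback; with
 * exec == NULL the function just dumps the descriptors at debug level.
 */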
bool
mxms_foreach(struct nvkm_mxm *mxm, u8 types,
bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
{
struct nvkm_subdev *subdev = &mxm->subdev;
u8 *mxms = mxms_data(mxm);
u8 *desc = mxms + mxms_headerlen(mxm);
u8 *fini = desc + mxms_structlen(mxm) - 1;
while (desc < fini) {
u8 type = desc[0] & 0x0f;
u8 headerlen = 0;
u8 recordlen = 0;
u8 entries = 0;
switch (type) {
case 0: /* Output Device Structure */
if (mxms_version(mxm) >= 0x0300)
headerlen = 8;
else
headerlen = 6;
break;
case 1: /* System Cooling Capability Structure */
case 2: /* Thermal Structure */
case 3: /* Input Power Structure */
headerlen = 4;
break;
case 4: /* GPIO Device Structure */
headerlen = 4;
recordlen = 2;
entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
break;
case 5: /* Vendor Specific Structure */
headerlen = 8;
break;
case 6: /* Backlight Control Structure */
if (mxms_version(mxm) >= 0x0300) {
headerlen = 4;
recordlen = 8;
entries = (desc[1] & 0xf0) >> 4;
} else {
headerlen = 8;
}
break;
case 7: /* Fan Control Structure */
headerlen = 8;
recordlen = 4;
entries = desc[1] & 0x07;
break;
default:
nvkm_debug(subdev, "unknown descriptor type %d\n", type);
return false;
}
if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
static const char *mxms_desc[] = {
"ODS", "SCCS", "TS", "IPS",
"GSD", "VSS", "BCS", "FCS",
};
u8 *dump = desc;
char data[32], *ptr;
int i, j;
for (j = headerlen - 1, ptr = data; j >= 0; j--)
ptr += sprintf(ptr, "%02x", dump[j]);
dump += headerlen;
nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
for (i = 0; i < entries; i++, dump += recordlen) {
for (j = recordlen - 1, ptr = data; j >= 0; j--)
ptr += sprintf(ptr, "%02x", dump[j]);
nvkm_debug(subdev, " %s\n", data);
}
}
if (types & (1 << type)) {
if (!exec(mxm, desc, info))
return false;
}
desc += headerlen + (entries * recordlen);
}
return true;
}
void
mxms_output_device(struct nvkm_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
{
u64 data = ROM32(pdata[0]);
if (mxms_version(mxm) >= 0x0300)
data |= (u64)ROM16(pdata[4]) << 32;
desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c |
/*
* Copyright 2015 Karol Herbst
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Karol Herbst
*/
#include "priv.h"
int
gf100_iccsense_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **piccsense)
{
return nvkm_iccsense_new_(device, type, inst, piccsense);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c |
/*
* Copyright 2015 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/extdev.h>
#include <subdev/bios/iccsense.h>
#include <subdev/bios/power_budget.h>
#include <subdev/i2c.h>
static bool
nvkm_iccsense_validate_device(struct i2c_adapter *i2c, u8 addr,
enum nvbios_extdev_type type)
{
switch (type) {
case NVBIOS_EXTDEV_INA209:
case NVBIOS_EXTDEV_INA219:
return nv_rd16i2cr(i2c, addr, 0x0) >= 0;
case NVBIOS_EXTDEV_INA3221:
return nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
nv_rd16i2cr(i2c, addr, 0xfe) == 0x5449;
default:
return false;
}
}
static int
nvkm_iccsense_poll_lane(struct i2c_adapter *i2c, u8 addr, u8 shunt_reg,
u8 shunt_shift, u8 bus_reg, u8 bus_shift, u8 shunt,
u16 lsb)
{
int vshunt = nv_rd16i2cr(i2c, addr, shunt_reg);
int vbus = nv_rd16i2cr(i2c, addr, bus_reg);
if (vshunt < 0 || vbus < 0)
return -EINVAL;
vshunt >>= shunt_shift;
vbus >>= bus_shift;
return vbus * vshunt * lsb / shunt;
}
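/* Combine one shunt-voltage and one bus-voltage register sample into a
 * power figure: vbus * vshunt * lsb / shunt(mOhm).  The lsb argument
 * folds the per-chip register scaling into the result; the callers
 * below pass 10 * 4 for the INA209/INA219 and 40 * 8 for the INA3221,
 * which presumably matches those parts' shunt/bus LSB sizes and the
 * shifts applied above.
 */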
static int
nvkm_iccsense_ina2x9_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail,
u8 shunt_reg, u8 bus_reg)
{
return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
shunt_reg, 0, bus_reg, 3, rail->mohm,
10 * 4);
}
static int
nvkm_iccsense_ina209_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail)
{
return nvkm_iccsense_ina2x9_read(iccsense, rail, 3, 4);
}
static int
nvkm_iccsense_ina219_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail)
{
return nvkm_iccsense_ina2x9_read(iccsense, rail, 1, 2);
}
static int
nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail)
{
return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
1 + (rail->idx * 2), 3,
2 + (rail->idx * 2), 3, rail->mohm,
40 * 8);
}
static void
nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_sensor *sensor)
{
struct nvkm_subdev *subdev = &iccsense->subdev;
nvkm_trace(subdev, "write config of extdev %i: 0x%04x\n", sensor->id, sensor->config);
nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, sensor->config);
}
int
nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense)
{
int result = 0;
struct nvkm_iccsense_rail *rail;
if (!iccsense)
return -EINVAL;
list_for_each_entry(rail, &iccsense->rails, head) {
int res;
if (!rail->read)
return -ENODEV;
res = rail->read(iccsense, rail);
if (res < 0)
return res;
result += res;
}
return result;
}
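/* Usage sketch (hypothetical caller, e.g. a hwmon power sensor):
 *
 *	int power = nvkm_iccsense_read_all(device->iccsense);
 *	if (power >= 0)
 *		... report the summed rail power ...
 *	else
 *		... handle missing/unreadable sensors ...
 *
 * A negative return indicates a missing subdev, an unreadable rail, or
 * an i2c failure; otherwise the result is the sum over all rails.
 */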
static void *
nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
struct nvkm_iccsense_sensor *sensor, *tmps;
struct nvkm_iccsense_rail *rail, *tmpr;
list_for_each_entry_safe(sensor, tmps, &iccsense->sensors, head) {
list_del(&sensor->head);
kfree(sensor);
}
list_for_each_entry_safe(rail, tmpr, &iccsense->rails, head) {
list_del(&rail->head);
kfree(rail);
}
return iccsense;
}
static struct nvkm_iccsense_sensor*
nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
struct nvkm_subdev *subdev = &iccsense->subdev;
struct nvkm_bios *bios = subdev->device->bios;
struct nvkm_i2c *i2c = subdev->device->i2c;
struct nvbios_extdev_func extdev;
struct nvkm_i2c_bus *i2c_bus;
struct nvkm_iccsense_sensor *sensor;
u8 addr;
if (!i2c || !bios || nvbios_extdev_parse(bios, id, &extdev))
return NULL;
if (extdev.type == 0xff)
return NULL;
if (extdev.type != NVBIOS_EXTDEV_INA209 &&
extdev.type != NVBIOS_EXTDEV_INA219 &&
extdev.type != NVBIOS_EXTDEV_INA3221) {
iccsense->data_valid = false;
nvkm_error(subdev, "Unknown sensor type %x, power reading "
"disabled\n", extdev.type);
return NULL;
}
if (extdev.bus)
i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
else
i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
if (!i2c_bus)
return NULL;
addr = extdev.addr >> 1;
if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
extdev.type)) {
iccsense->data_valid = false;
nvkm_warn(subdev, "found invalid sensor id: %i, power reading"
"might be invalid\n", id);
return NULL;
}
sensor = kmalloc(sizeof(*sensor), GFP_KERNEL);
if (!sensor)
return NULL;
list_add_tail(&sensor->head, &iccsense->sensors);
sensor->id = id;
sensor->type = extdev.type;
sensor->i2c = &i2c_bus->i2c;
sensor->addr = addr;
sensor->config = 0x0;
return sensor;
}
static struct nvkm_iccsense_sensor*
nvkm_iccsense_get_sensor(struct nvkm_iccsense *iccsense, u8 id)
{
struct nvkm_iccsense_sensor *sensor;
list_for_each_entry(sensor, &iccsense->sensors, head) {
if (sensor->id == id)
return sensor;
}
return nvkm_iccsense_create_sensor(iccsense, id);
}
static int
nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_power_budget budget;
struct nvbios_iccsense stbl;
int i, ret;
if (!bios)
return 0;
ret = nvbios_power_budget_header(bios, &budget);
if (!ret && budget.cap_entry != 0xff) {
struct nvbios_power_budget_entry entry;
ret = nvbios_power_budget_entry(bios, &budget,
budget.cap_entry, &entry);
if (!ret) {
iccsense->power_w_max = entry.avg_w;
iccsense->power_w_crit = entry.max_w;
}
}
if (nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
return 0;
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
struct pwr_rail_t *pwr_rail = &stbl.rail[i];
struct nvkm_iccsense_sensor *sensor;
int r;
if (pwr_rail->mode != 1 || !pwr_rail->resistor_count)
continue;
sensor = nvkm_iccsense_get_sensor(iccsense, pwr_rail->extdev_id);
if (!sensor)
continue;
if (!sensor->config)
sensor->config = pwr_rail->config;
else if (sensor->config != pwr_rail->config)
nvkm_error(subdev, "config mismatch found for extdev %i\n", pwr_rail->extdev_id);
for (r = 0; r < pwr_rail->resistor_count; ++r) {
struct nvkm_iccsense_rail *rail;
struct pwr_rail_resistor_t *res = &pwr_rail->resistors[r];
int (*read)(struct nvkm_iccsense *,
struct nvkm_iccsense_rail *);
if (!res->mohm || !res->enabled)
continue;
switch (sensor->type) {
case NVBIOS_EXTDEV_INA209:
read = nvkm_iccsense_ina209_read;
break;
case NVBIOS_EXTDEV_INA219:
read = nvkm_iccsense_ina219_read;
break;
case NVBIOS_EXTDEV_INA3221:
read = nvkm_iccsense_ina3221_read;
break;
default:
continue;
}
rail = kmalloc(sizeof(*rail), GFP_KERNEL);
if (!rail)
return -ENOMEM;
rail->read = read;
rail->sensor = sensor;
rail->idx = r;
rail->mohm = res->mohm;
nvkm_debug(subdev, "create rail for extdev %i: { idx: %i, mohm: %i }\n", pwr_rail->extdev_id, r, rail->mohm);
list_add_tail(&rail->head, &iccsense->rails);
}
}
return 0;
}
static int
nvkm_iccsense_init(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
struct nvkm_iccsense_sensor *sensor;
list_for_each_entry(sensor, &iccsense->sensors, head)
nvkm_iccsense_sensor_config(iccsense, sensor);
return 0;
}
static const struct nvkm_subdev_func
iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
.init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
};
void
nvkm_iccsense_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense *iccsense)
{
nvkm_subdev_ctor(&iccsense_func, device, type, inst, &iccsense->subdev);
}
int
nvkm_iccsense_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_iccsense **iccsense)
{
if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&(*iccsense)->sensors);
INIT_LIST_HEAD(&(*iccsense)->rails);
nvkm_iccsense_ctor(device, type, inst, *iccsense);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
static u32
gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
struct nvkm_device *device = fuse->subdev.device;
return nvkm_rd32(device, 0x021100 + addr);
}
static const struct nvkm_fuse_func
gm107_fuse = {
.read = gm107_fuse_read,
};
int
gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fuse **pfuse)
{
return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
static u32
gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
struct nvkm_device *device = fuse->subdev.device;
unsigned long flags;
u32 fuse_enable, unk, val;
/* racy if another part of nvkm starts writing to these regs */
spin_lock_irqsave(&fuse->lock, flags);
fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
val = nvkm_rd32(device, 0x021100 + addr);
nvkm_wr32(device, 0x021000, unk);
nvkm_wr32(device, 0x022400, fuse_enable);
spin_unlock_irqrestore(&fuse->lock, flags);
return val;
}
static const struct nvkm_fuse_func
gf100_fuse = {
.read = gf100_fuse_read,
};
int
gf100_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fuse **pfuse)
{
return nvkm_fuse_new_(&gf100_fuse, device, type, inst, pfuse);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
static u32
nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
struct nvkm_device *device = fuse->subdev.device;
unsigned long flags;
u32 fuse_enable, val;
/* racy if another part of nvkm starts writing to this reg */
spin_lock_irqsave(&fuse->lock, flags);
fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
val = nvkm_rd32(device, 0x021000 + addr);
nvkm_wr32(device, 0x001084, fuse_enable);
spin_unlock_irqrestore(&fuse->lock, flags);
return val;
}
static const struct nvkm_fuse_func
nv50_fuse = {
.read = &nv50_fuse_read,
};
int
nv50_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_fuse **pfuse)
{
return nvkm_fuse_new_(&nv50_fuse, device, type, inst, pfuse);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c |
/*
* Copyright 2014 Martin Peres
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres
*/
#include "priv.h"
u32
nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
{
return fuse->func->read(fuse, addr);
}
static void *
nvkm_fuse_dtor(struct nvkm_subdev *subdev)
{
return nvkm_fuse(subdev);
}
static const struct nvkm_subdev_func
nvkm_fuse = {
.dtor = nvkm_fuse_dtor,
};
int
nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse)
{
struct nvkm_fuse *fuse;
if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_fuse, device, type, inst, &fuse->subdev);
fuse->func = func;
spin_lock_init(&fuse->lock);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "regsnv04.h"
static void
nv40_timer_init(struct nvkm_timer *tmr)
{
struct nvkm_subdev *subdev = &tmr->subdev;
struct nvkm_device *device = subdev->device;
u32 f = 0; /*XXX: figure this out */
u32 n, d;
/* aim for 31.25MHz, which gives us nanosecond timestamps */
d = 1000000 / 32;
n = f;
if (!f) {
n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
if (!n || !d) {
n = 1;
d = 1;
}
nvkm_warn(subdev, "unknown input clock freq\n");
}
/* reduce ratio to acceptable values */
while (((n % 5) == 0) && ((d % 5) == 0)) {
n /= 5;
d /= 5;
}
while (((n % 2) == 0) && ((d % 2) == 0)) {
n /= 2;
d /= 2;
}
while (n > 0xffff || d > 0xffff) {
n >>= 1;
d >>= 1;
}
nvkm_debug(subdev, "input frequency : %dHz\n", f);
nvkm_debug(subdev, "numerator : %08x\n", n);
nvkm_debug(subdev, "denominator : %08x\n", d);
nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
}
static const struct nvkm_timer_func
nv40_timer = {
.init = nv40_timer_init,
.intr = nv04_timer_intr,
.read = nv04_timer_read,
.time = nv04_timer_time,
.alarm_init = nv04_timer_alarm_init,
.alarm_fini = nv04_timer_alarm_fini,
};
int
nv40_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_timer **ptmr)
{
return nvkm_timer_new_(&nv40_timer, device, type, inst, ptmr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "regsnv04.h"
void
nv04_timer_time(struct nvkm_timer *tmr, u64 time)
{
struct nvkm_subdev *subdev = &tmr->subdev;
struct nvkm_device *device = subdev->device;
u32 hi = upper_32_bits(time);
u32 lo = lower_32_bits(time);
nvkm_debug(subdev, "time low : %08x\n", lo);
nvkm_debug(subdev, "time high : %08x\n", hi);
nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
}
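/* Sample the 64-bit counter.  The high word is read again after the low
 * word so that a carry between the two reads is detected and the sample
 * retried, giving a consistent 64-bit value.
 */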
u64
nv04_timer_read(struct nvkm_timer *tmr)
{
struct nvkm_device *device = tmr->subdev.device;
u32 hi, lo;
do {
hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
} while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
return ((u64)hi << 32 | lo);
}
void
nv04_timer_alarm_fini(struct nvkm_timer *tmr)
{
struct nvkm_device *device = tmr->subdev.device;
nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
}
void
nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
{
struct nvkm_device *device = tmr->subdev.device;
nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
}
void
nv04_timer_intr(struct nvkm_timer *tmr)
{
struct nvkm_subdev *subdev = &tmr->subdev;
struct nvkm_device *device = subdev->device;
u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
nvkm_timer_alarm_trigger(tmr);
stat &= ~0x00000001;
}
if (stat) {
nvkm_error(subdev, "intr %08x\n", stat);
nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
}
}
static void
nv04_timer_init(struct nvkm_timer *tmr)
{
struct nvkm_subdev *subdev = &tmr->subdev;
struct nvkm_device *device = subdev->device;
u32 f = 0; /*XXX: nvclk */
u32 n, d;
/* aim for 31.25MHz, which gives us nanosecond timestamps */
d = 1000000 / 32;
n = f;
if (!f) {
n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
if (!n || !d) {
n = 1;
d = 1;
}
nvkm_warn(subdev, "unknown input clock freq\n");
}
/* reduce ratio to acceptable values */
while (((n % 5) == 0) && ((d % 5) == 0)) {
n /= 5;
d /= 5;
}
while (((n % 2) == 0) && ((d % 2) == 0)) {
n /= 2;
d /= 2;
}
while (n > 0xffff || d > 0xffff) {
n >>= 1;
d >>= 1;
}
nvkm_debug(subdev, "input frequency : %dHz\n", f);
nvkm_debug(subdev, "numerator : %08x\n", n);
nvkm_debug(subdev, "denominator : %08x\n", d);
nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
}
static const struct nvkm_timer_func
nv04_timer = {
.init = nv04_timer_init,
.intr = nv04_timer_intr,
.read = nv04_timer_read,
.time = nv04_timer_time,
.alarm_init = nv04_timer_alarm_init,
.alarm_fini = nv04_timer_alarm_fini,
};
int
nv04_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_timer **ptmr)
{
return nvkm_timer_new_(&nv04_timer, device, type, inst, ptmr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
s64
nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
{
struct nvkm_subdev *subdev = &wait->tmr->subdev;
u64 time = nvkm_timer_read(wait->tmr);
if (wait->reads == 0) {
wait->time0 = time;
wait->time1 = time;
}
if (wait->time1 == time) {
if (wait->reads++ == 16) {
nvkm_fatal(subdev, "stalled at %016llx\n", time);
return -ETIMEDOUT;
}
} else {
wait->time1 = time;
wait->reads = 1;
}
if (wait->time1 - wait->time0 > wait->limit)
return -ETIMEDOUT;
return wait->time1 - wait->time0;
}
void
nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
struct nvkm_timer_wait *wait)
{
wait->tmr = device->timer;
wait->limit = nsec;
wait->reads = 0;
}
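/* Sketch of the polling loop these two helpers are built for (roughly
 * what the nvkm_nsec()/nvkm_usec()/nvkm_msec() macros in the timer
 * header expand to):
 *
 *	struct nvkm_timer_wait wait;
 *
 *	nvkm_timer_wait_init(device, timeout_ns, &wait);
 *	do {
 *		if (condition_met(device))
 *			break;
 *	} while (nvkm_timer_wait_test(&wait) >= 0);
 *
 * condition_met() and timeout_ns are placeholders for whatever register
 * test and time budget the caller is polling with.
 */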
u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
return tmr->func->read(tmr);
}
void
nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
{
struct nvkm_alarm *alarm, *atemp;
unsigned long flags;
LIST_HEAD(exec);
/* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
/* Have we hit the earliest alarm that hasn't gone off? */
if (alarm->timestamp > nvkm_timer_read(tmr)) {
/* Schedule it. If we didn't race, we're done. */
tmr->func->alarm_init(tmr, alarm->timestamp);
if (alarm->timestamp > nvkm_timer_read(tmr))
break;
}
/* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself.
*/
list_del_init(&alarm->head);
list_add(&alarm->exec, &exec);
}
/* Shut down interrupt if no more pending alarms. */
if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
spin_unlock_irqrestore(&tmr->lock, flags);
/* Execute completed callbacks. */
list_for_each_entry_safe(alarm, atemp, &exec, exec) {
list_del(&alarm->exec);
alarm->func(alarm);
}
}
void
nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
{
struct nvkm_alarm *list;
unsigned long flags;
/* Remove alarm from pending list.
*
* This both protects against the corruption of the list,
* and implements alarm rescheduling/cancellation.
*/
spin_lock_irqsave(&tmr->lock, flags);
list_del_init(&alarm->head);
if (nsec) {
/* Insert into pending list, ordered earliest to latest. */
alarm->timestamp = nvkm_timer_read(tmr) + nsec;
list_for_each_entry(list, &tmr->alarms, head) {
if (list->timestamp > alarm->timestamp)
break;
}
list_add_tail(&alarm->head, &list->head);
/* Update HW if this is now the earliest alarm. */
list = list_first_entry(&tmr->alarms, typeof(*list), head);
if (list == alarm) {
tmr->func->alarm_init(tmr, alarm->timestamp);
/* This shouldn't happen if callers aren't stupid.
*
* Worst case scenario is that it'll take roughly
* 4 seconds for the next alarm to trigger.
*/
WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
}
}
spin_unlock_irqrestore(&tmr->lock, flags);
}
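/* Usage sketch: a subdev embeds a struct nvkm_alarm, binds it to a
 * callback with nvkm_alarm_init() from the timer header, and then
 * (re)schedules it here; passing nsec == 0 cancels a pending alarm.
 *
 *	static void my_alarm_cb(struct nvkm_alarm *alarm) { ... }
 *	...
 *	nvkm_alarm_init(&foo->alarm, my_alarm_cb);
 *	nvkm_timer_alarm(tmr, 1000000000, &foo->alarm);
 *
 * schedules the callback roughly one second from now; my_alarm_cb() and
 * foo are illustrative names only.
 */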
static void
nvkm_timer_intr(struct nvkm_subdev *subdev)
{
struct nvkm_timer *tmr = nvkm_timer(subdev);
tmr->func->intr(tmr);
}
static int
nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_timer *tmr = nvkm_timer(subdev);
tmr->func->alarm_fini(tmr);
return 0;
}
static int
nvkm_timer_init(struct nvkm_subdev *subdev)
{
struct nvkm_timer *tmr = nvkm_timer(subdev);
if (tmr->func->init)
tmr->func->init(tmr);
tmr->func->time(tmr, ktime_to_ns(ktime_get()));
nvkm_timer_alarm_trigger(tmr);
return 0;
}
static void *
nvkm_timer_dtor(struct nvkm_subdev *subdev)
{
return nvkm_timer(subdev);
}
static const struct nvkm_subdev_func
nvkm_timer = {
.dtor = nvkm_timer_dtor,
.init = nvkm_timer_init,
.fini = nvkm_timer_fini,
.intr = nvkm_timer_intr,
};
int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_timer **ptmr)
{
struct nvkm_timer *tmr;
if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
return -ENOMEM;
nvkm_subdev_ctor(&nvkm_timer, device, type, inst, &tmr->subdev);
tmr->func = func;
INIT_LIST_HEAD(&tmr->alarms);
spin_lock_init(&tmr->lock);
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "regsnv04.h"
static void
nv41_timer_init(struct nvkm_timer *tmr)
{
struct nvkm_subdev *subdev = &tmr->subdev;
struct nvkm_device *device = subdev->device;
u32 f = device->crystal;
u32 m = 1, n, d;
/* aim for 31.25MHz, which gives us nanosecond timestamps */
d = 1000000 / 32;
n = f;
while (n < (d * 2)) {
n += (n / m);
m++;
}
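	/* Worked example (hypothetical, assuming f == 27000 for a 27 MHz
	 * reference): the loop above ends with m = 3, n = 81000 against
	 * d = 31250; the reductions below bring this to n = 324, d = 125,
	 * preserving the ratio so that (f * m) * d / n = 31250, i.e. the
	 * 31.25MHz target.
	 */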
/* reduce ratio to acceptable values */
while (((n % 5) == 0) && ((d % 5) == 0)) {
n /= 5;
d /= 5;
}
while (((n % 2) == 0) && ((d % 2) == 0)) {
n /= 2;
d /= 2;
}
while (n > 0xffff || d > 0xffff) {
n >>= 1;
d >>= 1;
}
nvkm_debug(subdev, "input frequency : %dHz\n", f);
nvkm_debug(subdev, "input multiplier: %d\n", m);
nvkm_debug(subdev, "numerator : %08x\n", n);
nvkm_debug(subdev, "denominator : %08x\n", d);
nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
nvkm_wr32(device, 0x009220, m - 1);
nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
}
static const struct nvkm_timer_func
nv41_timer = {
.init = nv41_timer_init,
.intr = nv04_timer_intr,
.read = nv04_timer_read,
.time = nv04_timer_time,
.alarm_init = nv04_timer_alarm_init,
.alarm_fini = nv04_timer_alarm_fini,
};
int
nv41_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_timer **ptmr)
{
return nvkm_timer_new_(&nv41_timer, device, type, inst, ptmr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
static const struct nvkm_timer_func
gk20a_timer = {
.intr = nv04_timer_intr,
.read = nv04_timer_read,
.time = nv04_timer_time,
.alarm_init = nv04_timer_alarm_init,
.alarm_fini = nv04_timer_alarm_fini,
};
int
gk20a_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_timer **ptmr)
{
return nvkm_timer_new_(&gk20a_timer, device, type, inst, ptmr);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ramnv40.h"
int
nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
u32 fb474 = nvkm_rd32(device, 0x100474);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
if (fb474 & 0x00000004)
type = NVKM_RAM_TYPE_GDDR3;
if (fb474 & 0x00000002)
type = NVKM_RAM_TYPE_DDR2;
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
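	/* Note: when more than one of these bits is set, the later checks
	 * win, so DDR1 takes precedence over DDR2 and GDDR3 here.
	 */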
ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <[email protected]>
* Roy Spliet <[email protected]>
*/
#include "ram.h"
struct ramxlat {
int id;
u8 enc;
};
static inline int
ramxlat(const struct ramxlat *xlat, int id)
{
while (xlat->id >= 0) {
if (xlat->id == id)
return xlat->enc;
xlat++;
}
return -EINVAL;
}
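/* Example: with a hypothetical table { { 10, 2 }, { 12, 4 }, { -1 } },
 * ramxlat(tbl, 12) returns the encoding 4, while ramxlat(tbl, 11) returns
 * -EINVAL because that id is not listed.
 */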
static const struct ramxlat
ramgddr3_cl_lo[] = {
{ 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
/* the below are mentioned in some, but not all, gddr3 docs */
{ 13, 9 }, { 14, 6 },
/* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
/* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
* { 15, 11 }, */
{ -1 }
};
static const struct ramxlat
ramgddr3_cl_hi[] = {
{ 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 },
{ 16, 0 }, { 17, 1 },
{ -1 }
};
static const struct ramxlat
ramgddr3_wr_lo[] = {
{ 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
{ 11, 0 }, { 13 , 1 },
/* the below are mentioned in some, but not all, gddr3 docs */
{ 4, 0 }, { 6, 3 }, { 12, 1 },
{ -1 }
};
int
nvkm_gddr3_calc(struct nvkm_ram *ram)
{
int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
switch (ram->next->bios.timing_ver) {
case 0x10:
CWL = ram->next->bios.timing_10_CWL;
CL = ram->next->bios.timing_10_CL;
WR = ram->next->bios.timing_10_WR;
DLL = !ram->next->bios.ramcfg_DLLoff;
ODT = ram->next->bios.timing_10_ODT;
RON = ram->next->bios.ramcfg_RON;
break;
case 0x20:
CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
/* XXX: Get these values from the VBIOS instead */
DLL = !(ram->mr[1] & 0x1);
RON = !((ram->mr[1] & 0x300) >> 8);
break;
default:
return -ENOSYS;
}
if (ram->next->bios.timing_ver == 0x20 ||
ram->next->bios.ramcfg_timing == 0xff) {
ODT = (ram->mr[1] & 0xc) >> 2;
}
hi = ram->mr[2] & 0x1;
CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
WR = ramxlat(ramgddr3_wr_lo, WR);
if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0)
return -EINVAL;
ram->mr[0] &= ~0xf74;
ram->mr[0] |= (CWL & 0x07) << 9;
ram->mr[0] |= (CL & 0x07) << 4;
ram->mr[0] |= (CL & 0x08) >> 1;
ram->mr[1] &= ~0x3fc;
ram->mr[1] |= (ODT & 0x03) << 2;
ram->mr[1] |= (RON & 0x03) << 8;
ram->mr[1] |= (WR & 0x03) << 4;
ram->mr[1] |= (WR & 0x04) << 5;
ram->mr[1] |= !DLL << 6;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c |
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "ram.h"
int
nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
switch (pbus1218 & 0x00000300) {
case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
}
ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
if (ret)
return ret;
(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
return 0;
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c |
/*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#include "ram.h"
void
nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
struct nvkm_fb_tile *tile)
{
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
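	/* e.g. (hypothetical numbers) a 64KiB region on a two-partition board:
	 * tiles = 0x10000 / 0x80 = 0x200, tags = round_up(0x200 / 2, 0x100) = 0x100.
	 */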
if ( (flags & 2) &&
!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
#ifdef __BIG_ENDIAN
tile->zcomp |= 0x40000000;
#endif
}
}
static void
nv40_fb_init(struct nvkm_fb *fb)
{
nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
}
static const struct nvkm_fb_func
nv40_fb = {
.tags = nv20_fb_tags,
.init = nv40_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
.tile.comp = nv40_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv40_ram_new,
};
int
nv40_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv40_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c |
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "ram.h"
#include "regsnv04.h"
static void
nv04_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
* nvidia reading PFB_CFG_0, then writing back its original value.
* (which was 0x701114 in this case)
*/
nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
}
static const struct nvkm_fb_func
nv04_fb = {
.init = nv04_fb_init,
.ram_new = nv04_ram_new,
};
int
nv04_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
return nvkm_fb_new_(&nv04_fb, device, type, inst, pfb);
}
| linux-master | drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c |