220 lines
6.4 KiB
Diff
From 266f7618e761c8a6aa89dbfe43cda1b69cdbbf14 Mon Sep 17 00:00:00 2001
From: Danilo Krummrich <dakr@redhat.com>
Date: Wed, 8 Nov 2023 01:12:38 +0100
Subject: [PATCH] drm/nouveau: separately allocate struct nouveau_uvmm

Allocate struct nouveau_uvmm separately in preparation for subsequent
commits introducing reference counting for struct drm_gpuvm.

While at it, get rid of nouveau_uvmm_init() as indirection of
nouveau_uvmm_ioctl_vm_init() and perform some minor cleanups.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-9-dakr@redhat.com
---
 drivers/gpu/drm/nouveau/nouveau_drm.c  |  5 +-
 drivers/gpu/drm/nouveau/nouveau_drv.h  | 10 ++--
 drivers/gpu/drm/nouveau/nouveau_uvmm.c | 63 +++++++++++++-------------
 drivers/gpu/drm/nouveau/nouveau_uvmm.h |  4 --
 4 files changed, 40 insertions(+), 42 deletions(-)

--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cl
 static void
 nouveau_cli_fini(struct nouveau_cli *cli)
 {
+	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli);
+
 	/* All our channels are dead now, which means all the fences they
 	 * own are signalled, and all callback functions have been called.
 	 *
@@ -199,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli
 	WARN_ON(!list_empty(&cli->worker));
 
 	usif_client_fini(cli);
-	nouveau_uvmm_fini(&cli->uvmm);
+	if (uvmm)
+		nouveau_uvmm_fini(uvmm);
 	nouveau_sched_entity_fini(&cli->sched_entity);
 	nouveau_vmm_fini(&cli->svm);
 	nouveau_vmm_fini(&cli->vmm);
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -93,7 +93,10 @@ struct nouveau_cli {
 	struct nvif_mmu mmu;
 	struct nouveau_vmm vmm;
 	struct nouveau_vmm svm;
-	struct nouveau_uvmm uvmm;
+	struct {
+		struct nouveau_uvmm *ptr;
+		bool disabled;
+	} uvmm;
 
 	struct nouveau_sched_entity sched_entity;
 
@@ -121,10 +124,7 @@ struct nouveau_cli_work {
 static inline struct nouveau_uvmm *
 nouveau_cli_uvmm(struct nouveau_cli *cli)
 {
-	if (!cli || !cli->uvmm.vmm.cli)
-		return NULL;
-
-	return &cli->uvmm;
+	return cli ? cli->uvmm.ptr : NULL;
 }
 
 static inline struct nouveau_uvmm *
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1638,18 +1638,6 @@ err_free:
 	return ret;
 }
 
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
-			   void *data,
-			   struct drm_file *file_priv)
-{
-	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct drm_nouveau_vm_init *init = data;
-
-	return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
-				 init->kernel_managed_size);
-}
-
 static int
 nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
 {
@@ -1795,17 +1783,25 @@ nouveau_uvmm_bo_unmap_all(struct nouveau
 }
 
 int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
-		  u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+			   void *data,
+			   struct drm_file *file_priv)
 {
+	struct nouveau_uvmm *uvmm;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct drm_device *drm = cli->drm->dev;
 	struct drm_gem_object *r_obj;
-	u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
+	struct drm_nouveau_vm_init *init = data;
+	u64 kernel_managed_end;
 	int ret;
 
-	mutex_init(&uvmm->mutex);
-	mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
-	mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+	if (check_add_overflow(init->kernel_managed_addr,
+			       init->kernel_managed_size,
+			       &kernel_managed_end))
+		return -EINVAL;
+
+	if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+		return -EINVAL;
 
 	mutex_lock(&cli->mutex);
 
@@ -1814,44 +1810,49 @@ nouveau_uvmm_init(struct nouveau_uvmm *u
 		goto out_unlock;
 	}
 
-	if (kernel_managed_end <= kernel_managed_addr) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
-		ret = -EINVAL;
+	uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+	if (!uvmm) {
+		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
 	r_obj = drm_gpuvm_resv_object_alloc(drm);
 	if (!r_obj) {
+		kfree(uvmm);
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	mutex_init(&uvmm->mutex);
+	mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+	mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+
 	drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
 		       NOUVEAU_VA_SPACE_START,
 		       NOUVEAU_VA_SPACE_END,
-		       kernel_managed_addr, kernel_managed_size,
+		       init->kernel_managed_addr,
+		       init->kernel_managed_size,
 		       NULL);
 	/* GPUVM takes care from here on. */
 	drm_gem_object_put(r_obj);
 
 	ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
 			    cli->vmm.vmm.object.oclass, RAW,
-			    kernel_managed_addr, kernel_managed_size,
-			    NULL, 0, &cli->uvmm.vmm.vmm);
+			    init->kernel_managed_addr,
+			    init->kernel_managed_size,
+			    NULL, 0, &uvmm->vmm.vmm);
 	if (ret)
 		goto out_gpuvm_fini;
 
-	cli->uvmm.vmm.cli = cli;
+	uvmm->vmm.cli = cli;
+	cli->uvmm.ptr = uvmm;
 	mutex_unlock(&cli->mutex);
 
 	return 0;
 
 out_gpuvm_fini:
 	drm_gpuvm_destroy(&uvmm->base);
+	kfree(uvmm);
 out_unlock:
 	mutex_unlock(&cli->mutex);
 	return ret;
@@ -1866,9 +1867,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *u
 	struct nouveau_sched_entity *entity = &cli->sched_entity;
 	struct drm_gpuva *va, *next;
 
-	if (!cli)
-		return;
-
 	rmb(); /* for list_empty to work without lock */
 	wait_event(entity->job.wq, list_empty(&entity->job.list.head));
 
@@ -1907,5 +1905,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *u
 	mutex_lock(&cli->mutex);
 	nouveau_vmm_fini(&uvmm->vmm);
 	drm_gpuvm_destroy(&uvmm->base);
+	kfree(uvmm);
 	mutex_unlock(&cli->mutex);
 }
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -12,8 +12,6 @@ struct nouveau_uvmm {
 	struct nouveau_vmm vmm;
 	struct maple_tree region_mt;
 	struct mutex mutex;
-
-	bool disabled;
 };
 
 struct nouveau_uvma_region {
@@ -78,8 +76,6 @@ struct nouveau_uvmm_bind_job_args {
 
 #define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
 
-int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
-		      u64 kernel_managed_addr, u64 kernel_managed_size);
 void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
 
 void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem);