author    Dave Airlie <airlied@redhat.com>  2020-07-28 14:17:36 +1000
committer Dave Airlie <airlied@redhat.com>  2020-07-29 10:06:38 +1000
commit    5de5b6ecf97a021f29403aa272cb4e03318ef586 (patch)
tree      7a31359ebf07c0d4ff0a2c047eacd9f87dadc1a9 /drivers/gpu
parent    ec731e516acf22ce870b735721c6f59e39d0ed44 (diff)
drm/ttm/nouveau: don't call tt destroy callback on alloc failure.
This is confusing, and from my reading of all the drivers only nouveau got this right. Just make the API leave the driver in control of cleaning up its own allocation when init fails, and don't call destroy; if the page table fails to be created there is nothing to clean up here. (I'm willing to believe I've missed something here, so please review deeply.)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200728041736.20689-1-airlied@gmail.com
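Under the new convention a driver's create hook frees its own allocation when ttm_tt_init()/ttm_dma_tt_init() fails, mirroring the nouveau change below. A minimal sketch; the foo_* names are purely illustrative and not symbols from the tree:

/* Illustrative only: foo_ttm and foo_ttm_tt_create are not real symbols. */
struct foo_ttm {
	struct ttm_dma_tt ttm;
	/* driver-private state would live here */
};

static struct ttm_tt *
foo_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct foo_ttm *ftt = kzalloc(sizeof(*ftt), GFP_KERNEL);

	if (!ftt)
		return NULL;

	if (ttm_dma_tt_init(&ftt->ttm, bo, page_flags)) {
		/*
		 * After this patch a failing init no longer calls the
		 * driver's destroy callback, so free our own allocation
		 * directly.
		 */
		kfree(ftt);
		return NULL;
	}

	return &ftt->ttm.ttm;
}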
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c             3
2 files changed, 3 insertions, 9 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 20b6d0b3de5c..c3ccf661b7a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,12 +95,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
- /*
- * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
- * and thus our nouveau_sgdma_destroy() hook, so we don't need
- * to free nvbe here.
- */
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+ kfree(nvbe);
return NULL;
+ }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bab67873cfd4..9d1c7177384c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -244,7 +244,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags);
if (ttm_tt_alloc_page_directory(ttm)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -268,7 +267,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&ttm_dma->pages_list);
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -290,7 +288,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
else
ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
if (ret) {
- ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}