@@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buff
 	else
 		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
 
-	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
-		/*
-		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
-		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
-		 * to free nvbe here.
-		 */
+	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+		kfree(nvbe);
 		return NULL;
+	}
 	return &nvbe->ttm.ttm;
 }
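
With this change, a failing ttm_dma_tt_init() no longer calls ttm_tt_destroy() and therefore never reaches nouveau's destroy hook, so the caller owns the partially set-up object and must free its own wrapper. A minimal sketch of the resulting nouveau error path follows; everything outside the if/kfree/return block is paraphrased for illustration and is not part of this patch:

/*
 * Sketch only: the allocation and backend selection are reconstructed
 * for illustration; only the if/kfree/return block is from the patch.
 */
static struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct nouveau_sgdma_be *nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);

	if (!nvbe)
		return NULL;

	/* ... pick &nv04_sgdma_backend or &nv50_sgdma_backend ... */

	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
		kfree(nvbe);	/* init failed, the destroy hook never runs */
		return NULL;
	}
	return &nvbe->ttm.ttm;
}

The remaining hunks make the matching change in the TTM core init helpers.
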
@@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, stru
 	ttm_tt_init_fields(ttm, bo, page_flags);
 
 	if (ttm_tt_alloc_page_directory(ttm)) {
-		ttm_tt_destroy(ttm);
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
 	}
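
After this hunk, a failed page-directory allocation in ttm_tt_init() simply reports -ENOMEM; the ttm_tt fields are initialized but no pages array exists, and it is the caller's job to unwind whatever it allocated. A sketch of how the function reads after the change (only the failure branch is verbatim from the patch, the rest is reconstructed and may differ from the tree):

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		/*
		 * No ttm_tt_destroy() here any more: the backend destroy hook
		 * must not run on an object that was never fully initialized.
		 * The caller frees its own allocation instead.
		 */
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
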
@@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *t
 
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
-		ttm_tt_destroy(ttm);
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
 	}
@@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *tt
 	else
 		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
 	if (ret) {
-		ttm_tt_destroy(ttm);
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
 	}
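
The ttm_dma_tt_init() and ttm_sg_tt_init() hunks apply the same rule, so any driver that calls these helpers from its ttm_tt_create hook needs nouveau-style cleanup on failure. A hypothetical driver hook showing the new ownership convention; names such as mydrv_tt and mydrv_ttm_backend are invented for illustration and are not from this patch:

/* Hypothetical driver-private wrapper around the DMA-capable TTM object. */
struct mydrv_tt {
	struct ttm_dma_tt dma;
	/* driver-private state would follow here */
};

static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
	struct mydrv_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	tt->dma.ttm.func = &mydrv_ttm_backend;	/* assumed backend ops */

	if (ttm_sg_tt_init(&tt->dma, bo, page_flags)) {
		/*
		 * Init failed before the object was live, so the destroy
		 * callback will not be invoked; free the wrapper ourselves.
		 */
		kfree(tt);
		return NULL;
	}
	return &tt->dma.ttm;
}
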