--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -155,9 +155,6 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
return bo;
obj = &bo->base;
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
ret = v3d_bo_get_pages(bo);
if (ret)
goto free_mm;
@@ -194,8 +191,6 @@ void v3d_free_object(struct drm_gem_object *obj)
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
- reservation_object_fini(&bo->_resv);
-
v3d_bo_put_pages(bo);
if (obj->import_attach)
@@ -212,13 +207,6 @@ void v3d_free_object(struct drm_gem_object *obj)
kfree(bo);
}
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
-{
- struct v3d_bo *bo = to_v3d_bo(obj);
-
- return bo->resv;
-}
-
static void
v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
{
@@ -290,7 +278,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
return ERR_CAST(bo);
obj = &bo->base;
- bo->resv = attach->dmabuf->resv;
+ obj->resv = attach->dmabuf->resv;
bo->sgt = sgt;
obj->import_attach = attach;
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -214,7 +214,6 @@ static struct drm_driver v3d_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
- .gem_prime_res_obj = v3d_prime_res_obj,
.gem_prime_get_sg_table = v3d_prime_get_sg_table,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
.gem_prime_mmap = v3d_prime_mmap,
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -133,10 +133,6 @@ struct v3d_bo {
* v3d_exec_info->unref_list
*/
struct list_head unref_head;
-
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
};
static inline struct v3d_bo *
@@ -281,7 +277,6 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -214,7 +214,7 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ reservation_object_add_excl_fence(bos[i]->base.resv, fence);
}
}
@@ -226,7 +226,7 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
int i;
for (i = 0; i < bo_count; i++)
- ww_mutex_unlock(&bos[i]->resv->lock);
+ ww_mutex_unlock(&bos[i]->base.resv->lock);
ww_acquire_fini(acquire_ctx);
}
@@ -252,7 +252,7 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
if (contended_lock != -1) {
struct v3d_bo *bo = bos[contended_lock];
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&bo->base.resv->lock,
acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
@@ -264,18 +264,18 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
if (i == contended_lock)
continue;
- ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
+ ret = ww_mutex_lock_interruptible(&bos[i]->base.resv->lock,
acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++)
- ww_mutex_unlock(&bos[j]->resv->lock);
+ ww_mutex_unlock(&bos[j]->base.resv->lock);
if (contended_lock != -1 && contended_lock >= i) {
struct v3d_bo *bo = bos[contended_lock];
- ww_mutex_unlock(&bo->resv->lock);
+ ww_mutex_unlock(&bo->base.resv->lock);
}
if (ret == -EDEADLK) {
@@ -294,7 +294,7 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
* before we commit the CL to the hardware.
*/
for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+ ret = reservation_object_reserve_shared(bos[i]->base.resv, 1);
if (ret) {
v3d_unlock_bo_reservations(bos, bo_count,
acquire_ctx);
@@ -453,8 +453,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
{
int ret;
struct drm_v3d_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct v3d_bo *bo;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
@@ -463,21 +461,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_v3d_bo(gem_obj);
-
- ret = reservation_object_wait_timeout_rcu(bo->resv,
- true, true,
- timeout_jiffies);
-
- if (ret == 0)
- ret = -ETIME;
- else if (ret > 0)
- ret = 0;
+ ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -492,8 +477,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
- drm_gem_object_put_unlocked(gem_obj);
-
return ret;
}
Now that the base struct drm_gem_object has a reservation_object, use it
and remove the private BO one.

Cc: Eric Anholt <eric@anholt.net>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Rob Herring <robh@kernel.org>
---
 drivers/gpu/drm/v3d/v3d_bo.c  | 14 +-------------
 drivers/gpu/drm/v3d/v3d_drv.c |  1 -
 drivers/gpu/drm/v3d/v3d_drv.h |  5 -----
 drivers/gpu/drm/v3d/v3d_gem.c | 35 +++++++++--------------------------
 4 files changed, 10 insertions(+), 45 deletions(-)

-- 
2.19.1
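The conversion in the hunks above is mechanical: the reservation object that
used to hang off the driver-private struct v3d_bo now lives in the embedded
struct drm_gem_object, and v3d_prime_import_sg_table() simply points obj->resv
at the dma-buf's reservation object for imported buffers. A minimal sketch of
the resulting pattern, with hypothetical example_bo/example_attach_fence names
rather than the v3d structures themselves:

/*
 * Not part of the patch: a hypothetical sketch of the pattern the
 * conversion follows.  The wrapper BO keeps no reservation object of
 * its own; every user reaches through the embedded GEM object instead.
 */
#include <drm/drm_gem.h>
#include <linux/dma-fence.h>
#include <linux/reservation.h>

struct example_bo {			/* hypothetical driver BO wrapper */
	struct drm_gem_object base;	/* base.resv is used from here on */
	/* no private "struct reservation_object *resv" / "_resv" members */
};

static void example_attach_fence(struct example_bo *bo,
				 struct dma_fence *fence)
{
	/* before: reservation_object_add_excl_fence(bo->resv, fence); */
	reservation_object_add_excl_fence(bo->base.resv, fence);
}

The v3d_wait_bo_ioctl() hunk takes the same idea one step further and replaces
the open-coded object lookup and wait with the drm_gem_reservation_object_wait()
helper.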