--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -222,9 +222,7 @@ static void put_pages(struct drm_gem_object *obj)
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **p;

msm_gem_assert_locked(obj);
@@ -234,16 +232,29 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
return ERR_PTR(-EBUSY);
}

- p = get_pages(obj);
- if (IS_ERR(p))
- return p;
+ return get_pages(obj);
+}
+
+/*
+ * Update the pin count of the object, call under lru.lock
+ */
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ msm_gem_assert_locked(obj);
+
+ to_msm_bo(obj)->pin_count++;
+ drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
+}
+
+static void pin_obj_locked(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;

mutex_lock(&priv->lru.lock);
- msm_obj->pin_count++;
- update_lru_locked(obj);
+ msm_gem_pin_obj_locked(obj);
mutex_unlock(&priv->lru.lock);
-
- return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
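Note the split above: msm_gem_pin_obj_locked() bumps pin_count and moves the object to the pinned LRU while the caller holds priv->lru.lock, and the static pin_obj_locked() wrapper takes the lock for the single-object callers inside msm_gem.c. The point of the split is that a caller pinning many objects can take lru.lock once for the whole batch. A rough sketch of the two calling conventions (hypothetical fragments, kernel context only; objs/nr/priv are assumed locals, not part of the patch):

/* Caller does NOT hold lru.lock: use the wrapper (as msm_gem_pin_pages()
 * in the next hunk does), which takes and drops the lock around the update.
 */
pin_obj_locked(obj);

/* Caller already holds lru.lock, e.g. while walking a batch of objects:
 * call the exported helper directly so the lock is taken only once.
 */
mutex_lock(&priv->lru.lock);
for (i = 0; i < nr; i++)
	msm_gem_pin_obj_locked(objs[i]);
mutex_unlock(&priv->lru.lock);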
@@ -252,6 +263,8 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)

msm_gem_lock(obj);
p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
+ if (!IS_ERR(p))
+ pin_obj_locked(obj);
msm_gem_unlock(obj);

return p;
@@ -463,7 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
- int ret, prot = IOMMU_READ;
+ int prot = IOMMU_READ;

if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
@@ -480,11 +493,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (IS_ERR(pages))
return PTR_ERR(pages);

- ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
- if (ret)
- msm_gem_unpin_locked(obj);
-
- return ret;
+ return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
@@ -536,8 +545,10 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
return PTR_ERR(vma);

ret = msm_gem_pin_vma_locked(obj, vma);
- if (!ret)
+ if (!ret) {
*iova = vma->iova;
+ pin_obj_locked(obj);
+ }

return ret;
}
@@ -700,6 +711,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (IS_ERR(pages))
return ERR_CAST(pages);

+ pin_obj_locked(obj);
+
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -142,6 +142,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -384,6 +384,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)

static int submit_pin_objects(struct msm_gem_submit *submit)
{
+ struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;

submit->valid = true;
@@ -403,7 +404,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;

- submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
+ submit->bos[i].flags |= BO_VMA_PINNED;
submit->bos[i].vma = vma;

if (vma->iova == submit->bos[i].iova) {
@@ -416,6 +417,13 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
}
}

+ mutex_lock(&priv->lru.lock);
+ for (i = 0; i < submit->nr_bos; i++) {
+ msm_gem_pin_obj_locked(submit->bos[i].obj);
+ submit->bos[i].flags |= BO_OBJ_PINNED;
+ }
+ mutex_unlock(&priv->lru.lock);
+
return ret;
}
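The submit_pin_objects() change above is what motivates the refactor: BO_VMA_PINNED is still set per object inside the pinning loop, but the pin_count/LRU bookkeeping (and BO_OBJ_PINNED) for all buffers now happens in a second pass under a single lru.lock hold, so a submit touches the lock once instead of once per BO. Below is a self-contained userspace analogue of that locking pattern, with a pthread mutex standing in for lru.lock; all names in it are illustrative, not from the driver:

#include <pthread.h>
#include <stdio.h>

/* Shared lock protecting per-object pin counts, as lru.lock protects
 * pin_count in msm.
 */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	int pin_count;
};

/* Analogue of msm_gem_pin_obj_locked(): caller must hold lru_lock. */
static void pin_obj_locked(struct obj *o)
{
	o->pin_count++;
}

/* Old pattern: lock/unlock once per object. */
static void pin_batch_per_object(struct obj *objs, int n)
{
	for (int i = 0; i < n; i++) {
		pthread_mutex_lock(&lru_lock);
		pin_obj_locked(&objs[i]);
		pthread_mutex_unlock(&lru_lock);
	}
}

/* New pattern: one lock hold for the whole batch, as in
 * submit_pin_objects() after this patch.
 */
static void pin_batch_once(struct obj *objs, int n)
{
	pthread_mutex_lock(&lru_lock);
	for (int i = 0; i < n; i++)
		pin_obj_locked(&objs[i]);
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct obj objs[4] = {0};

	pin_batch_per_object(objs, 4);
	pin_batch_once(objs, 4);
	printf("pin_count of objs[0]: %d\n", objs[0].pin_count); /* prints 2 */
	return 0;
}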