@@ -92,6 +92,10 @@ struct msm_gpu_funcs {
	 * for cmdstream that is buffered in this FIFO upstream of the CP fw.
	 */
	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+	int (*submitqueue_setup)(struct msm_gpu *gpu,
+			struct msm_gpu_submitqueue *queue);
+	void (*submitqueue_close)(struct msm_gpu *gpu,
+			struct msm_gpu_submitqueue *queue);
};

/* Additional state for iommu faults: */
@@ -522,6 +526,9 @@ struct msm_gpu_submitqueue {
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
+	struct msm_gpu *gpu;
+	struct drm_gem_object *bo;
+	uint64_t bo_iova;
};

struct msm_gpu_state_bo {
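
For illustration only, here is a minimal sketch of how a GPU backend might implement the two new hooks, using the bo/bo_iova fields added above. The my_gpu_* names, the buffer size, and the flags are made up for the example, and it assumes the existing msm_gem_kernel_new()/msm_gem_kernel_put() helpers and gpu->aspace; none of this is defined by the patch itself:

static int my_gpu_submitqueue_setup(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue)
{
	void *ptr;

	/*
	 * Back each queue with a small kernel-owned buffer, recording the
	 * GEM object and its GPU address in the new submitqueue fields.
	 */
	ptr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC | MSM_BO_MAP_PRIV,
			gpu->aspace, &queue->bo, &queue->bo_iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}

static void my_gpu_submitqueue_close(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue)
{
	if (!queue->bo)
		return;

	/* Release the per-queue buffer allocated in submitqueue_setup() */
	msm_gem_kernel_put(queue->bo, gpu->aspace);
	queue->bo = NULL;
}
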
@@ -71,6 +71,11 @@ void msm_submitqueue_destroy(struct kref *kref)
	struct msm_gpu_submitqueue *queue = container_of(kref,
			struct msm_gpu_submitqueue, ref);
+	struct msm_gpu *gpu = queue->gpu;
+
+	if (gpu && gpu->funcs->submitqueue_close)
+		gpu->funcs->submitqueue_close(gpu, queue);
+
	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);
@@ -160,6 +165,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
+	struct msm_gpu *gpu = priv->gpu;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;
@@ -195,6 +201,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;
+	queue->gpu = gpu;

	if (id)
		*id = queue->id;
@@ -207,6 +214,9 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
	write_unlock(&ctx->queuelock);

+	if (gpu && gpu->funcs->submitqueue_setup)
+		gpu->funcs->submitqueue_setup(gpu, queue);
+
	return 0;
}
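
Both call sites check the function pointer before calling it, so the hooks are optional and existing backends are unaffected. A backend that opts in would wire them into its msm_gpu_funcs table, roughly as in this hedged sketch (my_gpu_funcs and the my_gpu_* callbacks are the invented names from the example above):

static const struct msm_gpu_funcs my_gpu_funcs = {
	/* ... the backend's existing callbacks ... */
	.submitqueue_setup = my_gpu_submitqueue_setup,
	.submitqueue_close = my_gpu_submitqueue_close,
};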