@@ -580,11 +580,21 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
* under common lock for the struct drm_sched_entity that was set up for
* @sched_job in drm_sched_job_init().
*/
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
+int drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
struct drm_sched_entity *entity = sched_job->entity;
+ struct drm_gpu_scheduler *sched = sched_job->sched;
bool first;
ktime_t submit_ts;
+ int ret;
+
+ ret = wait_event_interruptible(
+ sched->job_scheduled,
+ atomic_read(&sched->enqueue_credit_count) <=
+ sched->enqueue_credit_limit);
+ if (ret)
+ return ret;
+ atomic_add(sched_job->enqueue_credits, &sched->enqueue_credit_count);
trace_drm_sched_job(sched_job, entity);
atomic_inc(entity->rq->sched->score);
@@ -609,7 +619,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
- return;
+ return -EINVAL;
}
rq = entity->rq;
@@ -626,5 +636,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
drm_sched_wakeup(sched);
}
+
+ return 0;
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
@@ -1217,6 +1217,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
+ atomic_sub(sched_job->enqueue_credits, &sched->enqueue_credit_count);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
@@ -1253,6 +1254,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->ops = args->ops;
sched->credit_limit = args->credit_limit;
+ sched->enqueue_credit_limit = args->enqueue_credit_limit;
sched->name = args->name;
sched->timeout = args->timeout;
sched->hang_limit = args->hang_limit;
@@ -1308,6 +1310,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->credit_count, 0);
+ atomic_set(&sched->enqueue_credit_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
@@ -329,6 +329,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
* @credits: the number of credits this job contributes to the scheduler
+ * @enqueue_credits: the number of enqueue credits this job contributes
* @work: Helper to reschedule job kill to different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -366,6 +367,7 @@ struct drm_sched_job {
enum drm_sched_priority s_priority;
u32 credits;
+ u32 enqueue_credits;
/** @last_dependency: tracks @dependencies as they signal */
unsigned int last_dependency;
atomic_t karma;
@@ -485,6 +487,10 @@ struct drm_sched_backend_ops {
* @ops: backend operations provided by the driver.
* @credit_limit: the credit limit of this scheduler
* @credit_count: the current credit count of this scheduler
+ * @enqueue_credit_limit: the credit limit of jobs pushed to scheduler and not
+ * yet run
+ * @enqueue_credit_count: the current credit count of jobs pushed to scheduler
+ * but not yet run
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
* @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
@@ -518,6 +524,8 @@ struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
u32 credit_limit;
atomic_t credit_count;
+ u32 enqueue_credit_limit;
+ atomic_t enqueue_credit_count;
long timeout;
const char *name;
u32 num_rqs;
@@ -550,6 +558,8 @@ struct drm_gpu_scheduler {
* @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
* as there's usually one run-queue per priority, but may be less.
* @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @enqueue_credit_limit: the number of credits that can be enqueued before
+ * drm_sched_entity_push_job() blocks
* @hang_limit: number of times to allow a job to hang before dropping it.
* This mechanism is DEPRECATED. Set it to 0.
* @timeout: timeout value in jiffies for submitted jobs.
@@ -564,6 +574,7 @@ struct drm_sched_init_args {
struct workqueue_struct *timeout_wq;
u32 num_rqs;
u32 credit_limit;
+ u32 enqueue_credit_limit;
unsigned int hang_limit;
long timeout;
atomic_t *score;
@@ -600,7 +611,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
+int drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,