@@ -162,6 +162,12 @@ static void crypto_pump_requests(struct crypto_engine *engine,
return;
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
+ if (engine->do_batch_requests) {
+ ret = engine->do_batch_requests(engine);
+ if (ret)
+ dev_err(engine->dev, "failed to do batch requests: %d\n",
+ ret);
+ }
}
static void crypto_pump_work(struct kthread_work *work)
@@ -396,6 +402,12 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* callback(struct crypto_engine *engine)
* where:
* @engine: the crypto engine structure.
+ * @cbk_do_batch: pointer to a callback function to be invoked when executing
+ * a batch of requests.
+ * This has the form:
+ * callback(struct crypto_engine *engine)
+ * where:
+ * @engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
@@ -404,6 +416,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
*/
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool (*cbk_can_enq)(struct crypto_engine *engine),
+ int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
@@ -423,6 +436,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
engine->idling = false;
engine->priv_data = dev;
engine->can_enqueue_more = cbk_can_enq;
+ engine->do_batch_requests = cbk_do_batch;
+
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
@@ -456,7 +471,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
- return crypto_engine_alloc_init_and_set(dev, NULL, rt,
+ return crypto_engine_alloc_init_and_set(dev, NULL, NULL, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
@@ -34,6 +34,7 @@
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests
* @can_enqueue_more: callback to check whether the hardware can process
* a new request
* @kworker: kthread worker struct for request pump
@@ -55,6 +56,7 @@ struct crypto_engine {
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
bool (*can_enqueue_more)(struct crypto_engine *engine);
struct kthread_worker *kworker;
@@ -103,6 +105,7 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool (*cbk_can_enq)(struct crypto_engine *engine),
+ int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
int crypto_engine_exit(struct crypto_engine *engine);
Add support for batch requests, per crypto engine. A new callback is added, do_batch_requests, which executes a batch of requests. It takes the crypto_engine structure as argument (for cases when more than one crypto engine is used). The crypto_engine_alloc_init_and_set function initializes the crypto engine and also sets the do_batch_requests callback. In crypto_pump_requests, if the do_batch_requests callback is implemented in a driver, it will be executed. The linking of the requests is done in the driver, in do_one_request(). Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com> --- crypto/crypto_engine.c | 17 ++++++++++++++++- include/crypto/engine.h | 3 +++ 2 files changed, 19 insertions(+), 1 deletion(-)