===================================================================
@@ -1534,6 +1534,7 @@ static blk_status_t crypt_convert(struct
crypt_alloc_req(cc, ctx);
atomic_inc(&ctx->cc_pending);
+again:
if (crypt_integrity_aead(cc))
r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
else
@@ -1541,6 +1542,17 @@ static blk_status_t crypt_convert(struct
switch (r) {
/*
+ * Some hardware crypto drivers use GFP_ATOMIC allocations in
+ * the request routine. These allocations can randomly fail. If
+ * we propagated the failure up to the I/O stack, it would cause
+ * I/O errors and data corruption.
+ *
+ * So, we sleep and retry.
+ */
+ case -ENOMEM:
+ msleep(1);
+ goto again;
+ /*
* The request was queued by a crypto driver
* but the driver request queue is full, let's wait.
*/
Some hardware crypto drivers use GFP_ATOMIC allocations in the request
routine. These allocations can randomly fail - for example, they fail if
too many network packets are received. If we propagated the failure up to
the I/O stack, it would cause I/O errors and data corruption.

So, we sleep and retry.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org