
[v8,13/14] mm: zswap: Allocate pool batching resources if the compressor supports batching.

Message ID 20250303084724.6490-14-kanchana.p.sridhar@intel.com
State New
Series zswap IAA compress batching

Commit Message

Sridhar, Kanchana P March 3, 2025, 8:47 a.m. UTC
This patch adds support for the per-CPU acomp_ctx to track multiple
compression/decompression requests and multiple compression destination
buffers. The zswap_cpu_comp_prepare() CPU onlining code queries the
maximum batch size the compressor supports; if the compressor supports
batching, it allocates the necessary batching resources.

However, zswap does not use more than one request yet. Follow-up patches
will actually utilize the multiple acomp_ctx requests/buffers for batch
compression/decompression of multiple pages.

The newly added ZSWAP_MAX_BATCH_SIZE limits the amount of extra memory used
for batching. There is a small extra memory overhead of allocating the
"reqs" and "buffers" arrays for compressors that do not support batching.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
 mm/zswap.c | 99 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 69 insertions(+), 30 deletions(-)
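
For scale, a back-of-the-envelope estimate of the overhead involved (a
sketch assuming x86_64 pointer sizes and 4 KB pages; these figures are
not stated in the patch itself):

	/*
	 * Non-batching compressor (nr_reqs == 1): the new cost is the two
	 * array pointers in the per-CPU acomp_ctx (16 bytes per CPU), plus
	 * the two single-element arrays they point to.
	 *
	 * Batching compressor at the ZSWAP_MAX_BATCH_SIZE cap of 8:
	 *   reqs:    8 * sizeof(struct acomp_req *) = 64 bytes of pointers
	 *   buffers: 8 * 2 * PAGE_SIZE              = 64 KB of dst buffers
	 * per CPU, which is why the cap exists.
	 */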

Comments

Yosry Ahmed March 6, 2025, 8 p.m. UTC | #1
On Mon, Mar 03, 2025 at 12:47:23AM -0800, Kanchana P Sridhar wrote:
> This patch adds support for the per-CPU acomp_ctx to track multiple
> compression/decompression requests and multiple compression destination
> buffers. The zswap_cpu_comp_prepare() CPU onlining code will get the
> maximum batch-size the compressor supports. If so, it will allocate the
> necessary batching resources.
> 
> However, zswap does not use more than one request yet. Follow-up patches
> will actually utilize the multiple acomp_ctx requests/buffers for batch
> compression/decompression of multiple pages.
> 
> The newly added ZSWAP_MAX_BATCH_SIZE limits the amount of extra memory used
> for batching. There is a small extra memory overhead of allocating the
> "reqs" and "buffers" arrays for compressors that do not support batching.

That's two pointers per-CPU (i.e. 16 bytes on x86_64), right? Please
call that out in the commit log.

> 
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> ---
>  mm/zswap.c | 99 +++++++++++++++++++++++++++++++++++++-----------------
>  1 file changed, 69 insertions(+), 30 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index cff96df1df8b..fae59d6d5147 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -78,6 +78,16 @@ static bool zswap_pool_reached_full;
>  
>  #define ZSWAP_PARAM_UNSET ""
>  
> +/*
> + * For compression batching of large folios:
> + * Maximum number of acomp compress requests that will be processed
> + * in a batch, iff the zswap compressor supports batching.
> + * This limit exists because we preallocate enough requests and buffers
> + * in the per-cpu acomp_ctx accordingly. Hence, a higher limit means higher
> + * memory usage.
> + */

That's too verbose. Let's do something like:

/* Limit the batch size to limit per-CPU memory usage for reqs and buffers */
#define ZSWAP_MAX_BATCH_SIZE 8U

> +#define ZSWAP_MAX_BATCH_SIZE 8U
> +
>  static int zswap_setup(void);
>  
>  /* Enable/disable zswap */
> @@ -143,8 +153,8 @@ bool zswap_never_enabled(void)
>  
>  struct crypto_acomp_ctx {
>  	struct crypto_acomp *acomp;
> -	struct acomp_req *req;
> -	u8 *buffer;
> +	struct acomp_req **reqs;
> +	u8 **buffers;
>  	u8 nr_reqs;
>  	struct crypto_wait wait;
>  	struct mutex mutex;
> @@ -251,13 +261,22 @@ static void __zswap_pool_empty(struct percpu_ref *ref);
>  static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
>  {
>  	if (!IS_ERR_OR_NULL(acomp_ctx) && acomp_ctx->nr_reqs) {
> +		u8 i;
> +
> +		if (acomp_ctx->reqs) {
> +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> +				if (!IS_ERR_OR_NULL(acomp_ctx->reqs[i]))

Hmm I just realized we check IS_ERR_OR_NULL() here for the requests, but
only a NULL check in zswap_cpu_comp_prepare(). We also check
IS_ERR_OR_NULL here for acomp, but only IS_ERR() in
zswap_cpu_comp_prepare().

This doesn't make sense. Would you be able to include a patch before
this one to make these consistent? I can also send a follow up patch.
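
(For reference, the semantics behind the mismatch, per <linux/err.h>; a
minimal sketch, not the zswap code itself:)

	/*
	 * IS_ERR(p)         - true only for ERR_PTR()-encoded error values.
	 * IS_ERR_OR_NULL(p) - true for ERR_PTR() errors or for NULL.
	 *
	 * crypto_alloc_acomp() fails with an ERR_PTR(), never NULL, so
	 * IS_ERR() suffices at the allocation site; acomp_request_alloc()
	 * fails with NULL, never an ERR_PTR(), so a plain NULL check
	 * suffices there. The teardown path using IS_ERR_OR_NULL() for
	 * both is the broader, defensive form.
	 */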

> +					acomp_request_free(acomp_ctx->reqs[i]);

Please add braces for the for loop here for readability, since the body
has more than one line, even if it's technically not required.

> +			kfree(acomp_ctx->reqs);
> +			acomp_ctx->reqs = NULL;
> +		}
>  
> -		if (!IS_ERR_OR_NULL(acomp_ctx->req))
> -			acomp_request_free(acomp_ctx->req);
> -		acomp_ctx->req = NULL;
> -
> -		kfree(acomp_ctx->buffer);
> -		acomp_ctx->buffer = NULL;
> +		if (acomp_ctx->buffers) {
> +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> +				kfree(acomp_ctx->buffers[i]);
> +			kfree(acomp_ctx->buffers);
> +			acomp_ctx->buffers = NULL;
> +		}
>  
>  		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
>  			crypto_free_acomp(acomp_ctx->acomp);
> @@ -271,6 +290,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
>  	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
>  	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
>  	int ret = -ENOMEM;
> +	u8 i;
>  
>  	/*
>  	 * Just to be even more fail-safe against changes in assumptions and/or
> @@ -292,22 +312,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
>  		goto fail;
>  	}
>  
> -	acomp_ctx->nr_reqs = 1;
> +	acomp_ctx->nr_reqs = min(ZSWAP_MAX_BATCH_SIZE,
> +				 crypto_acomp_batch_size(acomp_ctx->acomp));
>  
> -	acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
> -	if (!acomp_ctx->req) {
> -		pr_err("could not alloc crypto acomp_request %s\n",
> -		       pool->tfm_name);
> -		ret = -ENOMEM;
> +	acomp_ctx->reqs = kcalloc_node(acomp_ctx->nr_reqs, sizeof(struct acomp_req *),
> +				       GFP_KERNEL, cpu_to_node(cpu));
> +	if (!acomp_ctx->reqs)
>  		goto fail;
> +
> +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> +		acomp_ctx->reqs[i] = acomp_request_alloc(acomp_ctx->acomp);
> +		if (!acomp_ctx->reqs[i]) {
> +			pr_err("could not alloc crypto acomp_request reqs[%d] %s\n",
> +				i, pool->tfm_name);
> +			goto fail;
> +		}
>  	}
>  
> -	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> -	if (!acomp_ctx->buffer) {
> -		ret = -ENOMEM;
> +	acomp_ctx->buffers = kcalloc_node(acomp_ctx->nr_reqs, sizeof(u8 *),
> +					  GFP_KERNEL, cpu_to_node(cpu));
> +	if (!acomp_ctx->buffers)
>  		goto fail;
> +
> +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> +		acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
> +						     cpu_to_node(cpu));
> +		if (!acomp_ctx->buffers[i])
> +			goto fail;
>  	}
>  
> +	/*
> +	 * The crypto_wait is used only in fully synchronous, i.e., with scomp
> +	 * or non-poll mode of acomp, hence there is only one "wait" per
> +	 * acomp_ctx, with callback set to reqs[0], under the assumption that
> +	 * there is at least 1 request per acomp_ctx.
> +	 */

I am not sure I understand. Does this say that we assume that scomp or
non-poll acomp will never use batching so having a single "wait" is
fine?

If so, this needs to be enforced at runtime or at least have a warning,
and not just mentioned in a comment, in case batching support is ever
added for these. Please clarify.

We should also probably merge the comments above crypto_init_wait() and
acomp_request_set_callback() now.

>  	crypto_init_wait(&acomp_ctx->wait);
>  
>  	/*
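
For background on the single-"wait" pattern under discussion:
crypto_wait_req() paired with crypto_req_done() is the standard way to
drive one async request synchronously (a generic sketch of the crypto
API usage, with req standing in for acomp_ctx->reqs[0]; not the final
v9 code):

	struct crypto_wait wait;
	int err;

	crypto_init_wait(&wait);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	/*
	 * If the backend completes synchronously (e.g. scomp),
	 * crypto_wait_req() returns without blocking; otherwise it
	 * sleeps until crypto_req_done() reports the final status.
	 */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
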
Sridhar, Kanchana P April 30, 2025, 9:15 p.m. UTC | #2
> -----Original Message-----
> From: Yosry Ahmed <yosry.ahmed@linux.dev>
> Sent: Thursday, March 6, 2025 12:01 PM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org; hannes@cmpxchg.org;
> nphamcs@gmail.com; chengming.zhou@linux.dev; usamaarif642@gmail.com;
> ryan.roberts@arm.com; 21cnbao@gmail.com; ying.huang@linux.alibaba.com;
> akpm@linux-foundation.org; linux-crypto@vger.kernel.org;
> herbert@gondor.apana.org.au; davem@davemloft.net; clabbe@baylibre.com;
> ardb@kernel.org; ebiggers@google.com; surenb@google.com;
> Accardi, Kristen C <kristen.c.accardi@intel.com>;
> Feghali, Wajdi K <wajdi.k.feghali@intel.com>;
> Gopal, Vinodh <vinodh.gopal@intel.com>
> Subject: Re: [PATCH v8 13/14] mm: zswap: Allocate pool batching resources if
> the compressor supports batching.
> 
> On Mon, Mar 03, 2025 at 12:47:23AM -0800, Kanchana P Sridhar wrote:
> > This patch adds support for the per-CPU acomp_ctx to track multiple
> > compression/decompression requests and multiple compression destination
> > buffers. The zswap_cpu_comp_prepare() CPU onlining code will get the
> > maximum batch-size the compressor supports. If so, it will allocate the
> > necessary batching resources.
> >
> > However, zswap does not use more than one request yet. Follow-up patches
> > will actually utilize the multiple acomp_ctx requests/buffers for batch
> > compression/decompression of multiple pages.
> >
> > The newly added ZSWAP_MAX_BATCH_SIZE limits the amount of extra memory used
> > for batching. There is a small extra memory overhead of allocating the
> > "reqs" and "buffers" arrays for compressors that do not support batching.
> 
> That's two pointers per-CPU (i.e. 16 bytes on x86_64), right? Please
> call that out in the commit log.

Yes, this is done.

Thanks,
Kanchana

> 
> >
> > Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> > ---
> >  mm/zswap.c | 99 +++++++++++++++++++++++++++++++++++++-----------------
> >  1 file changed, 69 insertions(+), 30 deletions(-)
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index cff96df1df8b..fae59d6d5147 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -78,6 +78,16 @@ static bool zswap_pool_reached_full;
> >
> >  #define ZSWAP_PARAM_UNSET ""
> >
> > +/*
> > + * For compression batching of large folios:
> > + * Maximum number of acomp compress requests that will be processed
> > + * in a batch, iff the zswap compressor supports batching.
> > + * This limit exists because we preallocate enough requests and buffers
> > + * in the per-cpu acomp_ctx accordingly. Hence, a higher limit means higher
> > + * memory usage.
> > + */
> 
> That's too verbose. Let's do something like:
> 
> /* Limit the batch size to limit per-CPU memory usage for reqs and buffers */
> #define ZSWAP_MAX_BATCH_SIZE 8U

Addressed in v9.

> 
> > +#define ZSWAP_MAX_BATCH_SIZE 8U
> > +
> >  static int zswap_setup(void);
> >
> >  /* Enable/disable zswap */
> > @@ -143,8 +153,8 @@ bool zswap_never_enabled(void)
> >
> >  struct crypto_acomp_ctx {
> >  	struct crypto_acomp *acomp;
> > -	struct acomp_req *req;
> > -	u8 *buffer;
> > +	struct acomp_req **reqs;
> > +	u8 **buffers;
> >  	u8 nr_reqs;
> >  	struct crypto_wait wait;
> >  	struct mutex mutex;
> > @@ -251,13 +261,22 @@ static void __zswap_pool_empty(struct percpu_ref *ref);
> >  static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
> >  {
> >  	if (!IS_ERR_OR_NULL(acomp_ctx) && acomp_ctx->nr_reqs) {
> > +		u8 i;
> > +
> > +		if (acomp_ctx->reqs) {
> > +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> > +				if (!IS_ERR_OR_NULL(acomp_ctx->reqs[i]))
> 
> Hmm I just realized we check IS_ERR_OR_NULL() here for the requests, but
> only a NULL check in zswap_cpu_comp_prepare(). We also check
> IS_ERR_OR_NULL here for acomp, but only IS_ERR() in
> zswap_cpu_comp_prepare().
> 
> This doesn't make sense. Would you be able to include a patch before
> this one to make these consistent? I can also send a follow up patch.

No worries, I have included this as patch 16 in the v9 series.

> 
> > +					acomp_request_free(acomp_ctx->reqs[i]);
> 
> Please add braces for the for loop here for readability, since the body
> has more than one line, even if it's technically not required.

Done.

> 
> > +			kfree(acomp_ctx->reqs);
> > +			acomp_ctx->reqs = NULL;
> > +		}
> >
> > -		if (!IS_ERR_OR_NULL(acomp_ctx->req))
> > -			acomp_request_free(acomp_ctx->req);
> > -		acomp_ctx->req = NULL;
> > -
> > -		kfree(acomp_ctx->buffer);
> > -		acomp_ctx->buffer = NULL;
> > +		if (acomp_ctx->buffers) {
> > +			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
> > +				kfree(acomp_ctx->buffers[i]);
> > +			kfree(acomp_ctx->buffers);
> > +			acomp_ctx->buffers = NULL;
> > +		}
> >
> >  		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
> >  			crypto_free_acomp(acomp_ctx->acomp);
> > @@ -271,6 +290,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> >  	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
> >  	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
> >  	int ret = -ENOMEM;
> > +	u8 i;
> >
> >  	/*
> >  	 * Just to be even more fail-safe against changes in assumptions and/or
> > @@ -292,22 +312,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
> >  		goto fail;
> >  	}
> >
> > -	acomp_ctx->nr_reqs = 1;
> > +	acomp_ctx->nr_reqs = min(ZSWAP_MAX_BATCH_SIZE,
> > +				 crypto_acomp_batch_size(acomp_ctx->acomp));
> >
> > -	acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
> > -	if (!acomp_ctx->req) {
> > -		pr_err("could not alloc crypto acomp_request %s\n",
> > -		       pool->tfm_name);
> > -		ret = -ENOMEM;
> > +	acomp_ctx->reqs = kcalloc_node(acomp_ctx->nr_reqs, sizeof(struct acomp_req *),
> > +				       GFP_KERNEL, cpu_to_node(cpu));
> > +	if (!acomp_ctx->reqs)
> >  		goto fail;
> > +
> > +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> > +		acomp_ctx->reqs[i] = acomp_request_alloc(acomp_ctx->acomp);
> > +		if (!acomp_ctx->reqs[i]) {
> > +			pr_err("could not alloc crypto acomp_request reqs[%d] %s\n",
> > +				i, pool->tfm_name);
> > +			goto fail;
> > +		}
> >  	}
> >
> > -	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
> > -	if (!acomp_ctx->buffer) {
> > -		ret = -ENOMEM;
> > +	acomp_ctx->buffers = kcalloc_node(acomp_ctx->nr_reqs, sizeof(u8 *),
> > +					  GFP_KERNEL, cpu_to_node(cpu));
> > +	if (!acomp_ctx->buffers)
> >  		goto fail;
> > +
> > +	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
> > +		acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
> > +						     cpu_to_node(cpu));
> > +		if (!acomp_ctx->buffers[i])
> > +			goto fail;
> >  	}
> >
> > +	/*
> > +	 * The crypto_wait is used only in fully synchronous, i.e., with scomp
> > +	 * or non-poll mode of acomp, hence there is only one "wait" per
> > +	 * acomp_ctx, with callback set to reqs[0], under the assumption that
> > +	 * there is at least 1 request per acomp_ctx.
> > +	 */
> 
> I am not sure I understand. Does this say that we assume that scomp or
> non-poll acomp will never use batching so having a single "wait" is
> fine?
> 
> If so, this needs to be enforced at runtime or at least have a warning,
> and not just mentioned in a comment, in case batching support is ever
> added for these. Please clarify.

This was pertaining to the request chaining batching implementation and
is no longer relevant. I have deleted this comment in v9, in which
crypto_acomp_batch_[de]compress() do not take a "struct crypto_wait"
parameter.

> 
> We should also probably merge the comments above crypto_init_wait() and
> acomp_request_set_callback() now.

Done, and clarified the use of the single "wait" in zswap calls to
crypto_acomp_[de]compress().

Thanks,
Kanchana

> 
> >  	crypto_init_wait(&acomp_ctx->wait);
> >
> >  	/*

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index cff96df1df8b..fae59d6d5147 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -78,6 +78,16 @@  static bool zswap_pool_reached_full;
 
 #define ZSWAP_PARAM_UNSET ""
 
+/*
+ * For compression batching of large folios:
+ * Maximum number of acomp compress requests that will be processed
+ * in a batch, iff the zswap compressor supports batching.
+ * This limit exists because we preallocate enough requests and buffers
+ * in the per-cpu acomp_ctx accordingly. Hence, a higher limit means higher
+ * memory usage.
+ */
+#define ZSWAP_MAX_BATCH_SIZE 8U
+
 static int zswap_setup(void);
 
 /* Enable/disable zswap */
@@ -143,8 +153,8 @@  bool zswap_never_enabled(void)
 
 struct crypto_acomp_ctx {
 	struct crypto_acomp *acomp;
-	struct acomp_req *req;
-	u8 *buffer;
+	struct acomp_req **reqs;
+	u8 **buffers;
 	u8 nr_reqs;
 	struct crypto_wait wait;
 	struct mutex mutex;
@@ -251,13 +261,22 @@  static void __zswap_pool_empty(struct percpu_ref *ref);
 static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
 {
 	if (!IS_ERR_OR_NULL(acomp_ctx) && acomp_ctx->nr_reqs) {
+		u8 i;
+
+		if (acomp_ctx->reqs) {
+			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
+				if (!IS_ERR_OR_NULL(acomp_ctx->reqs[i]))
+					acomp_request_free(acomp_ctx->reqs[i]);
+			kfree(acomp_ctx->reqs);
+			acomp_ctx->reqs = NULL;
+		}
 
-		if (!IS_ERR_OR_NULL(acomp_ctx->req))
-			acomp_request_free(acomp_ctx->req);
-		acomp_ctx->req = NULL;
-
-		kfree(acomp_ctx->buffer);
-		acomp_ctx->buffer = NULL;
+		if (acomp_ctx->buffers) {
+			for (i = 0; i < acomp_ctx->nr_reqs; ++i)
+				kfree(acomp_ctx->buffers[i]);
+			kfree(acomp_ctx->buffers);
+			acomp_ctx->buffers = NULL;
+		}
 
 		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
 			crypto_free_acomp(acomp_ctx->acomp);
@@ -271,6 +290,7 @@  static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 	int ret = -ENOMEM;
+	u8 i;
 
 	/*
 	 * Just to be even more fail-safe against changes in assumptions and/or
@@ -292,22 +312,41 @@  static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 		goto fail;
 	}
 
-	acomp_ctx->nr_reqs = 1;
+	acomp_ctx->nr_reqs = min(ZSWAP_MAX_BATCH_SIZE,
+				 crypto_acomp_batch_size(acomp_ctx->acomp));
 
-	acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
-	if (!acomp_ctx->req) {
-		pr_err("could not alloc crypto acomp_request %s\n",
-		       pool->tfm_name);
-		ret = -ENOMEM;
+	acomp_ctx->reqs = kcalloc_node(acomp_ctx->nr_reqs, sizeof(struct acomp_req *),
+				       GFP_KERNEL, cpu_to_node(cpu));
+	if (!acomp_ctx->reqs)
 		goto fail;
+
+	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
+		acomp_ctx->reqs[i] = acomp_request_alloc(acomp_ctx->acomp);
+		if (!acomp_ctx->reqs[i]) {
+			pr_err("could not alloc crypto acomp_request reqs[%d] %s\n",
+				i, pool->tfm_name);
+			goto fail;
+		}
 	}
 
-	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-	if (!acomp_ctx->buffer) {
-		ret = -ENOMEM;
+	acomp_ctx->buffers = kcalloc_node(acomp_ctx->nr_reqs, sizeof(u8 *),
+					  GFP_KERNEL, cpu_to_node(cpu));
+	if (!acomp_ctx->buffers)
 		goto fail;
+
+	for (i = 0; i < acomp_ctx->nr_reqs; ++i) {
+		acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
+						     cpu_to_node(cpu));
+		if (!acomp_ctx->buffers[i])
+			goto fail;
 	}
 
+	/*
+	 * The crypto_wait is used only in fully synchronous, i.e., with scomp
+	 * or non-poll mode of acomp, hence there is only one "wait" per
+	 * acomp_ctx, with callback set to reqs[0], under the assumption that
+	 * there is at least 1 request per acomp_ctx.
+	 */
 	crypto_init_wait(&acomp_ctx->wait);
 
 	/*
@@ -315,7 +354,7 @@  static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
 	 * won't be called, crypto_wait_req() will return without blocking.
 	 */
-	acomp_request_set_callback(acomp_ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	acomp_request_set_callback(acomp_ctx->reqs[0], CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   crypto_req_done, &acomp_ctx->wait);
 
 	acomp_ctx->is_sleepable = acomp_is_async(acomp_ctx->acomp);
@@ -407,8 +446,8 @@  static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 		struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 
 		acomp_ctx->acomp = NULL;
-		acomp_ctx->req = NULL;
-		acomp_ctx->buffer = NULL;
+		acomp_ctx->reqs = NULL;
+		acomp_ctx->buffers = NULL;
 		acomp_ctx->__online = false;
 		acomp_ctx->nr_reqs = 0;
 		mutex_init(&acomp_ctx->mutex);
@@ -1026,7 +1065,7 @@  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	u8 *dst;
 
 	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
-	dst = acomp_ctx->buffer;
+	dst = acomp_ctx->buffers[0];
 	sg_init_table(&input, 1);
 	sg_set_page(&input, page, PAGE_SIZE, 0);
 
@@ -1036,7 +1075,7 @@  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	 * giving the dst buffer with enough length to avoid buffer overflow.
 	 */
 	sg_init_one(&output, dst, PAGE_SIZE * 2);
-	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
+	acomp_request_set_params(acomp_ctx->reqs[0], &input, &output, PAGE_SIZE, dlen);
 
 	/*
 	 * it maybe looks a little bit silly that we send an asynchronous request,
@@ -1050,8 +1089,8 @@  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	 * but in different threads running on different cpu, we have different
 	 * acomp instance, so multiple threads can do (de)compression in parallel.
 	 */
-	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
-	dlen = acomp_ctx->req->dlen;
+	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->reqs[0]), &acomp_ctx->wait);
+	dlen = acomp_ctx->reqs[0]->dlen;
 	if (comp_ret)
 		goto unlock;
 
@@ -1102,19 +1141,19 @@  static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	 */
 	if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
 	    !virt_addr_valid(src)) {
-		memcpy(acomp_ctx->buffer, src, entry->length);
-		src = acomp_ctx->buffer;
+		memcpy(acomp_ctx->buffers[0], src, entry->length);
+		src = acomp_ctx->buffers[0];
 		zpool_unmap_handle(zpool, entry->handle);
 	}
 
 	sg_init_one(&input, src, entry->length);
 	sg_init_table(&output, 1);
 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
-	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
-	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
-	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+	acomp_request_set_params(acomp_ctx->reqs[0], &input, &output, entry->length, PAGE_SIZE);
+	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->reqs[0]), &acomp_ctx->wait));
+	BUG_ON(acomp_ctx->reqs[0]->dlen != PAGE_SIZE);
 
-	if (src != acomp_ctx->buffer)
+	if (src != acomp_ctx->buffers[0])
 		zpool_unmap_handle(zpool, entry->handle);
 	acomp_ctx_put_unlock(acomp_ctx);
 }
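
To illustrate how the preallocated reqs/buffers pair up, a hypothetical
follow-up could walk them one page per request. This is a naive
one-at-a-time sketch for illustration only; the actual series batches
through crypto_acomp_batch_[de]compress() instead:

	/*
	 * Hypothetical sketch: compress nr pages (nr <= acomp_ctx->nr_reqs)
	 * from a large folio, each with its own preallocated req/buffer.
	 */
	for (i = 0; i < nr; ++i) {
		struct scatterlist input, output;

		sg_init_table(&input, 1);
		sg_set_page(&input, pages[i], PAGE_SIZE, 0);
		sg_init_one(&output, acomp_ctx->buffers[i], PAGE_SIZE * 2);
		acomp_request_set_params(acomp_ctx->reqs[i], &input, &output,
					 PAGE_SIZE, PAGE_SIZE);
		err = crypto_wait_req(crypto_acomp_compress(acomp_ctx->reqs[i]),
				      &acomp_ctx->wait);
		if (err)
			break;
	}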