
[RFC,v1,04/13] mm: zswap: zswap_compress()/decompress() can submit, then poll an acomp_req.

Message ID 20241018064101.336232-5-kanchana.p.sridhar@intel.com
State New
Series zswap IAA compress batching

Commit Message

Sridhar, Kanchana P Oct. 18, 2024, 6:40 a.m. UTC
If the crypto_acomp has a poll interface registered, zswap_compress()
and zswap_decompress() will submit the acomp_req, and then poll() for a
successful completion/error status in a busy-wait loop. This allows an
asynchronous way to manage (potentially multiple) acomp_reqs without
the use of interrupts, which is supported in the iaa_crypto driver.

This enables us to implement batch submission of multiple
compression/decompression jobs to the Intel IAA hardware accelerator,
which will process them in parallel, followed by polling the batch's
acomp_reqs for completion status.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
 mm/zswap.c | 51 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 12 deletions(-)
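
As a rough illustration of the batching this is meant to enable (a sketch only, not code from this series: submit_then_poll_batch(), reqs[], errs[] and nr_reqs are hypothetical names, while crypto_acomp_compress() and the crypto_acomp_poll() interface are the ones used in the diff below), a caller holding several prepared acomp_reqs could submit them all first and only then poll:

#include <crypto/acomp.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch: submit a batch of already-initialized acomp_reqs,
 * then busy-poll each one until it resolves to 0 or a real error.
 */
static void submit_then_poll_batch(struct acomp_req *reqs[], int errs[],
				   int nr_reqs)
{
	int i;

	/* Submit everything up front; -EINPROGRESS means "poll later". */
	for (i = 0; i < nr_reqs; i++)
		errs[i] = crypto_acomp_compress(reqs[i]);

	/* crypto_acomp_poll() returns -EAGAIN while a request is pending. */
	for (i = 0; i < nr_reqs; i++)
		while (errs[i] == -EINPROGRESS || errs[i] == -EAGAIN)
			errs[i] = crypto_acomp_poll(reqs[i]);
}

With hardware like IAA, the device can work on the whole batch in parallel while one CPU polls, which is the parallelism the commit message above describes.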

Comments

Yosry Ahmed Oct. 23, 2024, 12:48 a.m. UTC | #1
On Thu, Oct 17, 2024 at 11:41 PM Kanchana P Sridhar
<kanchana.p.sridhar@intel.com> wrote:
>
> If the crypto_acomp has a poll interface registered, zswap_compress()
> and zswap_decompress() will submit the acomp_req, and then poll() for a
> successful completion/error status in a busy-wait loop. This allows an
> asynchronous way to manage (potentially multiple) acomp_reqs without
> the use of interrupts, which is supported in the iaa_crypto driver.
>
> This enables us to implement batch submission of multiple
> compression/decompression jobs to the Intel IAA hardware accelerator,
> which will process them in parallel; followed by polling the batch's
> acomp_reqs for completion status.
>
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> ---
>  mm/zswap.c | 51 +++++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 39 insertions(+), 12 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index f6316b66fb23..948c9745ee57 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -910,18 +910,34 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>         acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
>
>         /*
> -        * it maybe looks a little bit silly that we send an asynchronous request,
> -        * then wait for its completion synchronously. This makes the process look
> -        * synchronous in fact.
> -        * Theoretically, acomp supports users send multiple acomp requests in one
> -        * acomp instance, then get those requests done simultaneously. but in this
> -        * case, zswap actually does store and load page by page, there is no
> -        * existing method to send the second page before the first page is done
> -        * in one thread doing zwap.
> -        * but in different threads running on different cpu, we have different
> -        * acomp instance, so multiple threads can do (de)compression in parallel.
> +        * If the crypto_acomp provides an asynchronous poll() interface,
> +        * submit the descriptor and poll for a completion status.
> +        *
> +        * It maybe looks a little bit silly that we send an asynchronous
> +        * request, then wait for its completion in a busy-wait poll loop, or,
> +        * synchronously. This makes the process look synchronous in fact.
> +        * Theoretically, acomp supports users send multiple acomp requests in
> +        * one acomp instance, then get those requests done simultaneously.
> +        * But in this case, zswap actually does store and load page by page,
> +        * there is no existing method to send the second page before the
> +        * first page is done in one thread doing zswap.
> +        * But in different threads running on different cpu, we have different
> +        * acomp instance, so multiple threads can do (de)compression in
> +        * parallel.
>          */
> -       comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
> +       if (acomp_ctx->acomp->poll) {
> +               comp_ret = crypto_acomp_compress(acomp_ctx->req);
> +               if (comp_ret == -EINPROGRESS) {
> +                       do {
> +                               comp_ret = crypto_acomp_poll(acomp_ctx->req);
> +                               if (comp_ret && comp_ret != -EAGAIN)
> +                                       break;
> +                       } while (comp_ret);
> +               }
> +       } else {
> +               comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
> +       }
> +

Is Herbert suggesting that crypto_wait_req(crypto_acomp_compress(..))
essentially do the poll internally for IAA, and hence this change can
be dropped?

>         dlen = acomp_ctx->req->dlen;
>         if (comp_ret)
>                 goto unlock;
> @@ -959,6 +975,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>         struct scatterlist input, output;
>         struct crypto_acomp_ctx *acomp_ctx;
>         u8 *src;
> +       int ret;
>
>         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
>         mutex_lock(&acomp_ctx->mutex);
> @@ -984,7 +1001,17 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>         sg_init_table(&output, 1);
>         sg_set_folio(&output, folio, PAGE_SIZE, 0);
>         acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> -       BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> +       if (acomp_ctx->acomp->poll) {
> +               ret = crypto_acomp_decompress(acomp_ctx->req);
> +               if (ret == -EINPROGRESS) {
> +                       do {
> +                               ret = crypto_acomp_poll(acomp_ctx->req);
> +                               BUG_ON(ret && ret != -EAGAIN);
> +                       } while (ret);
> +               }
> +       } else {
> +               BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> +       }
>         BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
>         mutex_unlock(&acomp_ctx->mutex);
>
> --
> 2.27.0
>
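For reference, the non-poll path kept in the else branches above is the standard crypto "submit, then wait on a completion" pattern; zswap keeps the crypto_wait in acomp_ctx->wait, which is what the &acomp_ctx->wait argument refers to. A minimal generic sketch of that pattern (the wrapper name is hypothetical and req is assumed to be a fully set-up acomp_req):

#include <linux/crypto.h>
#include <crypto/acomp.h>

/* Hypothetical wrapper showing the conventional wait-based flow. */
static int compress_and_wait(struct acomp_req *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	/* crypto_req_done() completes 'wait' when the request finishes. */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/*
	 * crypto_wait_req() passes a terminal status straight through; on
	 * -EINPROGRESS/-EBUSY it sleeps on 'wait' and returns the final
	 * completion status instead.
	 */
	return crypto_wait_req(crypto_acomp_compress(req), &wait);
}

Whether the driver completes the request from an interrupt or from internal polling is invisible to this caller, which is what the question above is getting at.
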
Sridhar, Kanchana P Oct. 23, 2024, 2:01 a.m. UTC | #2
Hi Yosry,

> -----Original Message-----
> From: Yosry Ahmed <yosryahmed@google.com>
> Sent: Tuesday, October 22, 2024 5:49 PM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; nphamcs@gmail.com; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; ryan.roberts@arm.com; Huang, Ying
> <ying.huang@intel.com>; 21cnbao@gmail.com; akpm@linux-foundation.org;
> linux-crypto@vger.kernel.org; herbert@gondor.apana.org.au;
> davem@davemloft.net; clabbe@baylibre.com; ardb@kernel.org;
> ebiggers@google.com; surenb@google.com; Accardi, Kristen C
> <kristen.c.accardi@intel.com>; zanussi@kernel.org; viro@zeniv.linux.org.uk;
> brauner@kernel.org; jack@suse.cz; mcgrof@kernel.org; kees@kernel.org;
> joel.granados@kernel.org; bfoster@redhat.com; willy@infradead.org; linux-
> fsdevel@vger.kernel.org; Feghali, Wajdi K <wajdi.k.feghali@intel.com>; Gopal,
> Vinodh <vinodh.gopal@intel.com>
> Subject: Re: [RFC PATCH v1 04/13] mm: zswap:
> zswap_compress()/decompress() can submit, then poll an acomp_req.
> 
> On Thu, Oct 17, 2024 at 11:41 PM Kanchana P Sridhar
> <kanchana.p.sridhar@intel.com> wrote:
> >
> > [...]
> 
> Is Herbert suggesting that crypto_wait_req(crypto_acomp_compress(..))
> essentially do the poll internally for IAA, and hence this change can
> be dropped?

Yes, you're right. I plan to submit a v2 shortly with Herbert's suggestion.

Thanks,
Kanchana
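
For readers following the thread: the suggestion, as understood here, is that when the iaa_crypto driver runs in its interrupt-less mode it would busy-poll the device completion inside its own compress()/decompress() handlers and return a terminal status, so callers keep the single crypto_wait_req() call and never actually sleep. A rough driver-side sketch of that idea; hw_submit_descriptor() and hw_check_completion() are hypothetical placeholders, not actual iaa_crypto functions:

#include <crypto/acomp.h>
#include <linux/errno.h>

/* Hypothetical device helpers, assumed to exist elsewhere in the driver. */
int hw_submit_descriptor(struct acomp_req *req);
int hw_check_completion(struct acomp_req *req);

/*
 * Hypothetical sketch of a driver ->compress() handler that polls
 * internally instead of returning -EINPROGRESS.
 */
static int hw_acomp_compress(struct acomp_req *req)
{
	int ret;

	ret = hw_submit_descriptor(req);	/* hypothetical submit */
	if (ret)
		return ret;

	/* Busy-wait on the completion record; -EAGAIN means "not yet". */
	do {
		ret = hw_check_completion(req);	/* hypothetical poll */
	} while (ret == -EAGAIN);

	return ret;	/* 0 on success, -errno on failure */
}

With that, crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait) already returns the final status, and the zswap-side poll branches in this patch become unnecessary, which matches the plan for v2 mentioned above.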


Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index f6316b66fb23..948c9745ee57 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -910,18 +910,34 @@  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
 
 	/*
-	 * it maybe looks a little bit silly that we send an asynchronous request,
-	 * then wait for its completion synchronously. This makes the process look
-	 * synchronous in fact.
-	 * Theoretically, acomp supports users send multiple acomp requests in one
-	 * acomp instance, then get those requests done simultaneously. but in this
-	 * case, zswap actually does store and load page by page, there is no
-	 * existing method to send the second page before the first page is done
-	 * in one thread doing zwap.
-	 * but in different threads running on different cpu, we have different
-	 * acomp instance, so multiple threads can do (de)compression in parallel.
+	 * If the crypto_acomp provides an asynchronous poll() interface,
+	 * submit the descriptor and poll for a completion status.
+	 *
+	 * It maybe looks a little bit silly that we send an asynchronous
+	 * request, then wait for its completion in a busy-wait poll loop, or,
+	 * synchronously. This makes the process look synchronous in fact.
+	 * Theoretically, acomp supports users send multiple acomp requests in
+	 * one acomp instance, then get those requests done simultaneously.
+	 * But in this case, zswap actually does store and load page by page,
+	 * there is no existing method to send the second page before the
+	 * first page is done in one thread doing zswap.
+	 * But in different threads running on different cpu, we have different
+	 * acomp instance, so multiple threads can do (de)compression in
+	 * parallel.
 	 */
-	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+	if (acomp_ctx->acomp->poll) {
+		comp_ret = crypto_acomp_compress(acomp_ctx->req);
+		if (comp_ret == -EINPROGRESS) {
+			do {
+				comp_ret = crypto_acomp_poll(acomp_ctx->req);
+				if (comp_ret && comp_ret != -EAGAIN)
+					break;
+			} while (comp_ret);
+		}
+	} else {
+		comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+	}
+
 	dlen = acomp_ctx->req->dlen;
 	if (comp_ret)
 		goto unlock;
@@ -959,6 +975,7 @@  static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	u8 *src;
+	int ret;
 
 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 	mutex_lock(&acomp_ctx->mutex);
@@ -984,7 +1001,17 @@  static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	sg_init_table(&output, 1);
 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
-	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+	if (acomp_ctx->acomp->poll) {
+		ret = crypto_acomp_decompress(acomp_ctx->req);
+		if (ret == -EINPROGRESS) {
+			do {
+				ret = crypto_acomp_poll(acomp_ctx->req);
+				BUG_ON(ret && ret != -EAGAIN);
+			} while (ret);
+		}
+	} else {
+		BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+	}
 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
 	mutex_unlock(&acomp_ctx->mutex);
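
A small aside on the zswap side of this version: the submit-then-poll loop is duplicated between zswap_compress() and zswap_decompress(). If the polling branches were kept rather than dropped in v2, they could be factored into one helper; a sketch, with zswap_poll_acomp_req() being a hypothetical name:

#include <crypto/acomp.h>
#include <linux/errno.h>

/* Hypothetical helper: resolve an already-submitted acomp_req by polling. */
static int zswap_poll_acomp_req(struct acomp_req *req, int submit_ret)
{
	int ret = submit_ret;

	/* Anything other than -EINPROGRESS is already a final status. */
	if (ret != -EINPROGRESS)
		return ret;

	/* crypto_acomp_poll() returns -EAGAIN while still pending. */
	do {
		ret = crypto_acomp_poll(req);
	} while (ret == -EAGAIN);

	return ret;
}

Inside the if (acomp_ctx->acomp->poll) branch, the compress path would then reduce to comp_ret = zswap_poll_acomp_req(acomp_ctx->req, crypto_acomp_compress(acomp_ctx->req)); and the decompress path likewise with crypto_acomp_decompress(), keeping the BUG_ON()s where they are today.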