[v3,05/19] crypto: Add generic 16-bit carry-less multiply routines

Message ID 20230821161854.419893-6-richard.henderson@linaro.org
State Superseded
Series crypto: Provide clmul.h and host accel

Commit Message

Richard Henderson Aug. 21, 2023, 4:18 p.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/crypto/clmul.h | 16 ++++++++++++++++
 crypto/clmul.c         | 21 +++++++++++++++++++++
 2 files changed, 37 insertions(+)

Comments

Ard Biesheuvel Sept. 10, 2023, 12:28 p.m. UTC | #1
On Mon, 21 Aug 2023 at 18:19, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  include/crypto/clmul.h | 16 ++++++++++++++++
>  crypto/clmul.c         | 21 +++++++++++++++++++++
>  2 files changed, 37 insertions(+)
>
> diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
> index 153b5e3057..c7ad28aa85 100644
> --- a/include/crypto/clmul.h
> +++ b/include/crypto/clmul.h
> @@ -38,4 +38,20 @@ uint64_t clmul_8x4_odd(uint64_t, uint64_t);
>   */
>  uint64_t clmul_8x4_packed(uint32_t, uint32_t);
>
> +/**
> + * clmul_16x2_even:
> + *
> + * Perform two 16x16->32 carry-less multiplies.
> + * The odd words of the inputs are ignored.
> + */
> +uint64_t clmul_16x2_even(uint64_t, uint64_t);
> +
> +/**
> + * clmul_16x2_odd:
> + *
> + * Perform two 16x16->32 carry-less multiplies.
> + * The even bytes of the inputs are ignored.

even words

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>


> + */
> +uint64_t clmul_16x2_odd(uint64_t, uint64_t);
> +
>  #endif /* CRYPTO_CLMUL_H */
> diff --git a/crypto/clmul.c b/crypto/clmul.c
> index 82d873fee5..2c87cfbf8a 100644
> --- a/crypto/clmul.c
> +++ b/crypto/clmul.c
> @@ -58,3 +58,24 @@ uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
>  {
>      return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
>  }
> +
> +uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
> +{
> +    uint64_t r = 0;
> +
> +    n &= 0x0000ffff0000ffffull;
> +    m &= 0x0000ffff0000ffffull;
> +
> +    for (int i = 0; i < 16; ++i) {
> +        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
> +        r ^= m & mask;
> +        n >>= 1;
> +        m <<= 1;
> +    }
> +    return r;
> +}
> +
> +uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
> +{
> +    return clmul_16x2_even(n >> 16, m >> 16);
> +}
> --
> 2.34.1
>

Patch

diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
index 153b5e3057..c7ad28aa85 100644
--- a/include/crypto/clmul.h
+++ b/include/crypto/clmul.h
@@ -38,4 +38,20 @@ uint64_t clmul_8x4_odd(uint64_t, uint64_t);
  */
 uint64_t clmul_8x4_packed(uint32_t, uint32_t);
 
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
 #endif /* CRYPTO_CLMUL_H */
diff --git a/crypto/clmul.c b/crypto/clmul.c
index 82d873fee5..2c87cfbf8a 100644
--- a/crypto/clmul.c
+++ b/crypto/clmul.c
@@ -58,3 +58,24 @@ uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
 {
     return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
 }
+
+uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    n &= 0x0000ffff0000ffffull;
+    m &= 0x0000ffff0000ffffull;
+
+    for (int i = 0; i < 16; ++i) {
+        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
+uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
+{
+    return clmul_16x2_even(n >> 16, m >> 16);
+}
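
A minimal standalone known-answer check of the new routines (illustrative only, not part of the patch): the prototypes are repeated by hand instead of including "crypto/clmul.h", and the input constants and the main() harness are assumptions chosen for this sketch. Build it against crypto/clmul.c.

/* Illustrative known-answer check, assuming crypto/clmul.c is linked in. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Prototypes repeated here instead of including "crypto/clmul.h". */
uint64_t clmul_16x2_even(uint64_t, uint64_t);
uint64_t clmul_16x2_odd(uint64_t, uint64_t);

int main(void)
{
    /*
     * Even 16-bit words: lane 0 = clmul(0x0003, 0x0005) = 0x0000000f,
     * lane 1 = clmul(0xffff, 0xffff) = 0x55555555 (squaring in GF(2)[x]
     * spreads the bits apart, since there are no carries between columns).
     */
    uint64_t n = 0x0000ffff00000003ull;
    uint64_t m = 0x0000ffff00000005ull;
    assert(clmul_16x2_even(n, m) == 0x555555550000000full);

    /* The odd variant reads the same operands from the odd 16-bit words. */
    assert(clmul_16x2_odd(n << 16, m << 16) == 0x555555550000000full);

    printf("clmul_16x2 known-answer checks passed\n");
    return 0;
}

The generic loop needs no per-lane branching: multiplying (n & 0x0000000100000001ull) by 0xffffffffull broadcasts each lane's low bit into a full 32-bit mask, so both 16x16->32 products are accumulated in a single 16-iteration pass, and masking the inputs with 0x0000ffff0000ffffull keeps the shifted multiplicand from spilling into the neighbouring lane.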