crypto: Add generic 64-bit carry-less multiply routine

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
master
Richard Henderson 2023-07-11 10:10:47 +01:00
parent f56d3c1a14
commit 00f463b38a
3 changed files with 51 additions and 0 deletions

View File

@@ -92,3 +92,20 @@ uint64_t clmul_32(uint32_t n, uint32_t m32)
}
return r;
}
/*
 * Generic (portable) 64x64 -> 128-bit carry-less multiply.
 *
 * Each set bit i of @n contributes (m << i) to the 128-bit product,
 * with xor standing in for addition (polynomial multiply over GF(2)).
 * Returns the product as an Int128 built from the low and high halves.
 */
Int128 clmul_64_gen(uint64_t n, uint64_t m)
{
    /* Bit 0 cannot shift @m, so it only affects the low 64 bits. */
    uint64_t lo = (n & 1) ? m : 0;
    uint64_t hi = 0;

    for (int bit = 63; bit >= 1; --bit) {
        /* All-ones when bit is set in @n, else zero: branch-free select. */
        uint64_t sel = -((n >> bit) & 1);

        lo ^= (m << bit) & sel;
        hi ^= (m >> (64 - bit)) & sel;
    }
    return int128_make128(lo, hi);
}

View File

@@ -0,0 +1,15 @@
/*
 * No host specific carry-less multiply acceleration.
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef GENERIC_HOST_CRYPTO_CLMUL_H
#define GENERIC_HOST_CRYPTO_CLMUL_H
/* Fallback: tells the dispatcher in crypto/clmul.h to take the generic path. */
#define HAVE_CLMUL_ACCEL false
/* No function attribute needed when there is no accelerated entry point. */
#define ATTR_CLMUL_ACCEL
/*
 * Declared so crypto/clmul.h compiles, but must never be reachable when
 * HAVE_CLMUL_ACCEL is false.  NOTE(review): QEMU_ERROR presumably expands
 * to __attribute__((error(...))), making any surviving call a compile-time
 * diagnostic — confirm against include/qemu/compiler.h.
 */
Int128 clmul_64_accel(uint64_t, uint64_t)
QEMU_ERROR("unsupported accel");
#endif /* GENERIC_HOST_CRYPTO_CLMUL_H */

View File

@@ -8,6 +8,9 @@
#ifndef CRYPTO_CLMUL_H
#define CRYPTO_CLMUL_H
#include "qemu/int128.h"
#include "host/crypto/clmul.h"
/**
* clmul_8x8_low:
*
@@ -61,4 +64,20 @@ uint64_t clmul_16x2_odd(uint64_t, uint64_t);
*/
uint64_t clmul_32(uint32_t, uint32_t);
/**
* clmul_64:
*
* Perform a 64x64->128 carry-less multiply.
*/
Int128 clmul_64_gen(uint64_t, uint64_t);
static inline Int128 clmul_64(uint64_t a, uint64_t b)
{
    /*
     * HAVE_CLMUL_ACCEL is a compile-time constant supplied by the per-host
     * header, so the untaken branch is discarded by the compiler.
     */
    return HAVE_CLMUL_ACCEL ? clmul_64_accel(a, b) : clmul_64_gen(a, b);
}
#endif /* CRYPTO_CLMUL_H */