mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 21:33:00 +00:00
05924717ac
This patch introduces a new algorithm for multiplication of tristate numbers (tnums) that is provably sound. It is faster and more precise when compared to the existing method. Like the existing method, this new algorithm follows the long multiplication algorithm. The idea is to generate partial products by multiplying each bit in the multiplier (tnum a) with the multiplicand (tnum b), and adding the partial products after appropriately bit-shifting them. The new algorithm, however, uses just a single loop over the bits of the multiplier (tnum a) and accumulates only the uncertain components of the multiplicand (tnum b) into a mask-only tnum. The following paper explains the algorithm in more detail: https://arxiv.org/abs/2105.05398. A natural way to construct the tnum product is by performing a tnum addition on all the partial products. This algorithm presents another method of doing this: decompose each partial product into two tnums, consisting of the values and the masks separately. The mask-sum is accumulated within the loop in acc_m. The value-sum tnum is generated using a.value * b.value. The tnum constructed by tnum addition of the value-sum and the mask-sum contains all possible summations of concrete values drawn from the partial product tnums pairwise. We prove this result in the paper. Our evaluations show that the new algorithm is overall more precise (producing tnums with less uncertain components) than the existing method. As an illustrative example, consider the input tnums A and B. The numbers in the parenthesis correspond to (value;mask). A = 000000x1 (1;2) B = 0010011x (38;1) A * B (existing) = xxxxxxxx (0;255) A * B (new) = 0x1xxxxx (32;95) Importantly, we present a proof of soundness of the new algorithm in the aforementioned paper. Additionally, we show that this new algorithm is empirically faster than the existing method. 
Co-developed-by: Matan Shachnai <m.shachnai@rutgers.edu> Co-developed-by: Srinivas Narayana <srinivas.narayana@rutgers.edu> Co-developed-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu> Signed-off-by: Matan Shachnai <m.shachnai@rutgers.edu> Signed-off-by: Srinivas Narayana <srinivas.narayana@rutgers.edu> Signed-off-by: Santosh Nagarakatte <santosh.nagarakatte@rutgers.edu> Signed-off-by: Harishankar Vishwanathan <harishankar.vishwanathan@rutgers.edu> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Reviewed-by: Edward Cree <ecree.xilinx@gmail.com> Link: https://arxiv.org/abs/2105.05398 Link: https://lore.kernel.org/bpf/20210531020157.7386-1-harishankar.vishwanathan@rutgers.edu
215 lines
5.1 KiB
C
215 lines
5.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/* tnum: tracked (or tristate) numbers
|
|
*
|
|
* A tnum tracks knowledge about the bits of a value. Each bit can be either
|
|
* known (0 or 1), or unknown (x). Arithmetic operations on tnums will
|
|
* propagate the unknown bits such that the tnum result represents all the
|
|
* possible results for possible values of the operands.
|
|
*/
|
|
#include <linux/kernel.h>
|
|
#include <linux/tnum.h>
|
|
|
|
#define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
|
|
/* A completely unknown value */
|
|
const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
|
|
|
|
/* Wrap a fully-known 64-bit constant: every bit certain, empty mask. */
struct tnum tnum_const(u64 value)
{
	return TNUM(value, 0);
}
|
|
|
|
/* Return the tightest tnum containing every value in [min, max]:
 * bits above the highest bit where min and max differ stay known,
 * everything at or below it becomes unknown.
 */
struct tnum tnum_range(u64 min, u64 max)
{
	u64 diff = min ^ max;
	u8 bits = fls64(diff);
	u64 unknown;

	/* special case, needed because 1ULL << 64 is undefined */
	if (bits > 63)
		return tnum_unknown;
	/* e.g. diff = 4 -> bits = 3 -> unknown = (1<<3) - 1 = 7.
	 * diff = 0 -> bits = 0 -> unknown = 0, i.e. min == max and we
	 * return the constant min.
	 */
	unknown = (1ULL << bits) - 1;
	return TNUM(min & ~unknown, unknown);
}
|
|
|
|
/* Shift known bits and unknown bits left together; vacated low bits
 * become known zeros.
 */
struct tnum tnum_lshift(struct tnum a, u8 shift)
{
	return TNUM(a.value << shift, a.mask << shift);
}
|
|
|
|
/* Logical right shift: both halves shift together; vacated high bits
 * become known zeros.
 */
struct tnum tnum_rshift(struct tnum a, u8 shift)
{
	return TNUM(a.value >> shift, a.mask >> shift);
}
|
|
|
|
/* Arithmetic right shift by the minimum shift amount.
 *
 * If a.value is negative, arithmetic shifting by the minimum shift
 * yields a larger negative offset than shifting further; if a.value is
 * nonnegative, the minimum shift yields the larger positive offset.
 * The value and the mask are sign-extended independently, at the width
 * selected by @insn_bitness (32 or 64).
 */
struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
{
	if (insn_bitness == 32) {
		u32 v = (u32)(((s32)a.value) >> min_shift);
		u32 m = (u32)(((s32)a.mask) >> min_shift);

		return TNUM(v, m);
	}
	return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
}
|
|
|
|
struct tnum tnum_add(struct tnum a, struct tnum b)
|
|
{
|
|
u64 sm, sv, sigma, chi, mu;
|
|
|
|
sm = a.mask + b.mask;
|
|
sv = a.value + b.value;
|
|
sigma = sm + sv;
|
|
chi = sigma ^ sv;
|
|
mu = chi | a.mask | b.mask;
|
|
return TNUM(sv & ~mu, mu);
|
|
}
|
|
|
|
struct tnum tnum_sub(struct tnum a, struct tnum b)
|
|
{
|
|
u64 dv, alpha, beta, chi, mu;
|
|
|
|
dv = a.value - b.value;
|
|
alpha = dv + a.mask;
|
|
beta = dv - b.mask;
|
|
chi = alpha ^ beta;
|
|
mu = chi | a.mask | b.mask;
|
|
return TNUM(dv & ~mu, mu);
|
|
}
|
|
|
|
struct tnum tnum_and(struct tnum a, struct tnum b)
|
|
{
|
|
u64 alpha, beta, v;
|
|
|
|
alpha = a.value | a.mask;
|
|
beta = b.value | b.mask;
|
|
v = a.value & b.value;
|
|
return TNUM(v, alpha & beta & ~v);
|
|
}
|
|
|
|
struct tnum tnum_or(struct tnum a, struct tnum b)
|
|
{
|
|
u64 v, mu;
|
|
|
|
v = a.value | b.value;
|
|
mu = a.mask | b.mask;
|
|
return TNUM(v, mu & ~v);
|
|
}
|
|
|
|
struct tnum tnum_xor(struct tnum a, struct tnum b)
|
|
{
|
|
u64 v, mu;
|
|
|
|
v = a.value ^ b.value;
|
|
mu = a.mask | b.mask;
|
|
return TNUM(v & ~mu, mu);
|
|
}
|
|
|
|
/* Generate partial products by multiplying each bit in the multiplier (tnum a)
|
|
* with the multiplicand (tnum b), and add the partial products after
|
|
* appropriately bit-shifting them. Instead of directly performing tnum addition
|
|
* on the generated partial products, equivalenty, decompose each partial
|
|
* product into two tnums, consisting of the value-sum (acc_v) and the
|
|
* mask-sum (acc_m) and then perform tnum addition on them. The following paper
|
|
* explains the algorithm in more detail: https://arxiv.org/abs/2105.05398.
|
|
*/
|
|
struct tnum tnum_mul(struct tnum a, struct tnum b)
|
|
{
|
|
u64 acc_v = a.value * b.value;
|
|
struct tnum acc_m = TNUM(0, 0);
|
|
|
|
while (a.value || a.mask) {
|
|
/* LSB of tnum a is a certain 1 */
|
|
if (a.value & 1)
|
|
acc_m = tnum_add(acc_m, TNUM(0, b.mask));
|
|
/* LSB of tnum a is uncertain */
|
|
else if (a.mask & 1)
|
|
acc_m = tnum_add(acc_m, TNUM(0, b.value | b.mask));
|
|
/* Note: no case for LSB is certain 0 */
|
|
a = tnum_rshift(a, 1);
|
|
b = tnum_lshift(b, 1);
|
|
}
|
|
return tnum_add(TNUM(acc_v, 0), acc_m);
|
|
}
|
|
|
|
/* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
|
|
* a 'known 0' - this will return a 'known 1' for that bit.
|
|
*/
|
|
struct tnum tnum_intersect(struct tnum a, struct tnum b)
|
|
{
|
|
u64 v, mu;
|
|
|
|
v = a.value | b.value;
|
|
mu = a.mask & b.mask;
|
|
return TNUM(v & ~mu, mu);
|
|
}
|
|
|
|
/* Truncate @a to its low @size bytes, clearing value and mask above.
 * @size: width in bytes; sizes >= 8 leave the tnum unchanged.
 *
 * The guard avoids the undefined behavior of 1ULL << 64 that the
 * unguarded shift would hit for size == 8 (and shifts > 64 beyond).
 * For size < 8 the result is identical to the original code.
 */
struct tnum tnum_cast(struct tnum a, u8 size)
{
	u64 keep = (size < 8) ? (1ULL << (size * 8)) - 1 : ~0ULL;

	a.value &= keep;
	a.mask &= keep;
	return a;
}
|
|
|
|
/* Report whether every value @a can take has no bit set below @size.
 * NOTE(review): the (size - 1) test presumes @size is a power of two
 * (or zero, which is trivially aligned) — confirm against callers.
 */
bool tnum_is_aligned(struct tnum a, u64 size)
{
	if (!size)
		return true;
	/* any possibly-set low bit would break alignment */
	return ((a.value | a.mask) & (size - 1)) == 0;
}
|
|
|
|
bool tnum_in(struct tnum a, struct tnum b)
|
|
{
|
|
if (b.mask & ~a.mask)
|
|
return false;
|
|
b.value &= ~a.mask;
|
|
return a.value == b.value;
|
|
}
|
|
|
|
/* Format @a as "(value; mask)" in hex into @str, snprintf-style:
 * returns the length that would have been written, truncating to @size.
 */
int tnum_strn(char *str, size_t size, struct tnum a)
{
	return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
}
EXPORT_SYMBOL_GPL(tnum_strn);
|
|
|
|
/* Render @a as a 64-character binary string, MSB first: 'x' for an
 * unknown bit, '1'/'0' for known bits. Output is truncated to fit
 * @size bytes including the NUL terminator; always returns 64, the
 * untruncated length (snprintf convention).
 * NOTE(review): presumes size >= 1 — size == 0 would underflow the
 * terminator index below; confirm against callers.
 */
int tnum_sbin(char *str, size_t size, struct tnum a)
{
	size_t n;

	/* consume bits LSB-first, filling the string from the right */
	for (n = 64; n; n--) {
		if (n < size)
			str[n - 1] = (a.mask & 1) ? 'x' :
				     (a.value & 1) ? '1' : '0';
		a.mask >>= 1;
		a.value >>= 1;
	}
	str[min(size - 1, (size_t)64)] = 0;
	return 64;
}
|
|
|
|
/* Keep only the low 32 bits of @a (the subregister view). */
struct tnum tnum_subreg(struct tnum a)
{
	return tnum_cast(a, 4);
}
|
|
|
|
/* Force the low 32 bits of @a to known zero, preserving the high half. */
struct tnum tnum_clear_subreg(struct tnum a)
{
	return tnum_lshift(tnum_rshift(a, 32), 32);
}
|
|
|
|
/* Replace the low 32 bits of @a with the known constant @value. */
struct tnum tnum_const_subreg(struct tnum a, u32 value)
{
	return tnum_or(tnum_clear_subreg(a), tnum_const(value));
}