Merge pull request #11468 from topolarity/f80-mul

compiler_rt: Implement softfloat multiply for `f80`
commit f3b3d7f20a
Andrew Kelley, 2022-04-19 13:04:31 -04:00 (committed via GitHub)
5 changed files with 266 additions and 244 deletions

File: compiler_rt.zig

@@ -226,23 +226,26 @@ comptime {
@export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3;
@export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
@export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
const __addxf3 = @import("compiler_rt/addXf3.zig").__addxf3;
@export(__addxf3, .{ .name = "__addxf3", .linkage = linkage });
const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3;
@export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3;
@export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3;
@export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
@export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
const __subxf3 = @import("compiler_rt/addXf3.zig").__subxf3;
@export(__subxf3, .{ .name = "__subxf3", .linkage = linkage });
const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3;
@export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3;
@export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3;
@export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
const __mulxf3 = @import("compiler_rt/mulXf3.zig").__mulxf3;
@export(__mulxf3, .{ .name = "__mulxf3", .linkage = linkage });
const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3;
@export(__multf3, .{ .name = "__multf3", .linkage = linkage });
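
For context, these `__addxf3`/`__mulxf3` exports are the symbols the compiler emits calls to when lowering `f80` arithmetic in software. A minimal sketch of exercising the new multiply entry point directly (a hypothetical test, assuming compiler_rt is linked as usual):

```zig
const std = @import("std");

// Declared extern for illustration; the export above provides the symbol.
extern fn __mulxf3(a: f80, b: f80) f80;

test "f80 softfloat multiply via compiler_rt" {
    const product = __mulxf3(@as(f80, 1.5), @as(f80, 2.0));
    try std.testing.expectEqual(@as(f80, 3.0), product);
}
```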

File: compiler_rt/addXf3.zig

@@ -3,6 +3,7 @@
// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
const std = @import("std");
const math = std.math;
const builtin = @import("builtin");
const compiler_rt = @import("../compiler_rt.zig");
@@ -14,6 +15,16 @@ pub fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
return addXf3(f64, a, b);
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
return addXf3(f80, a, b);
}
pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
var b_rep = std.math.break_f80(b);
b_rep.exp ^= 0x8000;
return __addxf3(a, std.math.make_f80(b_rep));
}
pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
return addXf3(f128, a, b);
}
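
`__subxf3` negates `b` by toggling bit 15 of the sign/exponent word, since `std.math.break_f80` splits an `f80` into a 64-bit `fraction` and a 16-bit `exp` field that carries both sign and exponent. A small sketch of that representation, using the same `break_f80`/`make_f80` helpers:

```zig
const std = @import("std");

test "f80 sign lives in the combined sign/exponent word" {
    var rep = std.math.break_f80(@as(f80, 1.0));
    rep.exp ^= 0x8000; // flip the sign bit, as __subxf3 does
    try std.testing.expectEqual(@as(f80, -1.0), std.math.make_f80(rep));
}
```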
@@ -58,10 +69,10 @@ fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
const fractionalBits = math.floatFractionalBits(T);
const integerBit = @as(Z, 1) << fractionalBits;
const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, implicitBit);
const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, integerBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}
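
The switch from `implicitBit` to `integerBit` is the heart of the `f80` support: `f80` stores its integer bit explicitly, so `floatMantissaBits` (the stored significand width) and `floatFractionalBits` (the bits below the radix point) disagree for it and for no other float type. A quick check of the distinction:

```zig
const std = @import("std");
const math = std.math;

test "mantissa width vs fractional bits" {
    // f64: the integer bit is implicit, so the two counts agree.
    comptime std.debug.assert(math.floatMantissaBits(f64) == 52);
    comptime std.debug.assert(math.floatFractionalBits(f64) == 52);
    // f80: the integer bit is stored explicitly, so the significand
    // field is 64 bits wide but only 63 of them are fractional.
    comptime std.debug.assert(math.floatMantissaBits(f80) == 64);
    comptime std.debug.assert(math.floatFractionalBits(f80) == 63);
}
```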
@@ -73,26 +84,26 @@ fn addXf3(comptime T: type, a: T, b: T) T {
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
const typeWidth = bits;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
const exponentBits = math.floatExponentBits(T);
const signBit = (@as(Z, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
const implicitBit = (@as(Z, 1) << significandBits);
const quietBit = implicitBit >> 1;
const significandMask = implicitBit - 1;
const integerBit = (@as(Z, 1) << fractionalBits);
const quietBit = integerBit >> 1;
const significandMask = (@as(Z, 1) << significandBits) - 1;
const absMask = signBit - 1;
const exponentMask = absMask ^ significandMask;
const qnanRep = exponentMask | quietBit;
const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
var aRep = @bitCast(Z, a);
var bRep = @bitCast(Z, b);
const aAbs = aRep & absMask;
const bAbs = bRep & absMask;
const infRep = @bitCast(Z, std.math.inf(T));
const infRep = @bitCast(Z, math.inf(T));
// Detect if a or b is zero, infinity, or NaN.
if (aAbs -% @as(Z, 1) >= infRep - @as(Z, 1) or
@@ -157,12 +168,12 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
aSignificand = (aSignificand | implicitBit) << 3;
bSignificand = (bSignificand | implicitBit) << 3;
aSignificand = (aSignificand | integerBit) << 3;
bSignificand = (bSignificand | integerBit) << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
const @"align" = @intCast(Z, aExponent - bExponent);
const @"align" = @intCast(u32, aExponent - bExponent);
if (@"align" != 0) {
if (@"align" < typeWidth) {
const sticky = if (bSignificand << @intCast(S, typeWidth - @"align") != 0) @as(Z, 1) else 0;
@@ -178,8 +189,8 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(.unsigned, bits), implicitBit << 3));
if (aSignificand < integerBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(.unsigned, bits), integerBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}
@@ -188,7 +199,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
if (aSignificand & (implicitBit << 4) != 0) {
if (aSignificand & (integerBit << 4) != 0) {
const sticky = aSignificand & 1;
aSignificand = aSignificand >> 1 | sticky;
aExponent += 1;
@@ -210,7 +221,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// Low three bits are round, guard, and sticky.
const roundGuardSticky = aSignificand & 0x7;
// Shift the significand into place, and mask off the implicit bit.
// Shift the significand into place, and mask off the integer bit, if it's implicit.
var result = (aSignificand >> 3) & significandMask;
// Insert the exponent and sign.
@@ -222,180 +233,14 @@ fn addXf3(comptime T: type, a: T, b: T) T {
if (roundGuardSticky > 0x4) result += 1;
if (roundGuardSticky == 0x4) result += result & 1;
// Restore any explicit integer bit, if it was rounded off
if (significandBits != fractionalBits) {
if ((result >> significandBits) != 0) result |= integerBit;
}
return @bitCast(T, result);
}
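
The three low bits (round, guard, sticky) drive IEEE round-to-nearest, ties-to-even, exactly as in the two `if`s above. The same rule on bare integers, as a standalone sketch:

```zig
const std = @import("std");

// Drop the low 3 (round/guard/sticky) bits, rounding to nearest,
// ties to even, mirroring the rounding step in addXf3.
fn roundRGS(x: u8) u8 {
    const rgs = x & 0x7;
    var result = x >> 3;
    if (rgs > 0x4) result += 1; // more than halfway: round up
    if (rgs == 0x4) result += result & 1; // exact tie: go to even
    return result;
}

test "ties round to the even neighbor" {
    try std.testing.expectEqual(@as(u8, 2), roundRGS(0b10_100)); // 2.5 -> 2
    try std.testing.expectEqual(@as(u8, 4), roundRGS(0b11_100)); // 3.5 -> 4
    try std.testing.expectEqual(@as(u8, 3), roundRGS(0b10_101)); // 2.625 -> 3
}
```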
fn normalize_f80(exp: *i32, significand: *u80) void {
const shift = @clz(u64, @truncate(u64, significand.*));
significand.* = (significand.* << shift);
exp.* += -@as(i8, shift);
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
var a_rep = std.math.break_f80(a);
var b_rep = std.math.break_f80(b);
var a_exp: i32 = a_rep.exp & 0x7FFF;
var b_exp: i32 = b_rep.exp & 0x7FFF;
const significand_bits = std.math.floatMantissaBits(f80);
const int_bit = 0x8000000000000000;
const significand_mask = 0x7FFFFFFFFFFFFFFF;
const qnan_bit = 0xC000000000000000;
const max_exp = 0x7FFF;
const sign_bit = 0x8000;
// Detect if a or b is infinity, or NaN.
if (a_exp == max_exp) {
if (a_rep.fraction ^ int_bit == 0) {
if (b_exp == max_exp and (b_rep.fraction ^ int_bit == 0)) {
// +/-infinity + -/+infinity = qNaN
return std.math.qnan_f80;
}
// +/-infinity + anything = +/- infinity
return a;
} else {
std.debug.assert(a_rep.fraction & significand_mask != 0);
// NaN + anything = qNaN
a_rep.fraction |= qnan_bit;
return std.math.make_f80(a_rep);
}
}
if (b_exp == max_exp) {
if (b_rep.fraction ^ int_bit == 0) {
// anything + +/-infinity = +/-infinity
return b;
} else {
std.debug.assert(b_rep.fraction & significand_mask != 0);
// anything + NaN = qNaN
b_rep.fraction |= qnan_bit;
return std.math.make_f80(b_rep);
}
}
const a_zero = (a_rep.fraction | @bitCast(u32, a_exp)) == 0;
const b_zero = (b_rep.fraction | @bitCast(u32, b_exp)) == 0;
if (a_zero) {
// zero + anything = anything
if (b_zero) {
// but we need to get the sign right for zero + zero
a_rep.exp &= b_rep.exp;
return std.math.make_f80(a_rep);
} else {
return b;
}
} else if (b_zero) {
// anything + zero = anything
return a;
}
var a_int: u80 = a_rep.fraction | (@as(u80, a_rep.exp & max_exp) << significand_bits);
var b_int: u80 = b_rep.fraction | (@as(u80, b_rep.exp & max_exp) << significand_bits);
// Swap a and b if necessary so that a has the larger absolute value.
if (b_int > a_int) {
const temp = a_rep;
a_rep = b_rep;
b_rep = temp;
}
// Extract the exponent and significand from the (possibly swapped) a and b.
a_exp = a_rep.exp & max_exp;
b_exp = b_rep.exp & max_exp;
a_int = a_rep.fraction;
b_int = b_rep.fraction;
// Normalize any denormals, and adjust the exponent accordingly.
normalize_f80(&a_exp, &a_int);
normalize_f80(&b_exp, &b_int);
// The sign of the result is the sign of the larger operand, a. If they
// have opposite signs, we are performing a subtraction; otherwise addition.
const result_sign = a_rep.exp & sign_bit;
const subtraction = (a_rep.exp ^ b_rep.exp) & sign_bit != 0;
// Shift the significands to give us round, guard and sticky, and or in the
// implicit significand bit. (If we fell through from the denormal path it
// was already set by normalize( ), but setting it twice won't hurt
// anything.)
a_int = a_int << 3;
b_int = b_int << 3;
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
const @"align" = @intCast(u80, a_exp - b_exp);
if (@"align" != 0) {
if (@"align" < 80) {
const sticky = if (b_int << @intCast(u7, 80 - @"align") != 0) @as(u80, 1) else 0;
b_int = (b_int >> @truncate(u7, @"align")) | sticky;
} else {
b_int = 1; // sticky; b is known to be non-zero.
}
}
if (subtraction) {
a_int -= b_int;
// If a == -b, return +zero.
if (a_int == 0) return 0.0;
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (a_int < int_bit << 3) {
const shift = @intCast(i32, @clz(u80, a_int)) - @intCast(i32, @clz(u80, @as(u80, int_bit) << 3));
a_int <<= @intCast(u7, shift);
a_exp -= shift;
}
} else { // addition
a_int += b_int;
// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
if (a_int & (int_bit << 4) != 0) {
const sticky = a_int & 1;
a_int = a_int >> 1 | sticky;
a_exp += 1;
}
}
// If we have overflowed the type, return +/- infinity:
if (a_exp >= max_exp) {
a_rep.exp = max_exp | result_sign;
a_rep.fraction = int_bit; // integer bit is set for +/-inf
return std.math.make_f80(a_rep);
}
if (a_exp <= 0) {
// Result is denormal before rounding; the exponent is zero and we
// need to shift the significand.
const shift = @intCast(u80, 1 - a_exp);
const sticky = if (a_int << @intCast(u7, 80 - shift) != 0) @as(u1, 1) else 0;
a_int = a_int >> @intCast(u7, shift | sticky);
a_exp = 0;
}
// Low three bits are round, guard, and sticky.
const round_guard_sticky = @truncate(u3, a_int);
// Shift the significand into place.
a_int = @truncate(u64, a_int >> 3);
// Insert the exponent and sign.
a_int |= (@intCast(u80, a_exp) | result_sign) << significand_bits;
// Final rounding. The result may overflow to infinity, but that is the
// correct result in that case.
if (round_guard_sticky > 0x4) a_int += 1;
if (round_guard_sticky == 0x4) a_int += a_int & 1;
a_rep.fraction = @truncate(u64, a_int);
a_rep.exp = @truncate(u16, a_int >> significand_bits);
return std.math.make_f80(a_rep);
}
pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
var b_rep = std.math.break_f80(b);
b_rep.exp ^= 0x8000;
return __addxf3(a, std.math.make_f80(b_rep));
}
test {
_ = @import("addXf3_test.zig");
}

File: compiler_rt/addXf3_test.zig

@@ -3,8 +3,9 @@
// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/test/builtins/Unit/addtf3_test.c
// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/test/builtins/Unit/subtf3_test.c
const std = @import("std");
const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
const __addtf3 = @import("addXf3.zig").__addtf3;
@@ -37,13 +38,14 @@ test "addtf3" {
try test__addtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// inf + inf = inf
try test__addtf3(inf128, inf128, 0x7fff000000000000, 0x0);
try test__addtf3(math.inf(f128), math.inf(f128), 0x7fff000000000000, 0x0);
// inf + any = inf
try test__addtf3(inf128, 0x1.2335653452436234723489432abcdefp+5, 0x7fff000000000000, 0x0);
try test__addtf3(math.inf(f128), 0x1.2335653452436234723489432abcdefp+5, 0x7fff000000000000, 0x0);
// any + any
try test__addtf3(0x1.23456734245345543849abcdefp+5, 0x1.edcba52449872455634654321fp-1, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
try test__addtf3(0x1.edcba52449872455634654321fp-1, 0x1.23456734245345543849abcdefp+5, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
}
const __subtf3 = @import("addXf3.zig").__subtf3;
@@ -78,8 +80,76 @@ test "subtf3" {
try test__subtf3(@bitCast(f128, (@as(u128, 0x7fff000000000000) << 64) | @as(u128, 0x800030000000)), 0x1.23456789abcdefp+5, 0x7fff800000000000, 0x0);
// inf - any = inf
try test__subtf3(inf128, 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
try test__subtf3(math.inf(f128), 0x1.23456789abcdefp+5, 0x7fff000000000000, 0x0);
// any - any
try test__subtf3(0x1.234567829a3bcdef5678ade36734p+5, 0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x40041b8af1915166, 0xa44a7bca780a166c);
try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
}
const __addxf3 = @import("addXf3.zig").__addxf3;
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
fn test__addxf3(a: f80, b: f80, expected: u80) !void {
const x = __addxf3(a, b);
const rep = @bitCast(u80, x);
if (rep == expected)
return;
if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
return; // We don't currently test NaN payload propagation
return error.TestFailed;
}
test "addxf3" {
// NaN + any = NaN
try test__addxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
try test__addxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
// any + NaN = NaN
try test__addxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
try test__addxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
// NaN + inf = NaN
try test__addxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
// inf + NaN = NaN
try test__addxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
// inf + inf = inf
try test__addxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
// inf + -inf = NaN
try test__addxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, qnan80));
// -inf + inf = NaN
try test__addxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, qnan80));
// inf + any = inf
try test__addxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
// any + inf = inf
try test__addxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
// any + any
try test__addxf3(0x1.23456789abcdp+5, 0x1.dcba987654321p+5, 0x4005_BFFFFFFFFFFFC400);
try test__addxf3(0x1.23456734245345543849abcdefp+5, 0x1.edcba52449872455634654321fp-1, 0x4004_957E_4AE4_5ABC_B0F3);
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.0p-63, 0x3FFF_FFFFFFFFFFFFFFFF); // exact
try test__addxf3(0x1.ffff_ffff_ffff_fffep+0, 0x0.0p0, 0x3FFF_FFFFFFFFFFFFFFFF); // exact
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.4p-63, 0x3FFF_FFFFFFFFFFFFFFFF); // round down
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.8p-63, 0x4000_8000000000000000); // round up to even
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x1.cp-63, 0x4000_8000000000000000); // round up
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x2.0p-63, 0x4000_8000000000000000); // exact
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x2.1p-63, 0x4000_8000000000000000); // round down
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x3.0p-63, 0x4000_8000000000000000); // round down to even
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x3.1p-63, 0x4000_8000000000000001); // round up
try test__addxf3(0x1.ffff_ffff_ffff_fffcp+0, 0x4.0p-63, 0x4000_8000000000000001); // exact
try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.0p-63, 0x3FFF_8800000000000000); // exact
try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.7p-63, 0x3FFF_8800000000000000); // round down
try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.8p-63, 0x3FFF_8800000000000000); // round down to even
try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x1.9p-63, 0x3FFF_8800000000000001); // round up
try test__addxf3(0x1.0fff_ffff_ffff_fffep+0, 0x2.0p-63, 0x3FFF_8800000000000001); // exact
}
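
Each expected value spells out the full 80-bit pattern: the 16-bit sign/exponent word, then the 64-bit significand whose top bit is the explicit integer bit. Decoding one vector by hand (a sketch):

```zig
const std = @import("std");

test "decode the 0x3FFF_FFFFFFFFFFFFFFFF vector" {
    // Biased exponent 0x3FFF (unbiased 0), integer bit set, all 63
    // fraction bits set: the largest f80 below 2.0, i.e. 2 - 2^-63.
    const x = @bitCast(f80, @as(u80, 0x3FFF_FFFF_FFFF_FFFF_FFFF));
    try std.testing.expectEqual(@as(f80, 0x1.FFFFFFFFFFFFFFFEp+0), x);
}
```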

File: compiler_rt/mulXf3.zig

@@ -3,12 +3,16 @@
// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
const std = @import("std");
const math = std.math;
const builtin = @import("builtin");
const compiler_rt = @import("../compiler_rt.zig");
pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
return mulXf3(f128, a, b);
}
pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
return mulXf3(f80, a, b);
}
pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
return mulXf3(f64, a, b);
}
@@ -29,30 +33,36 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(T).Float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
const exponentBits = math.floatExponentBits(T);
const Z = std.meta.Int(.unsigned, typeWidth);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
// ZSignificand is large enough to contain the significand, including an explicit integer bit
const ZSignificand = PowerOfTwoSignificandZ(T);
const ZSignificandBits = @typeInfo(ZSignificand).Int.bits;
const roundBit = (1 << (ZSignificandBits - 1));
const signBit = (@as(Z, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
const exponentBias = (maxExponent >> 1);
const implicitBit = (@as(Z, 1) << significandBits);
const quietBit = implicitBit >> 1;
const significandMask = implicitBit - 1;
const integerBit = (@as(ZSignificand, 1) << fractionalBits);
const quietBit = integerBit >> 1;
const significandMask = (@as(Z, 1) << significandBits) - 1;
const absMask = signBit - 1;
const exponentMask = absMask ^ significandMask;
const qnanRep = exponentMask | quietBit;
const infRep = @bitCast(Z, std.math.inf(T));
const qnanRep = @bitCast(Z, math.nan(T)) | quietBit;
const infRep = @bitCast(Z, math.inf(T));
const minNormalRep = @bitCast(Z, math.floatMin(T));
const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
var aSignificand: Z = @bitCast(Z, a) & significandMask;
var bSignificand: Z = @bitCast(Z, b) & significandMask;
var aSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, a) & significandMask);
var bSignificand: ZSignificand = @intCast(ZSignificand, @bitCast(Z, b) & significandMask);
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
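
Specialized to `f80`, these constants land as follows: `ZSignificand` is `u64` (63 fractional bits plus the explicit integer bit fit exactly), while the sign bit and masks still span the full 80-bit pattern. A sketch of the resulting values, derived from the definitions above rather than verified separately:

```zig
const std = @import("std");

test "mulXf3 bit-field constants for f80" {
    const signBit = @as(u80, 1) << (64 + 15); // significandBits + exponentBits
    const integerBit = @as(u64, 1) << 63; // inside ZSignificand = u64
    const significandMask = (@as(u80, 1) << 64) - 1;
    try std.testing.expectEqual(@as(u80, 0x8000_0000_0000_0000_0000), signBit);
    try std.testing.expectEqual(@as(u64, 0x8000_0000_0000_0000), integerBit);
    try std.testing.expectEqual(@as(u80, 0xFFFF_FFFF_FFFF_FFFF), significandMask);
}
```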
@@ -93,38 +103,40 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale += normalize(T, &aSignificand);
if (bAbs < implicitBit) scale += normalize(T, &bSignificand);
if (aAbs < minNormalRep) scale += normalize(T, &aSignificand);
if (bAbs < minNormalRep) scale += normalize(T, &bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
// won't hurt anything.)
aSignificand |= implicitBit;
bSignificand |= implicitBit;
aSignificand |= integerBit;
bSignificand |= integerBit;
// Get the significand of a*b. Before multiplying the significands, shift
// one of them left to left-align it in the field. Thus, the product will
// have (exponentBits + 2) integral digits, all but two of which must be
// zero. Normalizing this result is just a conditional left-shift by one
// and bumping the exponent accordingly.
var productHi: Z = undefined;
var productLo: Z = undefined;
wideMultiply(Z, aSignificand, bSignificand << exponentBits, &productHi, &productLo);
var productHi: ZSignificand = undefined;
var productLo: ZSignificand = undefined;
const left_align_shift = ZSignificandBits - fractionalBits - 1;
wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
var productExponent: i32 = @bitCast(i32, aExponent +% bExponent) -% exponentBias +% scale;
var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale;
// Normalize the significand, adjust exponent if needed.
if ((productHi & implicitBit) != 0) {
if ((productHi & integerBit) != 0) {
productExponent +%= 1;
} else {
productHi = (productHi << 1) | (productLo >> (typeWidth - 1));
productHi = (productHi << 1) | (productLo >> (ZSignificandBits - 1));
productLo = productLo << 1;
}
// If we have overflowed the type, return +/- infinity.
if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign);
var result: Z = undefined;
if (productExponent <= 0) {
// Result is denormal before rounding
//
@@ -133,35 +145,49 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
// handle this case separately, but we make it a special case to
// simplify the shift logic.
const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent));
if (shift >= typeWidth) return @bitCast(T, productSign);
if (shift >= ZSignificandBits) return @bitCast(T, productSign);
// Otherwise, shift the significand of the result so that the round
// bit is the high bit of productLo.
wideRightShiftWithSticky(Z, &productHi, &productLo, shift);
const sticky = wideShrWithTruncation(ZSignificand, &productHi, &productLo, shift);
productLo |= @boolToInt(sticky);
result = productHi;
} else {
// Result is normal before rounding; insert the exponent.
productHi &= significandMask;
productHi |= @as(Z, @bitCast(u32, productExponent)) << significandBits;
result = productHi & significandMask;
result |= @intCast(Z, productExponent) << significandBits;
}
// Insert the sign of the result:
productHi |= productSign;
// Final rounding. The final result may overflow to infinity, or underflow
// to zero, but those are the correct results in those cases. We use the
// default IEEE-754 round-to-nearest, ties-to-even rounding mode.
if (productLo > signBit) productHi +%= 1;
if (productLo == signBit) productHi +%= productHi & 1;
return @bitCast(T, productHi);
if (productLo > roundBit) result +%= 1;
if (productLo == roundBit) result +%= result & 1;
// Restore any explicit integer bit, if it was rounded off
if (significandBits != fractionalBits) {
if ((result >> significandBits) != 0) result |= integerBit;
}
// Insert the sign of the result:
result |= productSign;
return @bitCast(T, result);
}
fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
@setRuntimeSafety(builtin.is_test);
switch (Z) {
u16 => {
// 16x16 --> 32 bit multiply
const product = @as(u32, a) * @as(u32, b);
hi.* = @intCast(u16, product >> 16);
lo.* = @truncate(u16, product);
},
u32 => {
// 32x32 --> 64 bit multiply
const product = @as(u64, a) * @as(u64, b);
hi.* = @truncate(u32, product >> 32);
hi.* = @intCast(u32, product >> 32);
lo.* = @truncate(u32, product);
},
u64 => {
@@ -170,7 +196,7 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
return @truncate(u32, x);
}
fn hiWord(x: u64) u64 {
return @truncate(u32, x >> 32);
return @intCast(u32, x >> 32);
}
};
// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
@@ -264,34 +290,45 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
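
`wideMultiply` builds a double-width product out of single-width halves. Where the compiler already provides the wider multiply, the decomposition can be cross-checked directly (a sketch, assuming `wideMultiply` and the file's `std` import are in scope):

```zig
test "wideMultiply agrees with a native 128-bit multiply" {
    var hi: u64 = undefined;
    var lo: u64 = undefined;
    wideMultiply(u64, 0xDEAD_BEEF_0123_4567, 0x89AB_CDEF_FEDC_BA98, &hi, &lo);
    const product = @as(u128, 0xDEAD_BEEF_0123_4567) * 0x89AB_CDEF_FEDC_BA98;
    try std.testing.expectEqual(@truncate(u64, product), lo);
    try std.testing.expectEqual(@intCast(u64, product >> 64), hi);
}
```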
fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
/// Returns a power-of-two integer type that is large enough to contain
/// the significand of T, including an explicit integer bit
fn PowerOfTwoSignificandZ(comptime T: type) type {
const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
return std.meta.Int(.unsigned, bits);
}
const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = PowerOfTwoSignificandZ(T);
const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
significand.* <<= @intCast(math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}
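
For the supported widths this picks the smallest power-of-two integer that holds the significand with its integer bit: `u64` for `f80` (63 + 1 bits fit exactly) and `u128` for `f128` (112 + 1 rounds up). A quick check, assuming the helper above is in scope:

```zig
test "PowerOfTwoSignificandZ widths" {
    comptime std.debug.assert(PowerOfTwoSignificandZ(f64) == u64);
    comptime std.debug.assert(PowerOfTwoSignificandZ(f80) == u64);
    comptime std.debug.assert(PowerOfTwoSignificandZ(f128) == u128);
}
```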
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
//
// This is analogous to a shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
const S = math.Log2Int(Z);
var inexact = false;
if (count < typeWidth) {
const sticky = @boolToInt((lo.* << @intCast(S, typeWidth -% count)) != 0);
lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count)) | sticky;
inexact = (lo.* << @intCast(S, typeWidth -% count)) != 0;
lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count));
hi.* = hi.* >> @intCast(S, count);
} else if (count < 2 * typeWidth) {
const sticky = @boolToInt((hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0);
lo.* = hi.* >> @intCast(S, count -% typeWidth) | sticky;
inexact = (hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0;
lo.* = hi.* >> @intCast(S, count -% typeWidth);
hi.* = 0;
} else {
const sticky = @boolToInt((hi.* | lo.*) != 0);
lo.* = sticky;
inexact = (hi.* | lo.*) != 0;
lo.* = 0;
hi.* = 0;
}
return inexact;
}
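
The returned flag carries the sticky information, which the caller ORs back into the low word (see `productLo |= @boolToInt(sticky)` above). Shifting out a non-zero bit must report inexact (a sketch, assuming the function above is in scope):

```zig
test "wideShrWithTruncation reports shifted-out bits" {
    var hi: u64 = 0x1;
    var lo: u64 = 0x8;
    // Bit 3 of `lo` is shifted out, so the shift is inexact.
    try std.testing.expect(wideShrWithTruncation(u64, &hi, &lo, 4));
    try std.testing.expectEqual(@as(u64, 0x1000_0000_0000_0000), lo);
    try std.testing.expectEqual(@as(u64, 0), hi);
}
```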
test {

File: compiler_rt/mulXf3_test.zig

@@ -2,10 +2,15 @@
//
// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/test/builtins/Unit/multf3_test.c
const std = @import("std");
const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
const __multf3 = @import("mulXf3.zig").__multf3;
const __mulxf3 = @import("mulXf3.zig").__mulxf3;
const __muldf3 = @import("mulXf3.zig").__muldf3;
const __mulsf3 = @import("mulXf3.zig").__mulsf3;
// return true if equal
// use two 64-bit integers instead of one 128-bit integer
@@ -97,4 +102,66 @@ test "multf3" {
0x3f90000000000000,
0x0,
);
try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0001p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0002);
try test__multf3(0x1.0000_0000_0000_0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_8000_0000_0000, 0x0000_0000_0000_0003);
}
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
fn test__mulxf3(a: f80, b: f80, expected: u80) !void {
const x = __mulxf3(a, b);
const rep = @bitCast(u80, x);
if (rep == expected)
return;
if (math.isNan(@bitCast(f80, expected)) and math.isNan(x))
return; // We don't currently test NaN payload propagation
return error.TestFailed;
}
test "mulxf3" {
// NaN * any = NaN
try test__mulxf3(qnan80, 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
try test__mulxf3(@bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), 0x1.23456789abcdefp+5, @bitCast(u80, qnan80));
// any * NaN = NaN
try test__mulxf3(0x1.23456789abcdefp+5, qnan80, @bitCast(u80, qnan80));
try test__mulxf3(0x1.23456789abcdefp+5, @bitCast(f80, @as(u80, 0x7fff_8000_8000_3000_0000)), @bitCast(u80, qnan80));
// NaN * inf = NaN
try test__mulxf3(qnan80, math.inf(f80), @bitCast(u80, qnan80));
// inf * NaN = NaN
try test__mulxf3(math.inf(f80), qnan80, @bitCast(u80, qnan80));
// inf * inf = inf
try test__mulxf3(math.inf(f80), math.inf(f80), @bitCast(u80, math.inf(f80)));
// inf * -inf = -inf
try test__mulxf3(math.inf(f80), -math.inf(f80), @bitCast(u80, -math.inf(f80)));
// -inf * inf = -inf
try test__mulxf3(-math.inf(f80), math.inf(f80), @bitCast(u80, -math.inf(f80)));
// inf * any = inf
try test__mulxf3(math.inf(f80), 0x1.2335653452436234723489432abcdefp+5, @bitCast(u80, math.inf(f80)));
// any * inf = inf
try test__mulxf3(0x1.2335653452436234723489432abcdefp+5, math.inf(f80), @bitCast(u80, math.inf(f80)));
// any * any
try test__mulxf3(0x1.0p+0, 0x1.dcba987654321p+5, 0x4004_ee5d_4c3b_2a19_0800);
try test__mulxf3(0x1.0000_0000_0000_0004p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0003); // exact
try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+5, 0x4004_8000_0000_0000_0001); // exact
try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.7ffep+5, 0x4004_BFFF_0000_0000_0001); // round down
try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8p+5, 0x4004_C000_0000_0000_0002); // round up to even
try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8002p+5, 0x4004_C001_0000_0000_0002); // round up
try test__mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.0p+6, 0x4005_8000_0000_0000_0001); // exact
try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001p+0, 0x3FFF_8000_0001_0000_0000); // round down to even
try test__mulxf3(0x1.0000_0001p+0, 0x1.0000_0001_0002p+0, 0x3FFF_8000_0001_0001_0001); // round up
}
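
To see why the "round up to even" vector lands where it does: (1 + 2^-63) * (1.5 * 2^5) has infinitely-precise significand 0b1.1 plus bits at 2^-63 and 2^-64. The 2^-64 bit falls one place past the 63 stored fraction bits with nothing below it, an exact tie, so the result rounds to the even neighbor, whose fraction ends in 0b10 (the trailing ...0002). Replaying that vector (a sketch, reusing this file's `__mulxf3` import):

```zig
test "worked tie-to-even on an f80 product" {
    const product = __mulxf3(0x1.0000_0000_0000_0002p+0, 0x1.8p+5);
    try std.testing.expectEqual(@as(u80, 0x4004_C000_0000_0000_0002), @bitCast(u80, product));
}
```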