From 111c4d23a9e6ff15a3241e7664a57a1f0e8f87a1 Mon Sep 17 00:00:00 2001
From: Rasmus Munk Larsen <4643818-rmlarsen1@users.noreply.gitlab.com>
Date: Wed, 8 Apr 2026 13:10:27 -0700
Subject: [PATCH] Revert "Revert "Speed up plog_double ~1.7x with fast integer
 range reduction""

This reverts commit b1d2ce4c8577b5e0d931e0ee90e7492048ff3287
---
 .../arch/Default/GenericPacketMathFunctions.h | 190 +++++++-----------
 test/ulp_accuracy/ulp_accuracy.cpp            | 130 ++++++++----
 2 files changed, 159 insertions(+), 161 deletions(-)

diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
index 3d8a8cf42..51964fb4a 100644
--- a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
+++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
@@ -141,98 +141,83 @@ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog2_float(const Pac
   return plog_impl_float<Packet, /* base2 */ true>(_x);
 }
 
-/* Returns the base e (2.718...) or base 2 logarithm of x.
- * The argument is separated into its exponent and fractional parts.
- * The logarithm of the fraction in the interval [sqrt(1/2), sqrt(2)],
- * is approximated by
- *
- *     log(1+x) = x - 0.5 x**2 + x**3 P(x)/Q(x).
- *
- * for more detail see: http://www.netlib.org/cephes/
- */
-template <typename Packet, bool base2>
-EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog_impl_double(const Packet _x) {
-  Packet x = _x;
+// Core range reduction and polynomial evaluation for double logarithm.
+//
+// Same structure as plog_core_float but for double precision.
+// Given a positive double v (may be denormal), decomposes it as
+// v = 2^e * (1+f) with f in [sqrt(0.5)-1, sqrt(2)-1], then evaluates
+// log(1+f) ≈ f - 0.5*f^2 + f^3 * P(f)/Q(f) using the Cephes [5/5]
+// rational approximation.
+template <typename Packet>
+EIGEN_STRONG_INLINE void plog_core_double(const Packet v, Packet& log_mantissa, Packet& e) {
+  typedef typename unpacket_traits<Packet>::integer_packet PacketL;
 
-  const Packet cst_1 = pset1<Packet>(1.0);
-  const Packet cst_neg_half = pset1<Packet>(-0.5);
-  const Packet cst_minus_inf = pset1frombits<Packet>(static_cast<uint64_t>(0xfff0000000000000ull));
-  const Packet cst_pos_inf = pset1frombits<Packet>(static_cast<uint64_t>(0x7ff0000000000000ull));
+  const PacketL cst_min_normal = pset1<PacketL>(int64_t(0x0010000000000000LL));
+  const PacketL cst_mant_mask = pset1<PacketL>(int64_t(0x000fffffffffffffLL));
+  const PacketL cst_sqrt_half_offset = pset1<PacketL>(int64_t(0x00095f619980c433LL));
+  const PacketL cst_exp_bias = pset1<PacketL>(int64_t(0x3ff));  // 1023
+  const PacketL cst_half_mant = pset1<PacketL>(int64_t(0x3fe6a09e667f3bcdLL));  // sqrt(0.5)
 
-  // Polynomial Coefficients for log(1+x) = x - x**2/2 + x**3 P(x)/Q(x)
-  //                             1/sqrt(2) <= x < sqrt(2)
-  const Packet cst_cephes_SQRTHF = pset1<Packet>(0.70710678118654752440E0);
-  const Packet cst_cephes_log_p0 = pset1<Packet>(1.01875663804580931796E-4);
-  const Packet cst_cephes_log_p1 = pset1<Packet>(4.97494994976747001425E-1);
-  const Packet cst_cephes_log_p2 = pset1<Packet>(4.70579119878881725854E0);
-  const Packet cst_cephes_log_p3 = pset1<Packet>(1.44989225341610930846E1);
-  const Packet cst_cephes_log_p4 = pset1<Packet>(1.79368678507819816313E1);
-  const Packet cst_cephes_log_p5 = pset1<Packet>(7.70838733755885391666E0);
+  // Normalize denormals by multiplying by 2^52.
+  PacketL vi = preinterpret<PacketL>(v);
+  PacketL is_denormal = pcmp_lt(vi, cst_min_normal);
+  Packet v_normalized = pmul(v, pset1<Packet>(4503599627370496.0));  // 2^52
+  vi = pselect(is_denormal, preinterpret<PacketL>(v_normalized), vi);
+  PacketL denorm_adj = pand(is_denormal, pset1<PacketL>(int64_t(52)));
 
-  const Packet cst_cephes_log_q0 = pset1<Packet>(1.0);
-  const Packet cst_cephes_log_q1 = pset1<Packet>(1.12873587189167450590E1);
-  const Packet cst_cephes_log_q2 = pset1<Packet>(4.52279145837532221105E1);
-  const Packet cst_cephes_log_q3 = pset1<Packet>(8.29875266912776603211E1);
-  const Packet cst_cephes_log_q4 = pset1<Packet>(7.11544750618563894466E1);
-  const Packet cst_cephes_log_q5 = pset1<Packet>(2.31251620126765340583E1);
+  // Combined range reduction via integer bias (same trick as float version).
+  PacketL vi_biased = padd(vi, cst_sqrt_half_offset);
+  PacketL e_int = psub(psub(plogical_shift_right<52>(vi_biased), cst_exp_bias), denorm_adj);
+  e = pcast<PacketL, Packet>(e_int);
+  Packet f = psub(preinterpret<Packet>(padd(pand(vi_biased, cst_mant_mask), cst_half_mant)), pset1<Packet>(1.0));
 
-  Packet e;
-  // extract significant in the range [0.5,1) and exponent
-  x = pfrexp(x, e);
+  // Rational approximation log(1+f) = f - 0.5*f^2 + f^3 * P(f)/Q(f)
+  // from Cephes, [5/5] rational on [sqrt(0.5)-1, sqrt(2)-1].
+  Packet f2 = pmul(f, f);
+  Packet f3 = pmul(f2, f);
 
-  // Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
-  // and shift by -1. The values are then centered around 0, which improves
-  // the stability of the polynomial evaluation.
-  //   if( x < SQRTHF ) {
-  //     e -= 1;
-  //     x = x + x - 1.0;
-  //   } else { x = x - 1.0; }
-  Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
-  Packet tmp = pand(x, mask);
-  x = psub(x, cst_1);
-  e = psub(e, pand(cst_1, mask));
-  x = padd(x, tmp);
-
-  Packet x2 = pmul(x, x);
-  Packet x3 = pmul(x2, x);
-
-  // Evaluate the polynomial in factored form for better instruction-level parallelism.
-  // y = x - 0.5*x^2 + x^3 * polevl( x, P, 5 ) / p1evl( x, Q, 5 ) );
+  // Evaluate P and Q in factored form for instruction-level parallelism.
   Packet y, y1, y_;
-  y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
-  y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
-  y = pmadd(y, x, cst_cephes_log_p2);
-  y1 = pmadd(y1, x, cst_cephes_log_p5);
-  y_ = pmadd(y, x3, y1);
+  y = pmadd(pset1<Packet>(1.01875663804580931796E-4), f, pset1<Packet>(4.97494994976747001425E-1));
+  y1 = pmadd(pset1<Packet>(1.44989225341610930846E1), f, pset1<Packet>(1.79368678507819816313E1));
+  y = pmadd(y, f, pset1<Packet>(4.70579119878881725854E0));
+  y1 = pmadd(y1, f, pset1<Packet>(7.70838733755885391666E0));
+  y_ = pmadd(y, f3, y1);
 
-  y = pmadd(cst_cephes_log_q0, x, cst_cephes_log_q1);
-  y1 = pmadd(cst_cephes_log_q3, x, cst_cephes_log_q4);
-  y = pmadd(y, x, cst_cephes_log_q2);
-  y1 = pmadd(y1, x, cst_cephes_log_q5);
-  y = pmadd(y, x3, y1);
+  y = pmadd(pset1<Packet>(1.0), f, pset1<Packet>(1.12873587189167450590E1));
+  y1 = pmadd(pset1<Packet>(8.29875266912776603211E1), f, pset1<Packet>(7.11544750618563894466E1));
+  y = pmadd(y, f, pset1<Packet>(4.52279145837532221105E1));
+  y1 = pmadd(y1, f, pset1<Packet>(2.31251620126765340583E1));
+  y = pmadd(y, f3, y1);
 
-  y_ = pmul(y_, x3);
+  y_ = pmul(y_, f3);
   y = pdiv(y_, y);
 
-  y = pmadd(cst_neg_half, x2, y);
-  x = padd(x, y);
+  y = pmadd(pset1<Packet>(-0.5), f2, y);
+  log_mantissa = padd(f, y);
+}
 
-  // Add the logarithm of the exponent back to the result of the interpolation.
+// Natural or base-2 logarithm for double packets.
+template <typename Packet, bool base2>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog_impl_double(const Packet _x) {
+  Packet log_mantissa, e;
+  plog_core_double(_x, log_mantissa, e);
+
+  // Add the logarithm of the exponent back to the result.
+  Packet x;
   if (base2) {
     const Packet cst_log2e = pset1<Packet>(static_cast<double>(EIGEN_LOG2E));
-    x = pmadd(x, cst_log2e, e);
+    x = pmadd(log_mantissa, cst_log2e, e);
   } else {
     const Packet cst_ln2 = pset1<Packet>(static_cast<double>(EIGEN_LN2));
-    x = pmadd(e, cst_ln2, x);
+    x = pmadd(e, cst_ln2, log_mantissa);
   }
 
+  const Packet cst_minus_inf = pset1frombits<Packet>(static_cast<uint64_t>(0xfff0000000000000ull));
+  const Packet cst_pos_inf = pset1frombits<Packet>(static_cast<uint64_t>(0x7ff0000000000000ull));
   Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
   Packet iszero_mask = pcmp_eq(_x, pzero(_x));
   Packet pos_inf_mask = pcmp_eq(_x, cst_pos_inf);
-  // Filter out invalid inputs, i.e.:
-  // - negative arg will be NAN
-  // - 0 will be -INF
-  // - +INF will be +INF
   return pselect(iszero_mask, cst_minus_inf, por(pselect(pos_inf_mask, cst_pos_inf, x), invalid_mask));
 }
 
@@ -287,7 +272,10 @@ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet generic_log1p_float(c
 }
 
 /** \internal \returns log(1 + x) for double precision float.
-    Same direct approach as the float version.
+    Computes log(1+x) using plog_core_double for the core range reduction
+    and polynomial evaluation. The rounding error from forming u = fl(1+x)
+    is recovered as dx = x - (u - 1), and folded in as a first-order
+    correction dx/u after the polynomial evaluation.
 */
 template <typename Packet>
 EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet generic_log1p_double(const Packet& x) {
@@ -295,61 +283,25 @@ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet generic_log1p_double(
   const Packet cst_minus_inf = pset1frombits<Packet>(static_cast<uint64_t>(0xfff0000000000000ull));
   const Packet cst_pos_inf = pset1frombits<Packet>(static_cast<uint64_t>(0x7ff0000000000000ull));
 
+  // u = 1 + x, with rounding. Recover the lost low bits: dx = x - (u - 1).
   Packet u = padd(one, x);
   Packet dx = psub(x, psub(u, one));
 
+  // For |x| tiny enough that u rounds to 1, return x directly.
   Packet small_mask = pcmp_eq(u, one);
+  // For u = +inf (x very large), return +inf.
   Packet inf_mask = pcmp_eq(u, cst_pos_inf);
 
-  const Packet cst_cephes_SQRTHF = pset1<Packet>(0.70710678118654752440E0);
-  Packet e;
-  Packet m = pfrexp(u, e);
-  Packet mask = pcmp_lt(m, cst_cephes_SQRTHF);
-  Packet tmp = pand(m, mask);
-  m = psub(m, one);
-  e = psub(e, pand(one, mask));
-  m = padd(m, tmp);
+  // Core range reduction and polynomial on u.
+  Packet log_u, e;
+  plog_core_double(u, log_u, e);
 
-  // Same polynomial as plog_double.
-  const Packet cst_neg_half = pset1<Packet>(-0.5);
-  const Packet cst_cephes_log_p0 = pset1<Packet>(1.01875663804580931796E-4);
-  const Packet cst_cephes_log_p1 = pset1<Packet>(4.97494994976747001425E-1);
-  const Packet cst_cephes_log_p2 = pset1<Packet>(4.70579119878881725854E0);
-  const Packet cst_cephes_log_p3 = pset1<Packet>(1.44989225341610930846E1);
-  const Packet cst_cephes_log_p4 = pset1<Packet>(1.79368678507819816313E1);
-  const Packet cst_cephes_log_p5 = pset1<Packet>(7.70838733755885391666E0);
-  const Packet cst_cephes_log_q0 = pset1<Packet>(1.0);
-  const Packet cst_cephes_log_q1 = pset1<Packet>(1.12873587189167450590E1);
-  const Packet cst_cephes_log_q2 = pset1<Packet>(4.52279145837532221105E1);
-  const Packet cst_cephes_log_q3 = pset1<Packet>(8.29875266912776603211E1);
-  const Packet cst_cephes_log_q4 = pset1<Packet>(7.11544750618563894466E1);
-  const Packet cst_cephes_log_q5 = pset1<Packet>(2.31251620126765340583E1);
-
-  Packet m2 = pmul(m, m);
-  Packet m3 = pmul(m2, m);
-
-  Packet y, y1, y_;
-  y = pmadd(cst_cephes_log_p0, m, cst_cephes_log_p1);
-  y1 = pmadd(cst_cephes_log_p3, m, cst_cephes_log_p4);
-  y = pmadd(y, m, cst_cephes_log_p2);
-  y1 = pmadd(y1, m, cst_cephes_log_p5);
-  y_ = pmadd(y, m3, y1);
-
-  y = pmadd(cst_cephes_log_q0, m, cst_cephes_log_q1);
-  y1 = pmadd(cst_cephes_log_q3, m, cst_cephes_log_q4);
-  y = pmadd(y, m, cst_cephes_log_q2);
-  y1 = pmadd(y1, m, cst_cephes_log_q5);
-  y = pmadd(y, m3, y1);
-
-  y_ = pmul(y_, m3);
-  Packet log_m = pdiv(y_, y);
-  log_m = pmadd(cst_neg_half, m2, log_m);
-  log_m = padd(m, log_m);
-
-  // result = e * ln2 + log(m) + dx/u.
+  // result = e * ln2 + log(u) + dx/u.
+  // The dx/u term corrects for the rounding error in u = fl(1+x).
   const Packet cst_ln2 = pset1<Packet>(static_cast<double>(EIGEN_LN2));
-  Packet result = pmadd(e, cst_ln2, padd(log_m, pdiv(dx, u)));
+  Packet result = pmadd(e, cst_ln2, padd(log_u, pdiv(dx, u)));
+
+  // Handle special cases.
   Packet neg_mask = pcmp_lt(u, pzero(u));
   Packet zero_mask = pcmp_eq(x, pset1<Packet>(-1.0));
   result = pselect(small_mask, x, result);
diff --git a/test/ulp_accuracy/ulp_accuracy.cpp b/test/ulp_accuracy/ulp_accuracy.cpp
index 8a8c62f05..6882a3567 100644
--- a/test/ulp_accuracy/ulp_accuracy.cpp
+++ b/test/ulp_accuracy/ulp_accuracy.cpp
@@ -233,15 +233,17 @@ static std::vector<FuncEntry<Scalar>> build_func_table() {
 // Range iteration helpers
 // ============================================================================
-// Advances x toward +inf by at least 1 ULP. When step_eps > 0, additionally
-// jumps by a relative factor of (1 + step_eps) to sample the range sparsely.
+// Advances a non-negative value toward +inf by at least 1 ULP. When step_eps > 0,
+// additionally jumps by max(|x|, min_normal) * step_eps. For normals this is
+// equivalent to x * (1 + eps). For denormals where x * eps < smallest_denormal,
+// the min_normal floor ensures we still skip through the denormal region at a
+// rate matching the smallest normals rather than stalling at 1 ULP per step.
 template <typename Scalar>
-static inline Scalar advance_by_step(Scalar x, double step_eps) {
+static inline Scalar advance_positive(Scalar x, double step_eps) {
   Scalar next = std::nextafter(x, std::numeric_limits<Scalar>::infinity());
   if (step_eps > 0.0 && std::isfinite(next)) {
-    // Try to jump further by a relative amount.
-    Scalar jumped = next > 0 ? next * static_cast<Scalar>(1.0 + step_eps) : next / static_cast<Scalar>(1.0 + step_eps);
-    // Use the jump only if it actually advances further (handles denormal stalling).
+    Scalar base = std::max(next, std::numeric_limits<Scalar>::min());
+    Scalar jumped = next + base * static_cast<Scalar>(step_eps);
     if (jumped > next) next = jumped;
   }
   return next;
 }
@@ -281,26 +283,60 @@ static double linear_to_scalar(int64_t lin, double /*tag*/) {
 // Dynamic work queue: threads atomically claim chunks for load balancing
 // ============================================================================
 
+// Work queue that distributes chunks in positive absolute-value linear space.
+// Iteration goes outward from 0: the worker tests both +|x| and -|x| for
+// each sampled magnitude, so the multiplicative step (1 + eps) always works
+// cleanly — no special handling for negative values needed.
 template <typename Scalar>
 struct WorkQueue {
   int64_t range_hi_lin;
   int64_t chunk_size;
   double step_eps;
   std::atomic<int64_t> next_lin;
+  Scalar orig_lo;  // original range for sign filtering
+  Scalar orig_hi;
+  bool test_pos;  // whether any positive values are in [lo, hi]
+  bool test_neg;  // whether any negative values are in [lo, hi]
 
-  void init(Scalar lo, Scalar hi, int64_t csz, double step) {
-    range_hi_lin = scalar_to_linear(hi);
-    chunk_size = csz;
+  void init(Scalar lo, Scalar hi, int num_threads, double step) {
+    orig_lo = lo;
+    orig_hi = hi;
+    test_pos = (hi >= Scalar(0));
+    test_neg = (lo < Scalar(0));
+
+    // Compute absolute-value iteration range.
+    Scalar abs_lo, abs_hi;
+    if (lo <= Scalar(0) && hi >= Scalar(0)) {
+      abs_lo = Scalar(0);
+      abs_hi = std::max(std::abs(lo), hi);
+    } else {
+      abs_lo = std::min(std::abs(lo), std::abs(hi));
+      abs_hi = std::max(std::abs(lo), std::abs(hi));
+    }
+
+    range_hi_lin = scalar_to_linear(abs_hi);
     step_eps = step;
-    next_lin.store(scalar_to_linear(lo), std::memory_order_relaxed);
+    next_lin.store(scalar_to_linear(abs_lo), std::memory_order_relaxed);
+
+    uint64_t total_abs = count_scalars_in_range(abs_lo, abs_hi);
+    chunk_size = std::max(int64_t(1), static_cast<int64_t>(total_abs / (num_threads * 16)));
+    if (step > 0.0) {
+      // Ensure chunks are large enough that advance_positive's min_normal floor
+      // can actually skip the denormal region. The denormal region contains
+      // count_scalars_in_range(0, min_normal) ULPs; any chunk must span at
+      // least that many so the min_normal-based jump lands past chunk_hi.
+      int64_t denorm_span = static_cast<int64_t>(count_scalars_in_range(Scalar(0), std::numeric_limits<Scalar>::min()));
+      chunk_size = std::max(chunk_size, denorm_span);
+    }
   }
 
-  // Claim the next chunk. Returns false when no work remains.
+  // Claim the next chunk of absolute values. Returns false when no work remains.
   bool claim(Scalar& chunk_lo, Scalar& chunk_hi) {
     int64_t lo_lin = next_lin.fetch_add(chunk_size, std::memory_order_relaxed);
-    if (lo_lin > range_hi_lin) return false;
-    int64_t hi_lin = lo_lin + chunk_size - 1;
-    if (hi_lin > range_hi_lin) hi_lin = range_hi_lin;
+    if (lo_lin > range_hi_lin || lo_lin < 0) return false;
+    // Compute hi_lin carefully to avoid int64_t overflow.
+    int64_t remaining = range_hi_lin - lo_lin;
+    int64_t hi_lin = (remaining < chunk_size - 1) ? range_hi_lin : lo_lin + chunk_size - 1;
     chunk_lo = linear_to_scalar(lo_lin, Scalar(0));
     chunk_hi = linear_to_scalar(hi_lin, Scalar(0));
     return true;
   }
@@ -322,8 +358,12 @@ static void worker(const FuncEntry<Scalar>& func, WorkQueue<Scalar>& queue, int
 #ifdef EIGEN_HAS_MPFR
   mpfr_t mp_in, mp_out;
   if (use_mpfr) {
-    mpfr_init2(mp_in, 128);
-    mpfr_init2(mp_out, 128);
+    // Use 2x the mantissa bits of Scalar for the reference: 48 for float (24-bit
+    // mantissa), 106 for double (53-bit mantissa). This is sufficient for correctly-
+    // rounded results while keeping MPFR evaluation fast.
+    constexpr int kMpfrBits = std::is_same<Scalar, float>::value ? 48 : 106;
+    mpfr_init2(mp_in, kMpfrBits);
+    mpfr_init2(mp_out, kMpfrBits);
   }
 #else
   (void)use_mpfr;
@@ -348,32 +388,42 @@ static void worker(const FuncEntry<Scalar>& func, WorkQueue<Scalar>& queue, int
     }
   };
 
+  auto flush_batch = [&](int& idx) {
+    if (idx == 0) return;
+    for (int i = idx; i < batch_size; i++) input[i] = input[idx - 1];
+    func.eigen_eval(eigen_out, input);
+    process_batch(idx, input, eigen_out);
+    idx = 0;
+  };
+
+  auto push_value = [&](Scalar v, int& idx) {
+    input[idx++] = v;
+    if (idx == batch_size) flush_batch(idx);
+  };
+
   Scalar chunk_lo, chunk_hi;
   while (queue.claim(chunk_lo, chunk_hi)) {
     int idx = 0;
-    Scalar x = chunk_lo;
+    Scalar abs_x = chunk_lo;
     for (;;) {
-      input[idx] = x;
-      idx++;
-
-      if (idx == batch_size) {
-        func.eigen_eval(eigen_out, input);
-        process_batch(batch_size, input, eigen_out);
-        idx = 0;
+      // Test +|x| if positive values are in range.
+      if (queue.test_pos && abs_x >= queue.orig_lo && abs_x <= queue.orig_hi) {
+        push_value(abs_x, idx);
+      }
+      // Test -|x| if negative values are in range (skip -0 to avoid testing 0 twice).
+      if (queue.test_neg && abs_x != Scalar(0)) {
+        Scalar neg_x = -abs_x;
+        if (neg_x >= queue.orig_lo && neg_x <= queue.orig_hi) {
+          push_value(neg_x, idx);
+        }
      }
 
-      if (x >= chunk_hi) break;
-      Scalar next = advance_by_step(x, queue.step_eps);
-      x = (next > chunk_hi) ? chunk_hi : next;
+      if (abs_x >= chunk_hi) break;
+      Scalar next = advance_positive(abs_x, queue.step_eps);
+      abs_x = (next > chunk_hi) ? chunk_hi : next;
     }
 
-    // Process remaining partial batch. Pad unused slots with the last valid
-    // input so the full-size vectorized eval doesn't read uninitialized memory.
-    if (idx > 0) {
-      for (int i = idx; i < batch_size; i++) input[i] = input[idx - 1];
-      func.eigen_eval(eigen_out, input);
-      process_batch(idx, input, eigen_out);
-    }
+    flush_batch(idx);
   }
 
 #ifdef EIGEN_HAS_MPFR
@@ -439,11 +489,12 @@ static int run_test(const Options& opts) {
   std::printf("Function: %s (%s)\n", opts.func_name.c_str(), kTypeName);
   std::printf("Range: [%.*g, %.*g]\n", kDigits, double(lo), kDigits, double(hi));
   if (opts.step_eps > 0.0) {
-    std::printf("Sampling step: (1 + %g) * nextafter(x)\n", opts.step_eps);
+    std::printf("Sampling step: |x| * (1 + %g)\n", opts.step_eps);
   } else {
     std::printf("Representable values in range: %lu\n", static_cast<unsigned long>(total_scalars));
   }
-  std::printf("Reference: %s\n", opts.use_mpfr ? "MPFR (128-bit)" : "std C++ math");
+  std::printf("Reference: %s\n",
+              opts.use_mpfr ? (opts.use_double ? "MPFR (106-bit)" : "MPFR (48-bit)") : "std C++ math");
   std::printf("Threads: %d\n", num_threads);
   std::printf("Batch size: %d\n", opts.batch_size);
   std::printf("\n");
@@ -459,13 +510,8 @@ static int run_test(const Options& opts) {
     results.back()->init(opts.hist_width);
   }
 
-  // Use dynamic work distribution: threads claim small chunks from a shared
-  // queue. This ensures even load balancing regardless of how per-value
-  // work varies across the range (e.g. log on negatives is trivial).
-  // Choose chunk_size so we get ~16 chunks per thread for good balancing.
-  int64_t chunk_size = std::max(int64_t(1), static_cast<int64_t>(total_scalars / (num_threads * 16)));
   WorkQueue<Scalar> queue;
-  queue.init(lo, hi, chunk_size, opts.step_eps);
+  queue.init(lo, hi, num_threads, opts.step_eps);
 
   std::vector<std::thread> threads;
   auto start_time = std::chrono::steady_clock::now();
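
For reference, the integer bias trick that plog_core_double applies per lane corresponds
to the scalar model below. This is an illustrative sketch only, not part of the patch:
the helper name log_range_reduce is made up, and it assumes a positive, normal
(non-denormal) IEEE-754 double input, i.e. it omits the 2^52 rescaling and denorm_adj
handling that the packet code performs first.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Decompose a positive, normal double v as v = 2^e * (1 + f), with
// 1 + f in [sqrt(0.5), sqrt(2)), using only integer operations on the bits.
static void log_range_reduce(double v, double& f, int& e) {
  const uint64_t kSqrtHalfBits = 0x3fe6a09e667f3bcdULL;                    // bits of sqrt(0.5)
  const uint64_t kSqrtHalfOffset = 0x3ff0000000000000ULL - kSqrtHalfBits;  // 0x00095f619980c433
  uint64_t vi;
  std::memcpy(&vi, &v, sizeof(vi));
  // Adding the offset moves the exponent-field boundary from 1.0 down to sqrt(0.5),
  // so the exponent field of (vi + offset), minus the bias 1023, is the e we want.
  const uint64_t biased = vi + kSqrtHalfOffset;
  e = static_cast<int>(biased >> 52) - 1023;
  // Re-attach the reduced mantissa bits to the exponent of sqrt(0.5); the sum is
  // exactly the bit pattern of v / 2^e, which lies in [sqrt(0.5), sqrt(2)).
  const uint64_t mant = (biased & 0x000fffffffffffffULL) + kSqrtHalfBits;
  double one_plus_f;
  std::memcpy(&one_plus_f, &mant, sizeof(one_plus_f));
  f = one_plus_f - 1.0;
}

int main() {
  double f;
  int e;
  log_range_reduce(0.6, f, e);
  // Reassemble the logarithm the same way plog_impl_double does:
  // log(v) = e * ln(2) + log(1 + f).
  std::printf("%.17g\n", e * 0.6931471805599453 + std::log1p(f));  // ~= log(0.6)
  return 0;
}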