/*
 * Copyright (C) 2003 Bernardo Innocenti
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'. The original source and full proof
 * can be found here and are available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
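
/*
 * Worked example of the shift-and-correct step in div64_u64() above
 * (values chosen purely for illustration): dividend = 100ULL << 32,
 * divisor = 6ULL << 32, so the true quotient is 16.
 *
 *	high = 6, n = 1 + fls(6) = 4
 *	quot = (dividend >> 4) / (divisor >> 4) = 16   (estimate)
 *	quot != 0, so quot-- yields 15                 (never too large)
 *	dividend - 15 * divisor = 10ULL << 32 >= divisor, so quot++ yields 16
 *
 * Shifting both operands right by n can make the estimate exceed the
 * true quotient by at most one; the pre-decrement removes that possible
 * excess and the final compare adds back at most one.
 */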
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
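
/*
 * Usage sketch for iter_div_u64_rem() (illustrative values, not taken
 * from this file): __iter_div_u64_rem() subtracts the divisor in a
 * loop, so it only beats a full divide when the quotient is known to
 * be small.
 *
 *	u64 rem;
 *	u32 sec = iter_div_u64_rem(2000000123ULL, NSEC_PER_SEC, &rem);
 *	// sec == 2, rem == 123
 */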