/*
 * timeconst.bc — generates include/generated/timeconst.h, the kernel's
 * precomputed HZ<->msec/usec conversion constants, using reciprocal
 * multiplication (multiply + shift instead of a runtime division).
 * Reads HZ from stdin and prints the header on stdout.
 */

scale=0

/* Greatest common divisor of a and b (iterative Euclid). */
define gcd(a,b) {
	auto t;
	while (b) {
		t = b;
		b = a % b;
		a = t;
	}
	return a;
}

/* Division by reciprocal multiplication: ceil(2^b * n / d). */
define fmul(b,n,d) {
	return (2^b*n+d-1)/d;
}

/* Adjustment factor when a ceiling value is used.  Use as:
   (imul * n) + (fmulxx * n + fadjxx) >> xx) */
define fadj(b,n,d) {
	auto v;
	/* Reduce d by gcd(n,d) so the adjustment matches the reduced fraction. */
	d = d/gcd(n,d);
	v = 2^b*(d-1)/d;
	return v;
}

/* Compute the appropriate mul/adj values as well as a shift count,
   which brings the mul value into the range 2^b-1 <= x < 2^b.  Such
   a shift value will be correct in the signed integer range and off
   by at most one in the upper half of the unsigned range. */
define fmuls(b,n,d) {
	auto s, m;
	for (s = 0; 1; s++) {
		m = fmul(s,n,d);
		if (m >= 2^(b-1))
			return s;
	}
	/* Unreachable: the loop always returns, but bc wants a tail return. */
	return 0;
}

/* Emit the complete timeconst.h header for the given HZ value. */
define timeconst(hz) {
	print "/* Automatically generated by kernel/time/timeconst.bc */\n"
	print "/* Time conversion constants for HZ == ", hz, " */\n"
	print "\n"

	print "#ifndef KERNEL_TIMECONST_H\n"
	print "#define KERNEL_TIMECONST_H\n\n"

	/* Include targets restored from the upstream kernel file; the angle-
	   bracketed paths had been stripped as HTML tags in this copy. */
	print "#include <linux/param.h>\n"
	print "#include <linux/types.h>\n\n"

	print "#if HZ != ", hz, "\n"
	/* \q is GNU bc's escape for a double-quote character. */
	print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
	print "#endif\n\n"

	if (hz < 2) {
		print "#error Totally bogus HZ value!\n"
	} else {
		/* HZ -> milliseconds: 32-bit mul/adj/shift triple. */
		s=fmuls(32,1000,hz)
		obase=16
		print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
		print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
		obase=10
		print "#define HZ_TO_MSEC_SHR32\t", s, "\n"

		/* Milliseconds -> HZ. */
		s=fmuls(32,hz,1000)
		obase=16
		print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
		print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
		obase=10
		print "#define MSEC_TO_HZ_SHR32\t", s, "\n"

		/* Exact reduced fractions for the msec conversions. */
		obase=10
		cd=gcd(hz,1000)
		print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
		print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
		print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
		print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
		print "\n"

		/* HZ -> microseconds. */
		s=fmuls(32,1000000,hz)
		obase=16
		print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
		print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
		obase=10
		print "#define HZ_TO_USEC_SHR32\t", s, "\n"

		/* Microseconds -> HZ. */
		s=fmuls(32,hz,1000000)
		obase=16
		print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
		print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
		obase=10
		print "#define USEC_TO_HZ_SHR32\t", s, "\n"

		/* Exact reduced fractions for the usec conversions. */
		obase=10
		cd=gcd(hz,1000000)
		print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
		print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
		print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
		print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
		print "\n"

		print "#endif /* KERNEL_TIMECONST_H */\n"
	}
	halt
}

hz = read();
timeconst(hz)
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2017-01-16 07:45:44 +0100
committerIngo Molnar <mingo@kernel.org>2017-01-16 07:45:44 +0100
commit3e4f7a4956e54143f7fc15c636158ad4166d219d (patch)
treec286358c42c47328a12f523878e8cc9fa022f9f9
parentf4d3935e4f4884ba80561db5549394afb8eef8f7 (diff)
parent52d7e48b86fc108e45a656d8e53e4237993c481d (diff)
Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into rcu/urgent
Pull an urgent RCU fix from Paul E. McKenney: "This series contains a pair of commits that permit RCU synchronous grace periods (synchronize_rcu() and friends) to work correctly throughout boot. This eliminates the current "dead time" starting when the scheduler spawns its first task and ending when the last of RCU's kthreads is spawned (this last happens during early_initcall() time). Although RCU's synchronous grace periods have long been documented as not working during this time, prior to 4.9, the expedited grace periods worked by accident, and some ACPI code came to rely on this unintentional behavior. (Note that this unintentional behavior was -not- reliable. For example, failures from ACPI could occur on !SMP systems and on systems booting with the rcu_normal kernel boot parameter.) Either way, there is a bug that needs fixing, and the 4.9 switch of RCU's expedited grace periods to workqueues could be considered to have caused a regression. This series therefore makes RCU's expedited grace periods operate correctly throughout the boot process. This has been demonstrated to fix the problems ACPI was encountering, and has the added longer-term benefit of simplifying RCU's behavior." Signed-off-by: Ingo Molnar <mingo@kernel.org>