/* CLOCK_MONOTONIC vs CLOCK_MONOTONIC_RAW skew test * by: john stultz (johnstul@us.ibm.com) * John Stultz * (C) Copyright IBM 2012 * (C) Copyright Linaro Limited 2015 * Licensed under the GPLv2 * * To build: * $ gcc raw_skew.c -o raw_skew -lrt * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include #include #include #include #include #include #ifdef KTEST #include "../kselftest.h" #else static inline int ksft_exit_pass(void) { exit(0); } static inline int ksft_exit_fail(void) { exit(1); } #endif #define CLOCK_MONOTONIC_RAW 4 #define NSEC_PER_SEC 1000000000LL #define shift_right(x, s) ({ \ __typeof__(x) __x = (x); \ __typeof__(s) __s = (s); \ __x < 0 ? 
-(-__x >> __s) : __x >> __s; \ }) long long llabs(long long val) { if (val < 0) val = -val; return val; } unsigned long long ts_to_nsec(struct timespec ts) { return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; } struct timespec nsec_to_ts(long long ns) { struct timespec ts; ts.tv_sec = ns/NSEC_PER_SEC; ts.tv_nsec = ns%NSEC_PER_SEC; return ts; } long long diff_timespec(struct timespec start, struct timespec end) { long long start_ns, end_ns; start_ns = ts_to_nsec(start); end_ns = ts_to_nsec(end); return end_ns - start_ns; } void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw) { struct timespec start, mid, end; long long diff = 0, tmp; int i; for (i = 0; i < 3; i++) { long long newdiff; clock_gettime(CLOCK_MONOTONIC, &start); clock_gettime(CLOCK_MONOTONIC_RAW, &mid); clock_gettime(CLOCK_MONOTONIC, &end); newdiff = diff_timespec(start, end); if (diff == 0 || newdiff < diff) { diff = newdiff; *raw = mid; tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2; *mon = nsec_to_ts(tmp); } } } int main(int argv, char **argc) { struct timespec mon, raw, start, end; long long delta1, delta2, interval, eppm, ppm; struct timex tx1, tx2; setbuf(stdout, NULL); if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) { printf("ERR: NO CLOCK_MONOTONIC_RAW\n"); return -1; } tx1.modes = 0; adjtimex(&tx1); get_monotonic_and_raw(&mon, &raw); start = mon; delta1 = diff_timespec(mon, raw); if (tx1.offset) printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n"); printf("Estimating clock drift: "); sleep(120); get_monotonic_and_raw(&mon, &raw); end = mon; tx2.modes = 0; adjtimex(&tx2); delta2 = diff_timespec(mon, raw); interval = diff_timespec(start, end); /* calculate measured ppm between MONOTONIC and MONOTONIC_RAW */ eppm = ((delta2-delta1)*NSEC_PER_SEC)/interval; eppm = -eppm; printf("%lld.%i(est)", eppm/1000, abs((int)(eppm%1000))); /* Avg the two actual freq samples adjtimex gave us */ ppm = (tx1.freq + tx2.freq) * 1000 / 2; ppm = (long long)tx1.freq * 1000; ppm = 
shift_right(ppm, 16); printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000))); if (llabs(eppm - ppm) > 1000) { printf(" [FAILED]\n"); return ksft_exit_fail(); } printf(" [OK]\n"); return ksft_exit_pass(); } g?id=56067812d5b0e737ac2063e94a50f76b810d6ca3'>Collapse)AuthorFilesLines ent'>
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2017-01-31 23:58:38 +0100
committerIngo Molnar <mingo@kernel.org>2017-02-01 08:37:27 +0100
commitdd86e373e09fb16b83e8adf5c48c421a4ca76468 (patch)
tree55703c2ea8584e303e342090614e0aab3509ab21 /tools/include/asm-generic/bitops.h
parent0b3589be9b98994ce3d5aeca52445d1f5627c4ba (diff)
perf/x86/intel/rapl: Make package handling more robust
The package management code in RAPL relies on package mapping being available before a CPU is started. This changed with: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") because the ACPI/BIOS information turned out to be unreliable, but that left RAPL in broken state. This was not noticed because on a regular boot all CPUs are online before RAPL is initialized. A possible fix would be to reintroduce the mess which allocates a package data structure in CPU prepare and when it turns out to already exist in starting throw it away later in the CPU online callback. But that's a horrible hack and not required at all because RAPL becomes functional for perf only in the CPU online callback. That's correct because user space is not yet informed about the CPU being onlined, so nothing can rely on RAPL being available on that particular CPU. Move the allocation to the CPU online callback and simplify the hotplug handling. At this point the package mapping is established and correct. This also adds a missing check for available package data in the event_init() function. Reported-by: Yasuaki Ishimatsu <yasu.isimatu@gmail.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sebastian Siewior <bigeasy@linutronix.de> Cc: Stephane Eranian <eranian@google.com> Cc: Vince Weaver <vincent.weaver@maine.edu> Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools/include/asm-generic/bitops.h')