/* * Copyright 2015, Cyril Bur, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This test attempts to see if the VSX registers change across preemption. * There is no way to be sure preemption happened so this test just * uses many threads and a long wait. As such, a successful test * doesn't mean much but a failure is bad. */ #include #include #include #include #include #include #include #include #include #include "utils.h" /* Time to wait for workers to get preempted (seconds) */ #define PREEMPT_TIME 20 /* * Factor by which to multiply number of online CPUs for total number of * worker threads */ #define THREAD_FACTOR 8 /* * Ensure there is twice the number of non-volatile VMX regs! * check_vmx() is going to use the other half as space to put the live * registers before calling vsx_memcmp() */ __thread vector int varray[24] = { {1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10,11,12}, {13,14,15,16}, {17,18,19,20}, {21,22,23,24}, {25,26,27,28}, {29,30,31,32}, {33,34,35,36}, {37,38,39,40}, {41,42,43,44}, {45,46,47,48} }; int threads_starting; int running; extern long preempt_vsx(vector int *varray, int *threads_starting, int *running); long vsx_memcmp(vector int *a) { vector int zero = {0, 0, 0, 0}; int i; FAIL_IF(a != varray); for(i = 0; i < 12; i++) { if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) { fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12); return 2; } } if (memcmp(a, &a[12], 12 * sizeof(vector int))) { long *p = (long *)a; fprintf(stderr, "VSX mismatch\n"); for (i = 0; i < 24; i=i+2) fprintf(stderr, "%d: 0x%08lx%08lx | 0x%08lx%08lx\n", i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]); return 1; } return 0; } void *preempt_vsx_c(void *p) { int i, j; long rc; srand(pthread_self()); for (i = 0; i < 12; i++) for (j = 0; j < 4; j++) 
{ varray[i][j] = rand(); /* Don't want zero because it hides kernel problems */ if (varray[i][j] == 0) j--; } rc = preempt_vsx(varray, &threads_starting, &running); if (rc == 2) fprintf(stderr, "Caught zeros in VSX compares\n"); return (void *)rc; } int test_preempt_vsx(void) { int i, rc, threads; pthread_t *tids; threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; tids = malloc(threads * sizeof(pthread_t)); FAIL_IF(!tids); running = true; threads_starting = threads; for (i = 0; i < threads; i++) { rc = pthread_create(&tids[i], NULL, preempt_vsx_c, NULL); FAIL_IF(rc); } setbuf(stdout, NULL); /* Not really nessesary but nice to wait for every thread to start */ printf("\tWaiting for %d workers to start...", threads_starting); while(threads_starting) asm volatile("": : :"memory"); printf("done\n"); printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME); sleep(PREEMPT_TIME); printf("done\n"); printf("\tStopping workers..."); /* * Working are checking this value every loop. In preempt_vsx 'cmpwi r5,0; bne 2b'. * r5 will have loaded the value of running. */ running = 0; for (i = 0; i < threads; i++) { void *rc_p; pthread_join(tids[i], &rc_p); /* * Harness will say the fail was here, look at why preempt_vsx * returned */ if ((long) rc_p) printf("oops\n"); FAIL_IF((long) rc_p); } printf("done\n"); return 0; } int main(int argc, char *argv[]) { return test_harness(test_preempt_vsx, "vsx_preempt"); } 'cgit-panel'>diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2017-01-31 23:58:38 +0100
committerIngo Molnar <mingo@kernel.org>2017-02-01 08:37:27 +0100
commitdd86e373e09fb16b83e8adf5c48c421a4ca76468 (patch)
tree55703c2ea8584e303e342090614e0aab3509ab21 /sound/soc/codecs/sirf-audio-codec.h
parent0b3589be9b98994ce3d5aeca52445d1f5627c4ba (diff)
perf/x86/intel/rapl: Make package handling more robust
The package management code in RAPL relies on package mapping being available before a CPU is started. This changed with: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") because the ACPI/BIOS information turned out to be unreliable, but that left RAPL in broken state. This was not noticed because on a regular boot all CPUs are online before RAPL is initialized. A possible fix would be to reintroduce the mess which allocates a package data structure in CPU prepare and when it turns out to already exist in starting throw it away later in the CPU online callback. But that's a horrible hack and not required at all because RAPL becomes functional for perf only in the CPU online callback. That's correct because user space is not yet informed about the CPU being onlined, so nothing can rely on RAPL being available on that particular CPU. Move the allocation to the CPU online callback and simplify the hotplug handling. At this point the package mapping is established and correct. This also adds a missing check for available package data in the event_init() function. Reported-by: Yasuaki Ishimatsu <yasu.isimatu@gmail.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sebastian Siewior <bigeasy@linutronix.de> Cc: Stephane Eranian <eranian@google.com> Cc: Vince Weaver <vincent.weaver@maine.edu> Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'sound/soc/codecs/sirf-audio-codec.h')