/*
 * Copyright (C) 2013  Davidlohr Bueso
 *
 * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
 *
 * This program is particularly useful for measuring the kernel's futex hash
 * table/function implementation. In order for it to make sense, use it with
 * as many threads and futexes as possible.
 */

/* For the CLR_() macros */
#include <pthread.h>

#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"

#include <err.h>
#include <sys/time.h>

static unsigned int nthreads = 0;
static unsigned int nsecs    = 10;
/* amount of futexes per thread */
static unsigned int nfutexes = 1024;
static bool fshared = false, done = false, silent = false;
static int futex_flag = 0;

struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime", &nsecs,    "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"),
	OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",  &fshared,  "Use shared futexes instead of private ones"),
	OPT_END()
};

static const char * const bench_futex_hash_usage[] = {
	"perf bench futex hash <options>",
	NULL
};

static void *workerfn(void *arg)
{
	int ret;
	struct worker *w = (struct worker *) arg;
	unsigned int i;
	unsigned long ops = w->ops; /* avoid cacheline bouncing */

	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		for (i = 0; i < nfutexes; i++, ops++) {
			/*
			 * We want the futex calls to fail in order to stress
			 * the hashing of uaddr and not measure other steps,
			 * such as internal waitqueue handling, thus enlarging
			 * the critical region protected by hb->lock.
			 */
			ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
			/*
			 * Only an EAGAIN/EWOULDBLOCK failure (value mismatch)
			 * is expected; success or any other errno deserves a
			 * complaint.
			 */
			if (!silent &&
			    (!ret || (errno != EAGAIN && errno != EWOULDBLOCK)))
				warn("Unexpected futex wait return");
		}
	} while (!done);

	w->ops = ops;
	return NULL;
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&end, NULL);
	timersub(&end, &start, &runtime);
}

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int) runtime.tv_sec);
}

int bench_futex_hash(int argc, const char **argv,
		     const char *prefix __maybe_unused)
{
	int ret = 0;
	cpu_set_t cpu;
	struct sigaction act;
	unsigned int i, ncpus;
	pthread_attr_t thread_attr;
	struct worker *worker = NULL;

	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
	if (argc) {
		usage_with_options(bench_futex_hash_usage, options);
		exit(EXIT_FAILURE);
	}

	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	nsecs = futexbench_sanitize_numeric(nsecs);
	nfutexes = futexbench_sanitize_numeric(nfutexes);

	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (!nthreads) /* default to the number of CPUs */
		nthreads = ncpus;
	else
		nthreads = futexbench_sanitize_numeric(nthreads);

	worker = calloc(nthreads, sizeof(*worker));
	if (!worker)
		goto errmem;

	if (!fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
	       getpid(), nthreads, nfutexes, fshared ? "shared" : "private", nsecs);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = nthreads;
	pthread_attr_init(&thread_attr);
	gettimeofday(&start, NULL);
	for (i = 0; i < nthreads; i++) {
		worker[i].tid = i;
		worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
		if (!worker[i].futex)
			goto errmem;

		CPU_ZERO(&cpu);
		CPU_SET(i % ncpus, &cpu);

		ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
		if (ret)
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
				     (void *)(struct worker *) &worker[i]);
		if (ret)
			err(EXIT_FAILURE, "pthread_create");
	}
	pthread_attr_destroy(&thread_attr);

	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	sleep(nsecs);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	for (i = 0; i < nthreads; i++) {
		unsigned long t = worker[i].ops / runtime.tv_sec;
		update_stats(&throughput_stats, t);
		if (!silent) {
			if (nfutexes == 1)
				printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0], t);
			else
				printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
				       worker[i].tid, &worker[i].futex[0],
				       &worker[i].futex[nfutexes - 1], t);
		}

		free(worker[i].futex);
	}

	print_summary();

	free(worker);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}
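The comment in workerfn() is the core trick of the benchmark: each futex word is zeroed by calloc(), while futex_wait() passes 1234 as the expected value, so every FUTEX_WAIT does the interesting work (hash uaddr, take hb->lock, compare values) and then bails out with EAGAIN instead of sleeping on a waitqueue. A minimal standalone program demonstrating that expected-failure behavior (a hypothetical demo, not part of the benchmark sources):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

int main(void)
{
	unsigned int futex_word = 0;	/* the benchmark's words are also zero */
	long ret;

	/* Ask to sleep only if the word equals 1234 -- it does not. */
	ret = syscall(SYS_futex, &futex_word,
		      FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 1234, NULL, NULL, 0);

	if (ret == -1 && errno == EAGAIN)
		printf("EAGAIN as expected: uaddr was hashed, hb->lock taken, "
		       "values compared, and the call returned immediately.\n");
	else
		printf("unexpected result: ret=%ld errno=%d\n", ret, errno);
	return 0;
}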
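Example invocation, with option names taken from the options[] table above: "perf bench futex hash -t 8 -f 1024 -r 10" runs 8 threads, each hammering its own 1024 private futexes for 10 seconds. Adding -S switches to shared futexes (dropping FUTEX_PRIVATE_FLAG, which forces the kernel down the slower cross-process hashing path), and -s suppresses the per-thread "[thread N] futexes: ... [ ops/sec ]" lines, leaving only the averaged summary. Without -t, the thread count defaults to the number of online CPUs, with each worker pinned to CPU i % ncpus by pthread_attr_setaffinity_np().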