#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/fault-inject.h>

/*
 * setup_fault_attr() is a helper function for various __setup handlers, so it
 * returns 0 on error, because that is what __setup handlers do.
 */
int setup_fault_attr(struct fault_attr *attr, char *str)
{
	unsigned long probability;
	unsigned long interval;
	int times;
	int space;

	/* "<interval>,<probability>,<space>,<times>" */
	if (sscanf(str, "%lu,%lu,%d,%d",
			&interval, &probability, &space, &times) < 4) {
		printk(KERN_WARNING
			"FAULT_INJECTION: failed to parse arguments\n");
		return 0;
	}

	attr->probability = probability;
	attr->interval = interval;
	atomic_set(&attr->times, times);
	atomic_set(&attr->space, space);

	return 1;
}
EXPORT_SYMBOL_GPL(setup_fault_attr);

static void fail_dump(struct fault_attr *attr)
{
	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
		       "name %pd, interval %lu, probability %lu, "
		       "space %d, times %d\n", attr->dname,
		       attr->interval, attr->probability,
		       atomic_read(&attr->space),
		       atomic_read(&attr->times));
		if (attr->verbose > 1)
			dump_stack();
	}
}

#define atomic_dec_not_zero(v)		atomic_add_unless((v), -1, 0)

static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
	return !in_interrupt() && task->make_it_fail;
}

#define MAX_STACK_TRACE_DEPTH 32

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

static bool fail_stacktrace(struct fault_attr *attr)
{
	struct stack_trace trace;
	int depth = attr->stacktrace_depth;
	unsigned long entries[MAX_STACK_TRACE_DEPTH];
	int n;
	bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);

	if (depth == 0)
		return found;

	trace.nr_entries = 0;
	trace.entries = entries;
	trace.max_entries = depth;
	trace.skip = 1;

	save_stack_trace(&trace);
	for (n = 0; n < trace.nr_entries; n++) {
		if (attr->reject_start <= entries[n] &&
			       entries[n] < attr->reject_end)
			return false;
		if (attr->require_start <= entries[n] &&
			       entries[n] < attr->require_end)
			found = true;
	}
	return found;
}

#else

static inline bool fail_stacktrace(struct fault_attr *attr)
{
	return true;
}

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

/*
 * This code is stolen from failmalloc-1.0
 * http://www.nongnu.org/failmalloc/
 */

bool should_fail(struct fault_attr *attr, ssize_t size)
{
	/* No need to check any other properties if the probability is 0 */
	if (attr->probability == 0)
		return false;

	if (attr->task_filter && !fail_task(attr, current))
		return false;

	if (atomic_read(&attr->times) == 0)
		return false;

	if (atomic_read(&attr->space) > size) {
		atomic_sub(size, &attr->space);
		return false;
	}

	if (attr->interval > 1) {
		attr->count++;
		if (attr->count % attr->interval)
			return false;
	}

	if (attr->probability <= prandom_u32() % 100)
		return false;

	if (!fail_stacktrace(attr))
		return false;

	fail_dump(attr);

	if (atomic_read(&attr->times) != -1)
		atomic_dec_not_zero(&attr->times);

	return true;
}
EXPORT_SYMBOL_GPL(should_fail);

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int debugfs_ul_set(void *data, u64 val)
{
	*(unsigned long *)data = val;
	return 0;
}

static int debugfs_ul_get(void *data, u64 *val)
{
	*val = *(unsigned long *)data;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");

static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
				struct dentry *parent, unsigned long *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_ul);
}

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

static int debugfs_stacktrace_depth_set(void *data, u64 val)
{
	*(unsigned long *)data =
		min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
			debugfs_stacktrace_depth_set, "%llu\n");

static struct dentry *debugfs_create_stacktrace_depth(
	const char *name, umode_t mode,
	struct dentry *parent, unsigned long *value)
{
	return debugfs_create_file(name, mode, parent, value,
				   &fops_stacktrace_depth);
}

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

struct dentry *fault_create_debugfs_attr(const char *name,
			struct dentry *parent, struct fault_attr *attr)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = debugfs_create_dir(name, parent);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
		goto fail;
	if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
		goto fail;
	if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
		goto fail;
	if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
		goto fail;
	if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
		goto fail;
	if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
				&attr->ratelimit_state.interval))
		goto fail;
	if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
				&attr->ratelimit_state.burst))
		goto fail;
	if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
		goto fail;

#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER

	if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
				&attr->stacktrace_depth))
		goto fail;
	if (!debugfs_create_ul("require-start", mode, dir,
				&attr->require_start))
		goto fail;
	if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
		goto fail;
	if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
		goto fail;
	if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
		goto fail;

#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */

	attr->dname = dget(dir);
	return dir;

fail:
	debugfs_remove_recursive(dir);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
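For context, here is a minimal sketch of how a subsystem typically consumes this API. It is not part of fault-inject.c; the names fail_foo, foo_setup_fail, foo_alloc and foo_fault_debugfs are hypothetical, while DECLARE_FAULT_ATTR(), setup_fault_attr(), should_fail() and fault_create_debugfs_attr() come from <linux/fault-inject.h> and the code above.

#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical per-subsystem fault_attr, initialised with default values. */
static DECLARE_FAULT_ATTR(fail_foo);

/* Accept "fail_foo=<interval>,<probability>,<space>,<times>" on the boot command line. */
static int __init foo_setup_fail(char *str)
{
	return setup_fault_attr(&fail_foo, str);
}
__setup("fail_foo=", foo_setup_fail);

/* Hot path: let should_fail() decide whether to force this allocation to fail. */
static void *foo_alloc(size_t size)
{
	if (should_fail(&fail_foo, size))
		return NULL;
	return kmalloc(size, GFP_KERNEL);
}

/* Optionally expose the knobs under /sys/kernel/debug/fail_foo/. */
static int __init foo_fault_debugfs(void)
{
	return PTR_ERR_OR_ZERO(fault_create_debugfs_attr("fail_foo",
							 NULL, &fail_foo));
}
late_initcall(foo_fault_debugfs);

This mirrors the pattern used by existing users such as failslab: a static fault_attr, an optional __setup hook, a should_fail() check on the hot path, and a debugfs directory for runtime tuning.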
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at start of the tracer. It is pinned to a single CPU and will move to the next CPU after each period of running. If the user modifies the migration thread's affinity, it will not change after that happens.

The original code created the thread at the first instance it was called, but later was changed to destroy the thread after the tracer was finished, and would not be created until the next instance of the tracer was established. The code that initialized the affinity was only called on the initial instantiation of the tracer. After that, it was not initialized, and the previous affinity did not match the current newly created one, making it appear that the user modified the thread's affinity when it did not, and the thread failed to migrate again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
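The following is not the trace_hwlat.c code itself, only a hypothetical sketch of the migration pattern the message describes: a sampling kthread pinned to one CPU that hops to the next allowed CPU each period, with the per-instance migration state (next_cpu here) reset every time the thread is created rather than only on the first tracer instance.

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/delay.h>

/* Per-instance migration state; stale values cause the "stops migrating" bug. */
static int next_cpu = -1;

static void move_to_next_cpu(struct task_struct *thread,
			     const struct cpumask *allowed)
{
	next_cpu = cpumask_next(next_cpu, allowed);
	if (next_cpu >= nr_cpu_ids)			/* wrap around */
		next_cpu = cpumask_first(allowed);

	set_cpus_allowed_ptr(thread, cpumask_of(next_cpu));
}

static int sample_thread_fn(void *data)
{
	const struct cpumask *allowed = data;

	while (!kthread_should_stop()) {
		move_to_next_cpu(current, allowed);
		/* ... run one sampling period on this CPU ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

/* The fix amounts to reinitialising the migration state on every start,
 * since the thread is now destroyed and recreated per tracer instance.
 */
static struct task_struct *start_sample_thread(const struct cpumask *allowed)
{
	next_cpu = -1;
	return kthread_run(sample_thread_fn, (void *)allowed, "sample-per-cpu");
}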