#include "reiserfs.h" #include /* * The previous reiserfs locking scheme was heavily based on * the tricky properties of the Bkl: * * - it was acquired recursively by a same task * - the performances relied on the release-while-schedule() property * * Now that we replace it by a mutex, we still want to keep the same * recursive property to avoid big changes in the code structure. * We use our own lock_owner here because the owner field on a mutex * is only available in SMP or mutex debugging, also we only need this field * for this mutex, no need for a system wide mutex facility. * * Also this lock is often released before a call that could block because * reiserfs performances were partially based on the release while schedule() * property of the Bkl. */ void reiserfs_write_lock(struct super_block *s) { struct reiserfs_sb_info *sb_i = REISERFS_SB(s); if (sb_i->lock_owner != current) { mutex_lock(&sb_i->lock); sb_i->lock_owner = current; } /* No need to protect it, only the current task touches it */ sb_i->lock_depth++; } void reiserfs_write_unlock(struct super_block *s) { struct reiserfs_sb_info *sb_i = REISERFS_SB(s); /* * Are we unlocking without even holding the lock? * Such a situation must raise a BUG() if we don't want * to corrupt the data. */ BUG_ON(sb_i->lock_owner != current); if (--sb_i->lock_depth == -1) { sb_i->lock_owner = NULL; mutex_unlock(&sb_i->lock); } } int __must_check reiserfs_write_unlock_nested(struct super_block *s) { struct reiserfs_sb_info *sb_i = REISERFS_SB(s); int depth; /* this can happen when the lock isn't always held */ if (sb_i->lock_owner != current) return -1; depth = sb_i->lock_depth; sb_i->lock_depth = -1; sb_i->lock_owner = NULL; mutex_unlock(&sb_i->lock); return depth; } void reiserfs_write_lock_nested(struct super_block *s, int depth) { struct reiserfs_sb_info *sb_i = REISERFS_SB(s); /* this can happen when the lock isn't always held */ if (depth == -1) return; mutex_lock(&sb_i->lock); sb_i->lock_owner = current; sb_i->lock_depth = depth; } /* * Utility function to force a BUG if it is called without the superblock * write lock held. caller is the string printed just before calling BUG() */ void reiserfs_check_lock_depth(struct super_block *sb, char *caller) { struct reiserfs_sb_info *sb_i = REISERFS_SB(sb); WARN_ON(sb_i->lock_depth < 0); } #ifdef CONFIG_REISERFS_CHECK void reiserfs_lock_check_recursive(struct super_block *sb) { struct reiserfs_sb_info *sb_i = REISERFS_SB(sb); WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n"); } #endif r
commit    79c6f448c8b79c321e4a1f31f98194e4f6b6cae7
tree      370efda701f03cccf21e02bb1fdd3b852547d75c
parent    0c744ea4f77d72b3dcebb7a8f2684633ec79be88
author    Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-30 19:27:10 -0500
committer Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-31 09:13:49 -0500
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at start of the tracer. It is
pinned to a single CPU and will move to the next CPU after each period
of running. If the user modifies the migration thread's affinity, it
will not change after that happens.

The original code created the thread at the first instance it was
called, but later was changed to destroy the thread after the tracer
was finished, and would not be created until the next instance of the
tracer was established. The code that initialized the affinity was only
called on the initial instantiation of the tracer. After that, it was
not initialized, and the previous affinity did not match the current
newly created one, making it appear that the user modified the thread's
affinity when it did not, and the thread failed to migrate again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
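For illustration, a minimal sketch of the shape of such a fix, assuming the tracer's start path looks roughly like trace_hwlat.c's start_kthread(). The names (save_cpumask, kthread_fn, hwlat_kthread, tracing_buffer_mask) follow the tracer, but the body is abbreviated and is not the verbatim patch; the point is to re-derive and bind the affinity every time the thread is created, not only on the first instantiation.

/*
 * Sketch, not the verbatim patch: recompute the starting CPU and bind
 * the sampling thread every time it is created, so a freshly created
 * thread never inherits a stale affinity from a previous instance.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	/* Recompute the allowed CPUs on every start, not just the first */
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread))
		return -ENOMEM;

	/* Pin the new thread before it runs, so it starts migrating fresh */
	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	kthread_bind_mask(kthread, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}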