/*
 * RT-Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner
 *
 * This file contains macros used solely by rtmutex.c. Debug version.
 */

extern void rt_mutex_deadlock_account_lock(struct rt_mutex *lock,
					   struct task_struct *task);
extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void debug_rt_mutex_lock(struct rt_mutex *lock);
extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
				      struct task_struct *powner);
extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
				    struct rt_mutex_waiter *waiter,
				    struct rt_mutex *lock);
extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);

# define debug_rt_mutex_reset_waiter(w)			\
	do { (w)->deadlock_lock = NULL; } while (0)

static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
						  enum rtmutex_chainwalk walk)
{
	return (waiter != NULL);
}

static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
{
	debug_rt_mutex_print_deadlock(w);
}
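As a usage note, the debug variant of debug_rt_mutex_detect_deadlock() above returns true whenever a waiter exists, which forces full deadlock detection on every priority-inheritance chain walk. The wrapper below is roughly how rtmutex.c consults this hook; it is reconstructed from memory of the same-era sources, so treat the exact shape as an assumption rather than the verbatim kernel code.

/*
 * Sketch of the rtmutex.c call site (reconstructed, not verbatim):
 * in debug builds the hook ignores the requested walk mode and asks
 * for deadlock detection whenever a waiter is present; in non-debug
 * builds it only honours the caller's explicit request.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}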
commit c6dcf52c23d2d3fb5235cec42d7dd3f786b87d55
Author:    Jan Kara <jack@suse.cz>  2016-08-10 17:22:44 +0200
Committer: Dan Williams <dan.j.williams@intel.com>  2016-12-26 20:29:24 -0800
mm: Invalidate DAX radix tree entries only if appropriate
Currently invalidate_inode_pages2_range() and invalidate_mapping_pages()
just delete all exceptional radix tree entries they find. For DAX this is
not desirable, as we track cache dirtiness in these entries, and when they
are evicted we may fail to flush caches even though it is necessary. This
can manifest, for example, when we write to the same block both via mmap
and via write(2) (to different offsets): a subsequent fsync(2) then does
not properly flush CPU caches because the modification via write(2) was
the last one.

Create appropriate DAX functions to handle invalidation of DAX entries
for invalidate_inode_pages2_range() and invalidate_mapping_pages() and
wire them up into the corresponding mm functions.

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
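A minimal sketch of the core check the commit message describes, under stated assumptions: the helper name is hypothetical, and the DAX entry locking the real patch performs is omitted for brevity. The idea is to delete an exceptional radix tree entry only when it carries no pending-writeback state, so invalidation can never discard a needed cache flush.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/*
 * Hypothetical helper (not the patch's actual function): remove the DAX
 * radix tree entry at @index only when doing so cannot lose dirty state.
 * Entries tagged DIRTY or TOWRITE still have CPU caches to flush, so
 * they are left in place. Returns 1 if the entry was removed.
 */
static int dax_invalidate_entry_if_clean(struct address_space *mapping,
					 pgoff_t index)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	entry = radix_tree_lookup(page_tree, index);
	if (entry && radix_tree_exceptional_entry(entry) &&
	    !radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) &&
	    !radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) {
		radix_tree_delete(page_tree, index);
		mapping->nrexceptional--;
		ret = 1;
	}
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

In this sketch, truncation-style callers that must drop the entry unconditionally would skip the tag checks, which is why the description distinguishes invalidate_inode_pages2_range() and invalidate_mapping_pages() from plain truncation.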