#include <linux/module.h>

/* validate @native and @pcp counter values match @expected */
#define CHECK(native, pcp, expected)					\
	do {								\
		WARN((native) != (expected),				\
		     "raw %ld (0x%lx) != expected %lld (0x%llx)",	\
		     (native), (native),				\
		     (long long)(expected), (long long)(expected));	\
		WARN(__this_cpu_read(pcp) != (expected),		\
		     "pcp %ld (0x%lx) != expected %lld (0x%llx)",	\
		     __this_cpu_read(pcp), __this_cpu_read(pcp),	\
		     (long long)(expected), (long long)(expected));	\
	} while (0)

static DEFINE_PER_CPU(long, long_counter);
static DEFINE_PER_CPU(unsigned long, ulong_counter);

static int __init percpu_test_init(void)
{
	/*
	 * volatile prevents the compiler from optimizing away its uses;
	 * otherwise the +ui_one/-ui_one additions below would be replaced
	 * with inc/dec instructions.
	 */
	volatile unsigned int ui_one = 1;
	long l = 0;
	unsigned long ul = 0;

	pr_info("percpu test start\n");

	preempt_disable();

	l += -1;
	__this_cpu_add(long_counter, -1);
	CHECK(l, long_counter, -1);

	l += 1;
	__this_cpu_add(long_counter, 1);
	CHECK(l, long_counter, 0);

	ul = 0;
	__this_cpu_write(ulong_counter, 0);

	ul += 1UL;
	__this_cpu_add(ulong_counter, 1UL);
	CHECK(ul, ulong_counter, 1);

	ul += -1UL;
	__this_cpu_add(ulong_counter, -1UL);
	CHECK(ul, ulong_counter, 0);

	ul += -(unsigned long)1;
	__this_cpu_add(ulong_counter, -(unsigned long)1);
	CHECK(ul, ulong_counter, -1);

	ul = 0;
	__this_cpu_write(ulong_counter, 0);

	ul -= 1;
	__this_cpu_dec(ulong_counter);
	CHECK(ul, ulong_counter, -1);
	CHECK(ul, ulong_counter, ULONG_MAX);

	l += -ui_one;
	__this_cpu_add(long_counter, -ui_one);
	CHECK(l, long_counter, 0xffffffff);

	l += ui_one;
	__this_cpu_add(long_counter, ui_one);
	CHECK(l, long_counter, (long)0x100000000LL);

	l = 0;
	__this_cpu_write(long_counter, 0);
	l -= ui_one;
	__this_cpu_sub(long_counter, ui_one);
	CHECK(l, long_counter, -1);

	l = 0;
	__this_cpu_write(long_counter, 0);

	l += ui_one;
	__this_cpu_add(long_counter, ui_one);
	CHECK(l, long_counter, 1);

	l += -ui_one;
	__this_cpu_add(long_counter, -ui_one);
	CHECK(l, long_counter, (long)0x100000000LL);

	l = 0;
	__this_cpu_write(long_counter, 0);

	l -= ui_one;
	this_cpu_sub(long_counter, ui_one);
	CHECK(l, long_counter, -1);
	CHECK(l, long_counter, ULONG_MAX);

	ul = 0;
	__this_cpu_write(ulong_counter, 0);

	ul += ui_one;
	__this_cpu_add(ulong_counter, ui_one);
	CHECK(ul, ulong_counter, 1);

	ul = 0;
	__this_cpu_write(ulong_counter, 0);

	ul -= ui_one;
	__this_cpu_sub(ulong_counter, ui_one);
	CHECK(ul, ulong_counter, -1);
	CHECK(ul, ulong_counter, ULONG_MAX);

	ul = 3;
	__this_cpu_write(ulong_counter, 3);

	ul = this_cpu_sub_return(ulong_counter, ui_one);
	CHECK(ul, ulong_counter, 2);

	ul = __this_cpu_sub_return(ulong_counter, ui_one);
	CHECK(ul, ulong_counter, 1);

	preempt_enable();

	pr_info("percpu test done\n");
	return -EAGAIN;  /* failing init unloads the module immediately */
}

static void __exit percpu_test_exit(void)
{
}

module_init(percpu_test_init)
module_exit(percpu_test_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Thelen");
MODULE_DESCRIPTION("percpu operations test");
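The test brackets its updates with preempt_disable()/preempt_enable() because the __this_cpu_*() operations it exercises assume the caller already keeps the task pinned to one CPU, whereas the checked this_cpu_*() variants (this_cpu_sub(), this_cpu_sub_return() above) may be called from preemptible context. Below is a minimal sketch of that distinction; demo_counter and demo_percpu_update() are hypothetical names used only for illustration and are not part of the original file.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* hypothetical per-CPU counter, not part of the original test module */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_percpu_update(void)
{
	/*
	 * this_cpu_add() is safe from preemptible context: the operation is
	 * guaranteed to hit a single CPU's copy of the counter even if the
	 * task could otherwise migrate mid-update.
	 */
	this_cpu_add(demo_counter, 1);

	/*
	 * The __this_cpu_*() variants assume the caller has already
	 * prevented migration, which is why percpu_test_init() wraps its
	 * __this_cpu_*() calls in preempt_disable()/preempt_enable().
	 */
	preempt_disable();
	__this_cpu_add(demo_counter, 1);
	preempt_enable();
}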