path: root/include/net/llc_c_ev.h
author     Eric W. Biederman <ebiederm@xmission.com>    2017-01-03 14:18:43 +1300
committer  Eric W. Biederman <ebiederm@xmission.com>    2017-01-10 13:34:43 +1300
commit     3895dbf8985f656675b5bde610723a29cbce3fa7 (patch)
tree       91d4517f09918fd573998eb40b8f35f08ed1c470 /include/net/llc_c_ev.h
parent     0c744ea4f77d72b3dcebb7a8f2684633ec79be88 (diff)
mnt: Protect the mountpoint hashtable with mount_lock
Protecting the mountpoint hashtable with namespace_sem was sufficient until a
call to umount_mnt was added to mntput_no_expire, at which point it became
possible for multiple calls of put_mountpoint on the same hash chain to happen
at the same time.

Krister Johansen <kjlx@templeofstupid.com> reported:

> This can cause a panic when simultaneous callers of put_mountpoint
> attempt to free the same mountpoint.  This occurs because some callers
> hold the mount_hash_lock, while others hold the namespace lock.  Some
> even hold both.
>
> In this submitter's case, the panic manifested itself as a GP fault in
> put_mountpoint() when it called hlist_del() and attempted to dereference
> a m_hash.pprev that had been poisoned by another thread.

Al Viro observed that the simple fix is to switch from using the namespace_sem
to the mount_lock to protect the mountpoint hash table.

I have taken Al's suggested patch, moved put_mountpoint in pivot_root (instead
of taking mount_lock an additional time), and have replaced new_mountpoint
with get_mountpoint, a function that does the hash table lookup and addition
under the mount_lock. The introduction of get_mountpoint ensures that only the
mount_lock is needed to manipulate the mountpoint hashtable.

d_set_mounted is modified to only set DCACHE_MOUNTED if it is not already set.
This allows get_mountpoint to use the setting of DCACHE_MOUNTED to ensure that
adding a struct mountpoint for a dentry happens exactly once.

Cc: stable@vger.kernel.org
Fixes: ce07d891a089 ("mnt: Honor MNT_LOCKED when detaching mounts")
Reported-by: Krister Johansen <kjlx@templeofstupid.com>
Suggested-by: Al Viro <viro@ZenIV.linux.org.uk>
Acked-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
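The locking pattern the message describes — doing the hash-chain lookup and the
insertion under the same lock that also covers removal, and using a per-dentry
flag to guarantee the record is created exactly once — can be illustrated with
a small, self-contained C sketch. This is a hypothetical userspace analogue,
not the kernel code from the patch: mp_table, mp_lock, get_mp, put_mp and the
flag bit merely stand in for the real mount hash, mount_lock, get_mountpoint,
put_mountpoint and DCACHE_MOUNTED.

/*
 * Simplified userspace analogue of the pattern described above: one lock
 * (mp_lock, standing in for mount_lock) guards both the hash lookup and the
 * insertion, and a flag on the target object (standing in for DCACHE_MOUNTED)
 * is set only on first insertion, so a mountpoint record is created exactly
 * once per dentry.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

#define MP_HASH_SIZE 64

struct dentry_stub {
	unsigned int flags;             /* bit 0 plays the role of DCACHE_MOUNTED */
};

struct mp_stub {
	struct dentry_stub *dentry;
	int refcount;
	struct mp_stub *next;           /* hash-chain link */
};

static struct mp_stub *mp_table[MP_HASH_SIZE];
static pthread_mutex_t mp_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int mp_hash(struct dentry_stub *d)
{
	return ((unsigned long)d >> 4) % MP_HASH_SIZE;
}

/* Look up or create the mountpoint record, entirely under mp_lock. */
static struct mp_stub *get_mp(struct dentry_stub *d)
{
	unsigned int h = mp_hash(d);
	struct mp_stub *mp;

	pthread_mutex_lock(&mp_lock);
	for (mp = mp_table[h]; mp; mp = mp->next) {
		if (mp->dentry == d) {
			mp->refcount++;
			pthread_mutex_unlock(&mp_lock);
			return mp;
		}
	}
	/* Not found: allocate and insert while still holding the lock. */
	mp = calloc(1, sizeof(*mp));
	if (mp) {
		mp->dentry = d;
		mp->refcount = 1;
		mp->next = mp_table[h];
		mp_table[h] = mp;
		d->flags |= 1;          /* mark "mounted" exactly once */
	}
	pthread_mutex_unlock(&mp_lock);
	return mp;
}

/* Drop a reference; unhash and free under the same lock as the lookup. */
static void put_mp(struct mp_stub *mp)
{
	unsigned int h = mp_hash(mp->dentry);
	struct mp_stub **pp;

	pthread_mutex_lock(&mp_lock);
	if (--mp->refcount == 0) {
		for (pp = &mp_table[h]; *pp; pp = &(*pp)->next) {
			if (*pp == mp) {
				*pp = mp->next;
				break;
			}
		}
		mp->dentry->flags &= ~1u;
		free(mp);
	}
	pthread_mutex_unlock(&mp_lock);
}

The point of the sketch is the invariant the patch restores: every path that
adds to or removes from a hash chain takes the same lock, so two concurrent
put_mp() (put_mountpoint) callers can no longer unhash the same entry and
trip over an already-deleted chain pointer.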
Diffstat (limited to 'include/net/llc_c_ev.h')