#undef TRACE_SYSTEM
#define TRACE_SYSTEM sunvnet

#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SUNVNET_H

#include <linux/tracepoint.h>

TRACE_EVENT(vnet_rx_one,

	TP_PROTO(int lsid, int rsid, int index, int needs_ack),

	TP_ARGS(lsid, rsid, index, needs_ack),

	TP_STRUCT__entry(
		__field(int, lsid)
		__field(int, rsid)
		__field(int, index)
		__field(int, needs_ack)
	),

	TP_fast_assign(
		__entry->lsid = lsid;
		__entry->rsid = rsid;
		__entry->index = index;
		__entry->needs_ack = needs_ack;
	),

	TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d",
		  __entry->lsid, __entry->rsid,
		  __entry->index, __entry->needs_ack)
);

DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template,

	TP_PROTO(int lsid, int rsid, int ack_end, int npkts),

	TP_ARGS(lsid, rsid, ack_end, npkts),

	TP_STRUCT__entry(
		__field(int, lsid)
		__field(int, rsid)
		__field(int, ack_end)
		__field(int, npkts)
	),

	TP_fast_assign(
		__entry->lsid = lsid;
		__entry->rsid = rsid;
		__entry->ack_end = ack_end;
		__entry->npkts = npkts;
	),

	TP_printk("(%x:%x) stopped ack for %d; npkts %d",
		  __entry->lsid, __entry->rsid,
		  __entry->ack_end, __entry->npkts)
);

DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack,
	     TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
	     TP_ARGS(lsid, rsid, ack_end, npkts));

DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack,
	     TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
	     TP_ARGS(lsid, rsid, ack_end, npkts));

DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack,
	     TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
	     TP_ARGS(lsid, rsid, ack_end, npkts));

TRACE_EVENT(vnet_rx_stopped_ack,

	TP_PROTO(int lsid, int rsid, int end),

	TP_ARGS(lsid, rsid, end),

	TP_STRUCT__entry(
		__field(int, lsid)
		__field(int, rsid)
		__field(int, end)
	),

	TP_fast_assign(
		__entry->lsid = lsid;
		__entry->rsid = rsid;
		__entry->end = end;
	),

	TP_printk("(%x:%x) stopped ack for index %d",
		  __entry->lsid, __entry->rsid, __entry->end)
);

TRACE_EVENT(vnet_tx_trigger,

	TP_PROTO(int lsid, int rsid, int start, int err),

	TP_ARGS(lsid, rsid, start, err),

	TP_STRUCT__entry(
		__field(int, lsid)
		__field(int, rsid)
		__field(int, start)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->lsid = lsid;
		__entry->rsid = rsid;
		__entry->start = start;
		__entry->err = err;
	),

	TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s",
		  __entry->lsid, __entry->rsid, __entry->start,
		  __entry->err, __entry->err > 0 ? "(ok)" : " ")
);

TRACE_EVENT(vnet_skip_tx_trigger,

	TP_PROTO(int lsid, int rsid, int last),

	TP_ARGS(lsid, rsid, last),

	TP_STRUCT__entry(
		__field(int, lsid)
		__field(int, rsid)
		__field(int, last)
	),

	TP_fast_assign(
		__entry->lsid = lsid;
		__entry->rsid = rsid;
		__entry->last = last;
	),

	TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d",
		  __entry->lsid, __entry->rsid, __entry->last)
);

#endif /* _TRACE_SUNVNET_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
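The header above only declares the tracepoints. For the events to exist at run time, exactly one compilation unit in the driver must define CREATE_TRACE_POINTS before including the header, and the driver then fires an event through the generated trace_<event>() inline. The fragment below is a minimal sketch of that pattern, not the actual sunvnet driver code: the include path and the argument names (local_sid, remote_sid, index, needs_ack) are illustrative assumptions.

/* Sketch only: shows how a TRACE_EVENT declared above is fired. */
#define CREATE_TRACE_POINTS		/* emit event bodies in exactly one .c file */
#include <trace/events/sunvnet.h>	/* assumed location of the header above */

static void example_rx_path(int local_sid, int remote_sid,
			    int index, int needs_ack)
{
	/* Expands to a static-key-guarded call; effectively a no-op unless
	 * the "sunvnet:vnet_rx_one" event is enabled via tracefs. */
	trace_vnet_rx_one(local_sid, remote_sid, index, needs_ack);
}

Once the event is enabled (echo 1 > /sys/kernel/tracing/events/sunvnet/vnet_rx_one/enable), each call emits a line formatted by the event's TP_printk() template, e.g. "(lsid:rsid) walk_rx_one index N; needs_ack M".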
author     Douglas Miller <dougmill@linux.vnet.ibm.com>   2017-01-28 06:42:20 -0600
committer  Tejun Heo <tj@kernel.org>                      2017-01-28 07:49:42 -0500
commit     966d2b04e070bc040319aaebfec09e0144dc3341
tree       4b96156e3d1dd4dfd6039b7c219c9dc4616da52d
parent     1b1bc42c1692e9b62756323c675a44cb1a1f9dbd
percpu-refcount: fix reference leak during percpu-atomic transition
percpu_ref_tryget() and percpu_ref_tryget_live() should return "true" IFF they acquire a reference. But the return value from atomic_long_inc_not_zero() is a long and may have high bits set, e.g. PERCPU_COUNT_BIAS, while the return value of the tryget routines is bool. The reference may therefore actually be acquired even though the routines return "false", which results in a reference leak, since the caller assumes it does not need to do a corresponding percpu_ref_put().

This was seen when performing CPU hotplug during I/O, as hangs in blk_mq_freeze_queue_wait where percpu_ref_kill (blk_mq_freeze_queue_start) raced with percpu_ref_tryget (blk_mq_timeout_work).

Sample stack trace:

  __switch_to+0x2c0/0x450
  __schedule+0x2f8/0x970
  schedule+0x48/0xc0
  blk_mq_freeze_queue_wait+0x94/0x120
  blk_mq_queue_reinit_work+0xb8/0x180
  blk_mq_queue_reinit_prepare+0x84/0xa0
  cpuhp_invoke_callback+0x17c/0x600
  cpuhp_up_callbacks+0x58/0x150
  _cpu_up+0xf0/0x1c0
  do_cpu_up+0x120/0x150
  cpu_subsys_online+0x64/0xe0
  device_online+0xb4/0x120
  online_store+0xb4/0xc0
  dev_attr_store+0x68/0xa0
  sysfs_kf_write+0x80/0xb0
  kernfs_fop_write+0x17c/0x250
  __vfs_write+0x6c/0x1e0
  vfs_write+0xd0/0x270
  SyS_write+0x6c/0x110
  system_call+0x38/0xe0

Examination of the queue showed a single reference (no PERCPU_COUNT_BIAS, with __PERCPU_REF_DEAD and __PERCPU_REF_ATOMIC set) and no requests. However, conditions at the time of the race are a count of PERCPU_COUNT_BIAS + 0 with __PERCPU_REF_DEAD and __PERCPU_REF_ATOMIC set.

The fix is to make the tryget routines use an actual boolean internally instead of the atomic long result truncated to an int.

Fixes: e625305b3907 ("percpu-refcount: make percpu_ref based on longs instead of ints")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=190751
Signed-off-by: Douglas Miller <dougmill@linux.vnet.ibm.com>
Reviewed-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: stable@vger.kernel.org # v3.18+
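The root cause is an ordinary C narrowing conversion, which a short stand-alone sketch can make concrete. This is not the kernel patch itself: PERCPU_COUNT_BIAS is approximated here as the top bit of an unsigned long (the kernel defines it in that spirit), and the variable names are illustrative only.

/* Userspace illustration of the truncation bug, not the kernel fix. */
#include <stdbool.h>
#include <stdio.h>

/* Approximation of the kernel's bias: only the top bit of a long is set. */
#define PERCPU_COUNT_BIAS (1UL << (sizeof(unsigned long) * 8 - 1))

int main(void)
{
	unsigned long count = PERCPU_COUNT_BIAS;	/* nonzero: a reference was taken */

	int  as_int  = (int)count;	/* keeps only the low 32 bits: 0 on a 64-bit build */
	bool as_bool = count;		/* any nonzero value converts to true */

	printf("as_int  = %d  (reported as failure -> reference leaks)\n", as_int);
	printf("as_bool = %d  (correctly reports success)\n", as_bool);
	return 0;
}

Read this way, the fix amounts to holding the intermediate result of atomic_long_inc_not_zero() in a bool rather than an int inside percpu_ref_tryget() and percpu_ref_tryget_live(), so a long value with only high bits set is never truncated to zero before being returned.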