/*
 * cls_cgroup.h			Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

/* Look up the net_cls classid of a task under RCU; returns 0 when
 * called from interrupt context, where `current' is meaningless.
 */
static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	if (in_interrupt())
		return 0;

	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}

/* Cache the opening task's classid in the socket's cgroup data so it
 * can still be retrieved later from softirq context.
 */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
	u32 classid;

	classid = task_cls_classid(current);
	sock_cgroup_set_classid(skcd, classid);
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	u32 classid = task_cls_state(current)->classid;

	/* Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* If there is a sock_cgroup_classid we'll use that. */
		if (!sk || !sk_fullsock(sk))
			return 0;

		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}

	return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
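For context, the primary consumer of task_get_classid() is the cgroup packet classifier in net/sched/cls_cgroup.c. The sketch below shows how a classify callback could map the returned classid onto a tc class; the name demo_cgroup_classify and the no-match policy are illustrative assumptions, not the upstream implementation.

/* Minimal sketch (not the upstream cls_cgroup code): resolve the
 * sending task's classid via task_get_classid() and use it as the
 * tc classification result.
 */
static int demo_cgroup_classify(struct sk_buff *skb,
				const struct tcf_proto *tp,
				struct tcf_result *res)
{
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;	/* task has no net_cls classid: no match */

	/* Hand the classid back to the qdisc as the selected class. */
	res->classid = classid;
	res->class = 0;
	return 0;
}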
perf/core: Fix use-after-free bug

Dmitry reported a KASAN use-after-free on event->group_leader.

It turns out there's a hole in perf_remove_from_context() due to
event_function_call() not calling its function when the task
associated with the event is already dead.

In this case the event will have been detached from the task, but the
grouping will have been retained, such that group operations might
still work properly while there are live child events etc.

This does however mean that we can miss a perf_group_detach() call
when the group decomposes; this in turn can then lead to
use-after-free.

Fix it by explicitly doing the group detach if it's still required.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org # v4.5+
Cc: syzkaller <syzkaller@googlegroups.com>
Fixes: 63b6da39bb38 ("perf: Fix perf_event_exit_task() race")
Link: http://lkml.kernel.org/r/20170126153955.GD6515@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
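The shape of the fix is sketched below; this is a reconstruction from the commit message above, not the verbatim upstream diff. It assumes the change lands in perf_remove_from_context() and that the group attachment can be tested via event->attach_state; event_function_call(), perf_group_detach() and the DETACH_GROUP flag are the upstream names.

/* Sketch of the fix (reconstructed, not the verbatim patch): if
 * event_function_call() no-opped because the task is already dead,
 * the event may still be grouped, so detach the group explicitly
 * under the context lock.
 */
static void perf_remove_from_context(struct perf_event *event,
				     unsigned long flags)
{
	struct perf_event_context *ctx = event->ctx;

	lockdep_assert_held(&ctx->mutex);

	event_function_call(event, __perf_remove_from_context, (void *)flags);

	if ((flags & DETACH_GROUP) &&
	    (event->attach_state & PERF_ATTACH_GROUP)) {
		/* The task is gone, so the event cannot be scheduled;
		 * it is safe to detach the group here.
		 */
		raw_spin_lock_irq(&ctx->lock);
		perf_group_detach(event);
		raw_spin_unlock_irq(&ctx->lock);
	}
}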