/*
 * tunnel6 - tunnel transformation for IPv6.
 *
 * Demultiplexes IPv6-in-IPv6 (protocol 41) and IPv4-in-IPv6 (protocol 4)
 * packets to registered xfrm6_tunnel handlers.
 */
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>

/* Per-family handler lists, kept sorted by ascending priority. */
static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
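
/*
 * Register @handler on the list for @family (AF_INET6 for IPv6-in-IPv6,
 * otherwise IPv4-in-IPv6).  The list stays sorted by ascending priority;
 * -EEXIST is returned if a handler with the same priority already exists.
 */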
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
{
	struct xfrm6_tunnel __rcu **pprev;
	struct xfrm6_tunnel *t;
	int ret = -EEXIST;
	int priority = handler->priority;

	mutex_lock(&tunnel6_mutex);

	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel6_mutex))) != NULL;
	     pprev = &t->next) {
		if (t->priority > priority)
			break;
		if (t->priority == priority)
			goto err;
	}

	handler->next = *pprev;
	rcu_assign_pointer(*pprev, handler);

	ret = 0;

err:
	mutex_unlock(&tunnel6_mutex);
	return ret;
}
EXPORT_SYMBOL(xfrm6_tunnel_register);
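
/*
 * Unlink @handler from the list for @family under the mutex, then wait
 * for in-flight RCU readers with synchronize_net() so the caller may
 * safely free the handler afterwards.
 */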
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
{
	struct xfrm6_tunnel __rcu **pprev;
	struct xfrm6_tunnel *t;
	int ret = -ENOENT;

	mutex_lock(&tunnel6_mutex);

	for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
	     (t = rcu_dereference_protected(*pprev,
			lockdep_is_held(&tunnel6_mutex))) != NULL;
	     pprev = &t->next) {
		if (t == handler) {
			*pprev = handler->next;
			ret = 0;
			break;
		}
	}

	mutex_unlock(&tunnel6_mutex);

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
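
/*
 * Walk a handler list under rcu_read_lock(); entries are unlinked by
 * xfrm6_tunnel_deregister() but only freed after a grace period.
 */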
#define for_each_tunnel_rcu(head, handler)		\
	for (handler = rcu_dereference(head);		\
	     handler != NULL;				\
	     handler = rcu_dereference(handler->next))
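
/*
 * Receive an IPv6-in-IPv6 packet: offer it to each handler in priority
 * order until one returns 0 (consumed); otherwise reply with an ICMPv6
 * port-unreachable error and drop the packet.
 */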
static int tunnel6_rcv(struct sk_buff *skb)
{
	struct xfrm6_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel6_handlers, handler)
		if (!handler->handler(skb))
			return 0;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
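
/* As above, but for IPv4 packets tunnelled inside IPv6 (IPPROTO_IPIP). */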
static int tunnel46_rcv(struct sk_buff *skb)
{
	struct xfrm6_tunnel *handler;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	for_each_tunnel_rcu(tunnel46_handlers, handler)
		if (!handler->handler(skb))
			return 0;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
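
/*
 * ICMPv6 error handling: give each handler a chance until one reports
 * (by returning 0) that it has dealt with the error.
 */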
static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	struct xfrm6_tunnel *handler;

	for_each_tunnel_rcu(tunnel6_handlers, handler)
		if (!handler->err_handler(skb, opt, type, code, offset, info))
			break;
}
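
/* IPv4-in-IPv6 counterpart of tunnel6_err(). */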
static void tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			 u8 type, u8 code, int offset, __be32 info)
{
	struct xfrm6_tunnel *handler;

	for_each_tunnel_rcu(tunnel46_handlers, handler)
		if (!handler->err_handler(skb, opt, type, code, offset, info))
			break;
}
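
/*
 * Protocol hooks registered with the IPv6 stack in tunnel6_init() below:
 * protocol 41 (IPPROTO_IPV6) carries IPv6-in-IPv6, protocol 4
 * (IPPROTO_IPIP) carries IPv4-in-IPv6.
 */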
static const struct inet6_protocol tunnel6_protocol = {
	.handler	= tunnel6_rcv,
	.err_handler	= tunnel6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static const struct inet6_protocol tunnel46_protocol = {
	.handler	= tunnel46_rcv,
	.err_handler	= tunnel46_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
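
/*
 * Register both inner protocols with the IPv6 stack, backing out the
 * first registration if the second one fails.
 */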
static int __init tunnel6_init(void)
{
	if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) {
		pr_err("%s: can't add protocol\n", __func__);
		return -EAGAIN;
	}
	if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) {
		pr_err("%s: can't add protocol\n", __func__);
		inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
		return -EAGAIN;
	}
	return 0;
}
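
/* Unregister both protocols, in reverse order of registration. */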
static void __exit tunnel6_fini(void)
{
	if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
		pr_err("%s: can't remove protocol\n", __func__);
	if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
		pr_err("%s: can't remove protocol\n", __func__);
}

module_init(tunnel6_init);
module_exit(tunnel6_fini);
MODULE_LICENSE("GPL");