/* * Stream Parser * * Copyright (c) 2016 Tom Herbert * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #ifndef __NET_STRPARSER_H_ #define __NET_STRPARSER_H_ #include #include #define STRP_STATS_ADD(stat, count) ((stat) += (count)) #define STRP_STATS_INCR(stat) ((stat)++) struct strp_stats { unsigned long long rx_msgs; unsigned long long rx_bytes; unsigned int rx_mem_fail; unsigned int rx_need_more_hdr; unsigned int rx_msg_too_big; unsigned int rx_msg_timeouts; unsigned int rx_bad_hdr_len; }; struct strp_aggr_stats { unsigned long long rx_msgs; unsigned long long rx_bytes; unsigned int rx_mem_fail; unsigned int rx_need_more_hdr; unsigned int rx_msg_too_big; unsigned int rx_msg_timeouts; unsigned int rx_bad_hdr_len; unsigned int rx_aborts; unsigned int rx_interrupted; unsigned int rx_unrecov_intr; }; struct strparser; /* Callbacks are called with lock held for the attached socket */ struct strp_callbacks { int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); int (*read_sock_done)(struct strparser *strp, int err); void (*abort_parser)(struct strparser *strp, int err); }; struct strp_rx_msg { int full_len; int offset; }; static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb) { return (struct strp_rx_msg *)((void *)skb->cb + offsetof(struct qdisc_skb_cb, data)); } /* Structure for an attached lower socket */ struct strparser { struct sock *sk; u32 rx_stopped : 1; u32 rx_paused : 1; u32 rx_aborted : 1; u32 rx_interrupted : 1; u32 rx_unrecov_intr : 1; struct sk_buff **rx_skb_nextp; struct timer_list rx_msg_timer; struct sk_buff *rx_skb_head; unsigned int rx_need_bytes; struct delayed_work rx_delayed_work; struct work_struct rx_work; struct strp_stats stats; struct strp_callbacks cb; }; /* Must be called with lock held for attached socket */ static 
inline void strp_pause(struct strparser *strp) { strp->rx_paused = 1; } /* May be called without holding lock for attached socket */ void strp_unpause(struct strparser *strp); static inline void save_strp_stats(struct strparser *strp, struct strp_aggr_stats *agg_stats) { /* Save psock statistics in the mux when psock is being unattached. */ #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \ strp->stats._stat) SAVE_PSOCK_STATS(rx_msgs); SAVE_PSOCK_STATS(rx_bytes); SAVE_PSOCK_STATS(rx_mem_fail); SAVE_PSOCK_STATS(rx_need_more_hdr); SAVE_PSOCK_STATS(rx_msg_too_big); SAVE_PSOCK_STATS(rx_msg_timeouts); SAVE_PSOCK_STATS(rx_bad_hdr_len); #undef SAVE_PSOCK_STATS if (strp->rx_aborted) agg_stats->rx_aborts++; if (strp->rx_interrupted) agg_stats->rx_interrupted++; if (strp->rx_unrecov_intr) agg_stats->rx_unrecov_intr++; } static inline void aggregate_strp_stats(struct strp_aggr_stats *stats, struct strp_aggr_stats *agg_stats) { #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) SAVE_PSOCK_STATS(rx_msgs); SAVE_PSOCK_STATS(rx_bytes); SAVE_PSOCK_STATS(rx_mem_fail); SAVE_PSOCK_STATS(rx_need_more_hdr); SAVE_PSOCK_STATS(rx_msg_too_big); SAVE_PSOCK_STATS(rx_msg_timeouts); SAVE_PSOCK_STATS(rx_bad_hdr_len); SAVE_PSOCK_STATS(rx_aborts); SAVE_PSOCK_STATS(rx_interrupted); SAVE_PSOCK_STATS(rx_unrecov_intr); #undef SAVE_PSOCK_STATS } void strp_done(struct strparser *strp); void strp_stop(struct strparser *strp); void strp_check_rcv(struct strparser *strp); int strp_init(struct strparser *strp, struct sock *csk, struct strp_callbacks *cb); void strp_data_ready(struct strparser *strp); #endif /* __NET_STRPARSER_H_ */ e='submit' value='reload'/>
authorPaul Moore <paul@paul-moore.com>2016-11-29 16:53:26 -0500
committerPaul Moore <paul@paul-moore.com>2016-12-14 13:06:04 -0500
commita09cfa470817ac086cf68418da13a2b91c2744ef (patch)
treea701499dc5828aa54314d843917ea35a60cc436c
parent6c54e7899693dee3db67ea996e9be0e10f67920f (diff)
audit: don't ever sleep on a command record/message
Sleeping on a command record/message in audit_log_start() could slow something, e.g. auditd, from doing something important, e.g. clean shutdown, which could present problems on a heavily loaded system. This patch allows tasks to bypass any queue restrictions if they are logging a command record/message. Signed-off-by: Paul Moore <paul@paul-moore.com>
-rw-r--r--kernel/audit.c18
1 files changed, 13 insertions, 5 deletions
diff --git a/kernel/audit.c b/kernel/audit.c

[NOTE(review): unrelated x86/microcode commit message fused onto the line above by extraction; leading text truncated]
…st AP had already done that but it can also hurt us: Imagine a single hyperthreaded CPU (Intel(R) Atom(TM) CPU N270, for example) which updates the microcode on the BSP but since the microcode engine is shared between the two threads, the update on CPU1 doesn't happen because it has already happened on CPU0 and we don't find a newer microcode revision on CPU1. Which doesn't set the intel_ucode_patch pointer and at initrd jettisoning time we don't save the microcode patch for later application. Now, when we suspend to RAM, the loaded microcode gets cleared so we need to reload but there's no patch saved in the cache. Removing the optimization fixes this issue and all is fine and dandy. Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading") Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170120202955.4091-2-bp@alien8.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'net/ipv6')