#ifndef _NFT_FIB_H_
#define _NFT_FIB_H_

struct nft_fib {
	enum nft_registers	dreg:8;
	u8			result;
	u32			flags;
};

extern const struct nla_policy nft_fib_policy[];

static inline bool
nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}

int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
		 const struct nlattr * const tb[]);
int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
		     const struct nft_data **data);

void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
			const struct nft_pktinfo *pkt);
void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
		   const struct nft_pktinfo *pkt);

void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
			const struct nft_pktinfo *pkt);
void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
		   const struct nft_pktinfo *pkt);

void nft_fib_store_result(void *reg, enum nft_fib_result r,
			  const struct nft_pktinfo *pkt, int index);

#endif
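For context, the nft_fib_is_loopback() helper above is typically consulted at the top of the per-family eval routines so loopback traffic can skip the FIB lookup entirely. The sketch below only illustrates that pattern; the body (the use of nft_expr_priv(), nft_in(), and storing the incoming device's ifindex for loopback) is an assumption, not a copy of the real nft_fib4_eval().

/*
 * Illustrative sketch only: short-circuit the FIB lookup for loopback
 * traffic and store the incoming device's ifindex as the result.  The
 * body of the real nft_fib4_eval() may differ.
 */
void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
		   const struct nft_pktinfo *pkt)
{
	const struct nft_fib *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];

	if (nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
		/* Loopback traffic never needs a route lookup. */
		nft_fib_store_result(dest, priv->result, pkt,
				     nft_in(pkt)->ifindex);
		return;
	}

	/* ... otherwise perform the actual fib_lookup() and store its result ... */
}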
author    Christoffer Dall <christoffer.dall@linaro.org>  2016-05-20 15:25:28 +0200
committer Christoffer Dall <christoffer.dall@linaro.org>  2016-05-20 16:26:38 +0200
commit    35a2d58588f0992627e74b447ccab21570544c86 (patch)
tree      823643adbca0b1aece727a932ea0cbe4c5809321
parent    efffe55af5e16f7935aa0175cf25c386f08219f5 (diff)
KVM: arm/arm64: vgic-new: Synchronize changes to active state
When modifying the active state of an interrupt via the MMIO interface, we should ensure that the write has the intended effect.

If a guest sets an interrupt to active, but that interrupt is already flushed into a list register on a running VCPU, then that VCPU will write the active state back into the struct vgic_irq upon returning from the guest and syncing its state. This is a non-benign race, because the guest can observe that an interrupt is not active, and it can have a reasonable expectation that other VCPUs will not ack any IRQs, and then set the state to active, and expect it to stay that way. Currently we are not honoring this case.

Therefore, change both the SACTIVE and CACTIVE mmio handlers to stop the world, change the irq state, potentially queue the irq if we're setting it to active, and then continue.

We take this chance to slightly optimize these functions by not stopping the world when touching private interrupts where there is inherently no possible race.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
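The flow described in the message maps naturally onto a pair of prepare/finish steps around the per-IRQ state change. The sketch below is a hedged illustration of that pattern in kernel style: the helper names (vgic_change_active_prepare(), vgic_change_active_finish(), vgic_mmio_change_active()) and the exact locking are assumptions modelled on the vgic MMIO layer, not a quotation of the patch.

/*
 * Sketch of the described approach: stop all VCPUs before touching the
 * active state of a shared interrupt, then resume them afterwards.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	/*
	 * Private interrupts (SGIs/PPIs) can only sit in the list
	 * registers of the VCPU performing this access, so there is no
	 * cross-VCPU race to close and no need to stop the world.
	 */
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	/* Stop all VCPUs so no list register can race with this update. */
	vgic_change_active_prepare(vcpu, intid);

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Set the active bit and, if needed, queue the irq so
		 * the new state is flushed to a list register on the
		 * next guest entry.
		 */
		vgic_mmio_change_active(vcpu, irq, true);
	}

	/* Let the guest run again with the new active state visible. */
	vgic_change_active_finish(vcpu, intid);
}

The CACTIVE handler would follow the same shape, passing false to the per-IRQ helper to clear the active bit instead of setting it.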