/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009, 2010 Daniel Borkmann.
 * Copyright 2010 Emmanuel Roullit.
 * Subject to the GPL, version 2.
 */

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>    /* for ntohs() */

#include "proto.h"
#include "protos.h"
#include "dissector_eth.h"
#include "pkt_buff.h"

struct vlanhdr {
	uint16_t h_vlan_TCI;
	uint16_t h_vlan_encapsulated_proto;
} __attribute__((packed));

static void vlan(struct pkt_buff *pkt)
{
	uint16_t tci;
	struct vlanhdr *vlan = (struct vlanhdr *) pkt_pull(pkt, sizeof(*vlan));

	if (vlan == NULL)
		return;

	tci = ntohs(vlan->h_vlan_TCI);

	tprintf(" [ VLAN ");
	tprintf("Prio (%d), ", (tci & 0xE000) >> 13);
	tprintf("CFI (%d), ", (tci & 0x1000) >> 12);
	tprintf("ID (%d), ", (tci & 0x0FFF));
	tprintf("Proto (0x%.4x)", ntohs(vlan->h_vlan_encapsulated_proto));
	tprintf(" ]\n");

	pkt_set_proto(pkt, &eth_lay2, ntohs(vlan->h_vlan_encapsulated_proto));
}

static void vlan_less(struct pkt_buff *pkt)
{
	uint16_t tci;
	struct vlanhdr *vlan = (struct vlanhdr *) pkt_pull(pkt, sizeof(*vlan));

	if (vlan == NULL)
		return;

	tci = ntohs(vlan->h_vlan_TCI);

	tprintf(" VLAN%d", (tci & 0x0FFF));

	pkt_set_proto(pkt, &eth_lay2, ntohs(vlan->h_vlan_encapsulated_proto));
}

struct protocol vlan_ops = {
	.key = 0x8100,
	.print_full = vlan,
	.print_less = vlan_less,
};
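For reference, the three TCI masks above carve the 16-bit 802.1Q tag control field into a 3-bit priority, a 1-bit CFI, and a 12-bit VLAN ID. A minimal standalone sketch of the same decode (the sample TCI value 0xA064 is made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t tci = 0xA064;	/* hypothetical host-order TCI: prio 5, CFI 0, ID 100 */

	/* Same masks and shifts as vlan() above */
	printf("Prio (%d), CFI (%d), ID (%d)\n",
	       (tci & 0xE000) >> 13,	/* 3-bit priority code point */
	       (tci & 0x1000) >> 12,	/* canonical format indicator */
	       tci & 0x0FFF);		/* 12-bit VLAN identifier */
	return 0;
}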
author    Wanpeng Li <wanpeng.li@hotmail.com>  2016-07-14 16:15:56 +0800
committer Ingo Molnar <mingo@kernel.org>       2016-08-10 14:13:28 +0200
commit    229ce631574761870a2ac938845fadbd07f35caa
tree      ef5627be2364a5105a5a9fd7dd4d2fac942e8594
parent    2db34e8bf9a22f4e38b29deccee57457bc0e7d74
locking/pvqspinlock: Fix double hash race
When the lock holder vCPU is racing with the queue head:

   CPU 0 (lock holder)              CPU1 (queue head)
   ===================              =================
   spin_lock();                     spin_lock();
    pv_kick_node():                  pv_wait_head_or_lock():
                                      if (!lp) {
                                       lp = pv_hash(lock, pn);
                                       xchg(&l->locked, _Q_SLOW_VAL);
                                      }
                                      WRITE_ONCE(pn->state, vcpu_halted);
     cmpxchg(&pn->state,
             vcpu_halted, vcpu_hashed);
     WRITE_ONCE(l->locked, _Q_SLOW_VAL);
     (void)pv_hash(lock, pn);

In this case, the lock holder inserts the queue head's pv_node into the
hash table a second time and sets _Q_SLOW_VAL unnecessarily. This patch
avoids the double hash by having the queue head set its state to
vcpu_hashed once adaptive lock spinning fails, so the lock holder's
cmpxchg() cannot succeed; a model of the effect follows below.

Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1468484156-4521-1-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
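For intuition, here is a hedged userspace model of that interleaving. Names such as pn_state, vcpu_halted, and the "hash insert" counter echo the commit message; none of this is the kernel code. It only shows why publishing vcpu_hashed instead of vcpu_halted makes the lock holder's cmpxchg() fail, preventing the second hash insert:

#include <stdatomic.h>
#include <stdio.h>

enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

static _Atomic int pn_state = vcpu_running;	/* models pn->state */
static int hash_inserts;			/* counts pv_hash() calls */

/* Queue head (CPU1): hash the lock, then publish a wait state. */
static void queue_head_waits(int fixed)
{
	hash_inserts++;				/* lp = pv_hash(lock, pn); */
	atomic_store(&pn_state, fixed ? vcpu_hashed : vcpu_halted);
}

/* Lock holder (CPU0): hash only if the head still looks merely halted. */
static void lock_holder_kicks(void)
{
	int expected = vcpu_halted;

	if (atomic_compare_exchange_strong(&pn_state, &expected, vcpu_hashed))
		hash_inserts++;			/* (void)pv_hash(lock, pn); */
}

int main(void)
{
	for (int fixed = 0; fixed <= 1; fixed++) {
		pn_state = vcpu_running;
		hash_inserts = 0;
		queue_head_waits(fixed);	/* CPU1 runs first in the racy window */
		lock_holder_kicks();		/* then CPU0's kick observes the state */
		printf("%s: %d hash insert(s)\n",
		       fixed ? "fixed" : "broken", hash_inserts);
	}
	return 0;
}

With vcpu_halted published, the holder's cmpxchg() succeeds and the lock is hashed twice; with vcpu_hashed it fails and only the queue head's single insert remains.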