/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009, 2010 Daniel Borkmann.
 * Subject to the GPL, version 2.
 */

#include <stdint.h>
#include <netinet/in.h>		/* for ntohs() */

#include "proto.h"
#include "protos.h"
#include "lookup.h"
#include "pkt_buff.h"
#include "built_in.h"

struct arphdr {
	uint16_t ar_hrd;	/* format of hardware address */
	uint16_t ar_pro;	/* format of protocol address */
	uint8_t ar_hln;		/* length of hardware address */
	uint8_t ar_pln;		/* length of protocol address */
	uint16_t ar_op;		/* ARP opcode (command) */
	uint8_t ar_sha[6];	/* sender hardware address */
	uint8_t ar_sip[4];	/* sender IP address */
	uint8_t ar_tha[6];	/* target hardware address */
	uint8_t ar_tip[4];	/* target IP address */
} __packed;

#define ARPHRD_ETHER		1
#define ARPHRD_IEEE802		6
#define ARPHRD_ARCNET		7
#define ARPHRD_ATM		16
#define ARPHRD_ATM2		19
#define ARPHRD_SERIAL		20
#define ARPHRD_ATM3		21
#define ARPHRD_IEEE1394		24

#define ARPOP_REQUEST		1	/* ARP request */
#define ARPOP_REPLY		2	/* ARP reply */
#define ARPOP_RREQUEST		3	/* RARP request */
#define ARPOP_RREPLY		4	/* RARP reply */
#define ARPOP_InREQUEST		8	/* InARP request */
#define ARPOP_InREPLY		9	/* InARP reply */
#define ARPOP_NAK		10	/* (ATM)ARP NAK */

static void arp(struct pkt_buff *pkt)
{
	char *hrd;
	char *pro;
	char *opcode;
	struct arphdr *arp = (struct arphdr *) pkt_pull(pkt, sizeof(*arp));

	if (arp == NULL)
		return;

	switch (ntohs(arp->ar_hrd)) {
	case ARPHRD_ETHER:
		hrd = "Ethernet";
		break;
	case ARPHRD_IEEE802:
		hrd = "IEEE 802";
		break;
	case ARPHRD_ARCNET:
		hrd = "ARCNET";
		break;
	case ARPHRD_ATM:
	case ARPHRD_ATM2:
	case ARPHRD_ATM3:
		hrd = "ATM";
		break;
	case ARPHRD_SERIAL:
		hrd = "Serial Line";
		break;
	case ARPHRD_IEEE1394:
		hrd = "IEEE 1394.1995";
		break;
	default:
		hrd = "Unknown";
		break;
	}

	pro = lookup_ether_type(ntohs(arp->ar_pro));
	if (pro == NULL)
		pro = "Unknown";

	switch (ntohs(arp->ar_op)) {
	case ARPOP_REQUEST:
		opcode = "ARP request";
		break;
	case ARPOP_REPLY:
		opcode = "ARP reply";
		break;
	case ARPOP_RREQUEST:
		opcode = "RARP request";
		break;
	case ARPOP_RREPLY:
		opcode = "RARP reply";
		break;
	case ARPOP_InREQUEST:
		opcode = "InARP request";
		break;
	case ARPOP_InREPLY:
		opcode = "InARP reply";
		break;
	case ARPOP_NAK:
		opcode = "(ATM) ARP NAK";
		break;
	default:
		opcode = "Unknown";
		break;
	}

	tprintf(" [ ARP ");
	tprintf("Format HA (%u => %s), ", ntohs(arp->ar_hrd), hrd);
	tprintf("Format Proto (0x%.4x => %s), ", ntohs(arp->ar_pro), pro);
	tprintf("HA Len (%u), ", arp->ar_hln);
	tprintf("Proto Len (%u), ", arp->ar_pln);
	tprintf("Opcode (%u => %s)", ntohs(arp->ar_op), opcode);
	tprintf(" ]\n");
}

static void arp_less(struct pkt_buff *pkt)
{
	char *opcode = NULL;
	struct arphdr *arp = (struct arphdr *) pkt_pull(pkt, sizeof(*arp));

	if (arp == NULL)
		return;

	switch (ntohs(arp->ar_op)) {
	case ARPOP_REQUEST:
		opcode = "ARP request";
		break;
	case ARPOP_REPLY:
		opcode = "ARP reply";
		break;
	case ARPOP_RREQUEST:
		opcode = "RARP request";
		break;
	case ARPOP_RREPLY:
		opcode = "RARP reply";
		break;
	case ARPOP_InREQUEST:
		opcode = "InARP request";
		break;
	case ARPOP_InREPLY:
		opcode = "InARP reply";
		break;
	case ARPOP_NAK:
		opcode = "(ATM) ARP NAK";
		break;
	default:
		opcode = "Unknown";
		break;
	}

	tprintf(" Op %s", opcode);
}

struct protocol arp_ops = {
	.key = 0x0806,
	.print_full = arp,
	.print_less = arp_less,
};
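For orientation, here is a minimal sketch of how a dissector registered this way might be selected by EtherType and invoked. The proto_table array and the dissect() helper are hypothetical illustrations, not netsniff-ng's actual dispatch path; only the arp_ops entry and its .key/.print_full/.print_less members come from the file above.

/*
 * Hypothetical dispatch sketch -- not netsniff-ng's real lookup code.  It
 * only shows how an entry such as arp_ops, keyed by EtherType 0x0806,
 * could be picked and have its print handler called on a packet buffer.
 */
static struct protocol *proto_table[] = {
	&arp_ops,
	/* further dissectors would be listed here ... */
};

static void dissect(struct pkt_buff *pkt, uint16_t ether_type, int less)
{
	size_t i;

	for (i = 0; i < sizeof(proto_table) / sizeof(proto_table[0]); ++i) {
		if (proto_table[i]->key != ether_type)
			continue;
		if (less)
			proto_table[i]->print_less(pkt);
		else
			proto_table[i]->print_full(pkt);
		return;
	}
	/* unknown EtherType: nothing is printed in this sketch */
}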
author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2016-05-03 17:15:43 -0400
committer	Steven Rostedt <rostedt@goodmis.org>		2016-05-03 17:59:24 -0400
commit		0fc1b09ff1ff404ddf753f5ffa5cd0adc8fdcdc9
tree		2607e3d3e242fb4d3d426fafe6295e8d7d6ac5a7
parent		dcb0b5575d24a32f51a3f1003312fb94ed4e214a
tracing: Use temp buffer when filtering events
Filtering of events requires the data to be written to the ring buffer
before it can be decided whether to filter it or not. This is because the
parameters of the filter are based on the result that is written to the
ring buffer and not on the parameters that are passed into the trace
functions.

The ftrace ring buffer is optimized for writing into the ring buffer and
committing. The discard procedure used when filtering decides that an
event should be dropped is much more heavy weight. Thus, using a temporary
buffer when filtering events can speed things up drastically.

Without a temp buffer we have:

 # trace-cmd start -p nop
 # perf stat -r 10 hackbench 50
       0.790706626 seconds time elapsed ( +- 0.71% )

 # trace-cmd start -e all
 # perf stat -r 10 hackbench 50
       1.566904059 seconds time elapsed ( +- 0.27% )

 # trace-cmd start -e all -f 'common_preempt_count==20'
 # perf stat -r 10 hackbench 50
       1.690598511 seconds time elapsed ( +- 0.19% )

 # trace-cmd start -e all -f 'common_preempt_count!=20'
 # perf stat -r 10 hackbench 50
       1.707486364 seconds time elapsed ( +- 0.30% )

The first run above is without any tracing, just to get a baseline figure:
hackbench takes ~0.79 seconds to run on the system.

The second run enables tracing of all events with nothing filtered. This
increases the time by 100%, and hackbench takes 1.57 seconds to run.

The third run filters all events where the preempt count equals "20"
(which should never happen), so every event is discarded. This takes 1.69
seconds to run, 10% slower than just committing the events!

The last run enables all events with a filter that commits every event,
and this takes 1.70 seconds to run. The filtering overhead is
approximately 10%. Thus, discarding and committing an event from the ring
buffer take about the same time.

With this patch, the numbers change:

 # trace-cmd start -p nop
 # perf stat -r 10 hackbench 50
       0.778233033 seconds time elapsed ( +- 0.38% )

 # trace-cmd start -e all
 # perf stat -r 10 hackbench 50
       1.582102692 seconds time elapsed ( +- 0.28% )

 # trace-cmd start -e all -f 'common_preempt_count==20'
 # perf stat -r 10 hackbench 50
       1.309230710 seconds time elapsed ( +- 0.22% )

 # trace-cmd start -e all -f 'common_preempt_count!=20'
 # perf stat -r 10 hackbench 50
       1.786001924 seconds time elapsed ( +- 0.20% )

The first run is again the base with no tracing. The second run is all
tracing with no filtering. It is a little slower, but that may well be
within the noise.

The third run shows that discarding all events only took 1.3 seconds.
That is a speedup of 23%! The discard is now much faster than even the
commit.

The one downside is shown in the last run: events that are not discarded
by the filter take longer to add, due to the extra copy of the event.

Cc: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
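To make the staged-write idea above concrete, here is a minimal user-space sketch, assuming hypothetical names throughout (event_record, passes_filter, ring_buffer_commit, trace_event). It is not the kernel's implementation; it only illustrates building the record in a scratch buffer, running the filter on the finished record, and copying only the survivors into the "ring buffer".

/*
 * Illustrative sketch only -- not the kernel's code.  The old path reserved
 * space in the ring buffer, wrote the event, and then discarded it on a
 * filter miss (expensive).  The idea modeled here: stage the record in a
 * scratch buffer first, so a filter miss costs only the local copy.
 */
#include <stdbool.h>
#include <stdio.h>

struct event_record {
	unsigned int  pid;
	unsigned char preempt_count;	/* field the example filter inspects */
	char          msg[32];
};

/* Mirrors the commit-message example: common_preempt_count == 20. */
static bool passes_filter(const struct event_record *rec)
{
	return rec->preempt_count == 20;
}

/* Stand-in for the real (and much heavier) ring-buffer reserve/commit path. */
static void ring_buffer_commit(const struct event_record *rec)
{
	printf("committed: pid=%u msg=%s\n", rec->pid, rec->msg);
}

/* New-style path: build the record locally, filter it, then commit if it passes. */
static void trace_event(unsigned int pid, unsigned char pc, const char *msg)
{
	struct event_record scratch;	/* a per-CPU temp buffer in the real design */

	scratch.pid = pid;
	scratch.preempt_count = pc;
	snprintf(scratch.msg, sizeof(scratch.msg), "%s", msg);

	if (!passes_filter(&scratch))
		return;			/* dropped without ever touching the ring buffer */

	ring_buffer_commit(&scratch);
}

int main(void)
{
	trace_event(1, 20, "kept");	/* passes the ==20 filter and is committed */
	trace_event(2, 0, "dropped");	/* filtered out cheaply in the scratch buffer */
	return 0;
}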