/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009, 2010 Daniel Borkmann.
 * Subject to the GPL, version 2.
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>

#include "xmalloc.h"
#include "die.h"
#include "ring_rx.h"
#include "built_in.h"

void destroy_rx_ring(int sock, struct ring *ring)
{
	int ret;
	bool v3 = get_sockopt_tpacket(sock) == TPACKET_V3;

	munmap(ring->mm_space, ring->mm_len);
	ring->mm_len = 0;

	xfree(ring->frames);

	/* In general, this is freed during close(2) anyway. */
	if (v3)
		return;

	fmemset(&ring->layout, 0, sizeof(ring->layout));
	ret = setsockopt(sock, SOL_PACKET, PACKET_RX_RING, &ring->layout,
			 sizeof(ring->layout));
	if (unlikely(ret))
		panic("Cannot destroy the RX_RING: %s!\n", strerror(errno));
}

void setup_rx_ring_layout(int sock, struct ring *ring, unsigned int size,
			  bool jumbo_support, bool v3)
{
	fmemset(&ring->layout, 0, sizeof(ring->layout));

	ring->layout.tp_block_size = (jumbo_support ?
				      getpagesize() << 4 :
				      getpagesize() << 2);
	ring->layout.tp_frame_size = (jumbo_support ?
				      TPACKET_ALIGNMENT << 12 :
				      TPACKET_ALIGNMENT << 7);
	ring->layout.tp_block_nr = size / ring->layout.tp_block_size;
	ring->layout.tp_frame_nr = ring->layout.tp_block_size /
				   ring->layout.tp_frame_size *
				   ring->layout.tp_block_nr;

	if (v3) {
		/* Bail out if this layout ever changes, since we rely on it! */
		build_bug_on(offsetof(struct tpacket_req, tp_frame_nr) !=
			     offsetof(struct tpacket_req3, tp_frame_nr) &&
			     sizeof(struct tpacket_req) !=
			     offsetof(struct tpacket_req3, tp_retire_blk_tov));

		ring->layout3.tp_retire_blk_tov = 100; /* 0: let kernel decide */
		ring->layout3.tp_sizeof_priv = 0;
		ring->layout3.tp_feature_req_word = 0;

		set_sockopt_tpacket_v3(sock);
	} else {
		set_sockopt_tpacket_v2(sock);
	}

	ring_verify_layout(ring);
}

void create_rx_ring(int sock, struct ring *ring, int verbose)
{
	int ret;
	bool v3 = get_sockopt_tpacket(sock) == TPACKET_V3;

retry:
	ret = setsockopt(sock, SOL_PACKET, PACKET_RX_RING, &ring->raw,
			 v3 ? sizeof(ring->layout3) : sizeof(ring->layout));
	/* Only consult errno after a failed call; halve the block count
	 * and retry while the kernel cannot satisfy the allocation. */
	if (ret < 0 && errno == ENOMEM && ring->layout.tp_block_nr > 1) {
		ring->layout.tp_block_nr >>= 1;
		ring->layout.tp_frame_nr = ring->layout.tp_block_size /
					   ring->layout.tp_frame_size *
					   ring->layout.tp_block_nr;

		goto retry;
	}

	if (ret < 0)
		panic("Cannot allocate RX_RING!\n");

	ring->mm_len = ring->layout.tp_block_size * ring->layout.tp_block_nr;

	if (verbose) {
		printf("RX: %.2Lf MiB, %u Frames, each %u Byte allocated\n",
		       (long double) ring->mm_len / (1 << 20),
		       ring->layout.tp_frame_nr, ring->layout.tp_frame_size);
	}
}

void mmap_rx_ring(int sock, struct ring *ring)
{
	mmap_ring_generic(sock, ring);
}

void alloc_rx_ring_frames(int sock, struct ring *ring)
{
	int num;
	size_t size;
	bool v3 = get_sockopt_tpacket(sock) == TPACKET_V3;

	if (v3) {
		num = ring->layout3.tp_block_nr;
		size = ring->layout3.tp_block_size;
	} else {
		num = ring->layout.tp_frame_nr;
		size = ring->layout.tp_frame_size;
	}

	alloc_ring_frames_generic(ring, num, size);
}

void bind_rx_ring(int sock, struct ring *ring, int ifindex)
{
	bind_ring_generic(sock, ring, ifindex);
}

void sock_rx_net_stats(int sock)
{
	int ret;
	bool v3 = get_sockopt_tpacket(sock) == TPACKET_V3;
	union {
		struct tpacket_stats k2;
		struct tpacket_stats_v3 k3;
	} stats;
	socklen_t slen = v3 ? sizeof(stats.k3) : sizeof(stats.k2);

	memset(&stats, 0, sizeof(stats));
	ret = getsockopt(sock, SOL_PACKET, PACKET_STATISTICS, &stats, &slen);
	if (ret > -1) {
		/* tp_packets and tp_drops sit at the same offsets in the
		 * v2 and v3 stats structs, so reading via k3 is safe. */
		uint64_t packets = stats.k3.tp_packets;
		uint64_t drops = stats.k3.tp_drops;

		printf("\r%12"PRIu64" packets incoming\n", packets);
		printf("\r%12"PRIu64" packets passed filter\n", packets - drops);
		printf("\r%12"PRIu64" packets failed filter (out of space)\n",
		       drops);
		if (packets > 0)
			printf("\r%12.4lf%% packet droprate\n",
			       (1.0 * drops / packets) * 100.0);
	}
}
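/*
 * For context: the functions above are typically driven in a fixed
 * order -- lay out the ring, create it, mmap it, allocate the frame
 * table, then bind to a device. The sketch below is illustrative only:
 * pf_socket() and device_ifindex() are assumed helper functions (a
 * PF_PACKET socket and an interface-name-to-index lookup), and the
 * 4 MiB ring size and "eth0" are arbitrary example values.
 */
static void example_rx_ring_usage(void)
{
	struct ring ring;
	int sock = pf_socket();			/* assumed helper */
	int ifindex = device_ifindex("eth0");	/* assumed helper */

	memset(&ring, 0, sizeof(ring));

	/* 4 MiB ring, no jumbo frames, TPACKET_V3 */
	setup_rx_ring_layout(sock, &ring, 4 << 20, false, true);
	create_rx_ring(sock, &ring, 1 /* verbose */);
	mmap_rx_ring(sock, &ring);
	alloc_rx_ring_frames(sock, &ring);
	bind_rx_ring(sock, &ring, ifindex);

	/* ... poll(2) on sock and walk the ring frames here ... */

	sock_rx_net_stats(sock);
	destroy_rx_ring(sock, &ring);
	close(sock);
}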
commit 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7
Author:     Steven Rostedt (VMware) <rostedt@goodmis.org>
AuthorDate: 2017-01-30 19:27:10 -0500
Commit:     Steven Rostedt (VMware) <rostedt@goodmis.org>
CommitDate: 2017-01-31 09:13:49 -0500

tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at the start of the tracer. It is
pinned to a single CPU and moves to the next CPU after each period of
running. If the user modifies the migration thread's affinity, it will not
change after that happens.

The original code created the thread at the first instance it was called,
but was later changed to destroy the thread after the tracer was finished,
so that the thread would not be created again until the next instance of
the tracer was established. However, the code that initialized the affinity
was only called on the initial instantiation of the tracer. After that, the
affinity was never reinitialized, and the stale affinity did not match the
newly created thread, making it appear that the user had modified the
thread's affinity when they had not, and the thread failed to migrate
again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
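To make the pattern the fix describes concrete, here is a minimal sketch
(an assumption-laden reconstruction, not the verbatim patch): the starting
CPU is re-derived and the kthread re-bound every time the thread is
created, rather than only on the tracer's first instantiation. Names such
as start_kthread, kthread_fn, and save_cpumask follow the hwlat tracer,
and kthread_fn is assumed to be the sampling loop defined elsewhere.

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/err.h>

static struct cpumask save_cpumask;

/* Sketch: pick the starting CPU on *every* start, so a thread created
 * for a new tracer instance does not inherit stale affinity state. */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	/* Re-derive the first allowed CPU from the current tracing mask */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread))
		return -ENOMEM;

	kthread_bind(kthread, next_cpu);	/* pin before first wakeup */
	wake_up_process(kthread);

	return 0;
}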