#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

/* One GRO cell per CPU: a queue of received skbs plus the NAPI context
 * that drains it via napi_gro_receive().
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};

static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	/* Fall back to the regular receive path if the cells are not set
	 * up, the skb is cloned, or the device does not offer GRO.
	 */
	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
		return netif_rx(skb);

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);

	/* Schedule NAPI only on the empty->non-empty transition. */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);
	return NET_RX_SUCCESS;
}

/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}

static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
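For context, a minimal usage sketch (not part of the header above): a hypothetical tunnel driver — the mytun_* names are illustrative, not from any real driver — feeds decapsulated packets through gro_cells so they are aggregated by GRO on the receiving CPU instead of going straight to netif_rx().

#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct mytun_priv {
	struct gro_cells	gro_cells;
};

static int mytun_open(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	/* Allocate one gro_cell per possible CPU and register its NAPI. */
	return gro_cells_init(&priv->gro_cells, dev);
}

static int mytun_stop(struct net_device *dev)
{
	struct mytun_priv *priv = netdev_priv(dev);

	/* Delete the per-CPU NAPI instances and purge any queued skbs. */
	gro_cells_destroy(&priv->gro_cells);
	return 0;
}

/* Called from the decap path instead of netif_rx(). */
static int mytun_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct mytun_priv *priv = netdev_priv(dev);

	skb->dev = dev;		/* gro_cells_receive() reads skb->dev */
	return gro_cells_receive(&priv->gro_cells, skb);
}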
commit 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7
parent 0c744ea4f77d72b3dcebb7a8f2684633ec79be88
tree   370efda701f03cccf21e02bb1fdd3b852547d75c
author    Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-30 19:27:10 -0500
committer Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-31 09:13:49 -0500
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at start of the tracer. It is
pinned to a single CPU and will move to the next CPU after each period of
running. If the user modifies the migration thread's affinity, it will not
change after that happens.

The original code created the thread at the first instance it was called,
but later was changed to destroy the thread after the tracer was finished,
and would not be created until the next instance of the tracer was
established. The code that initialized the affinity was only called on the
initial instantiation of the tracer. After that, it was not initialized, and
the previous affinity did not match the current newly created one, making
it appear that the user modified the thread's affinity when it did not, and
the thread failed to migrate again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
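To make the failure mode concrete, here is a hedged sketch of the pattern the message describes — hypothetical names, not the actual kernel/trace/trace_hwlat.c code: per-run migration state is initialized only on the first start, so a recreated thread inherits stale state and never migrates; the fix pattern reinitializes it on every thread creation.

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/err.h>

static struct task_struct *kworker;
static int next_cpu = -1;	/* -1 means "migration state not set up" */

static int worker_fn(void *data)
{
	/* ... samples latency, then migrates itself to next_cpu each
	 * period (elided in this sketch) ...
	 */
	return 0;
}

/* Buggy pattern: migration state is set up only on the very first start,
 * so a destroyed-and-recreated thread sees stale bookkeeping that looks
 * like a user-modified affinity, and it never migrates.
 */
static int start_worker_buggy(void)
{
	if (next_cpu < 0)	/* true only on the first instantiation */
		next_cpu = cpumask_first(cpu_online_mask);
	kworker = kthread_run(worker_fn, NULL, "hwlatd-sketch");
	return PTR_ERR_OR_ZERO(kworker);
}

/* Fixed pattern: reinitialize per-run state every time the thread is
 * created, as the commit above does for the hwlat kthread.
 */
static int start_worker_fixed(void)
{
	next_cpu = cpumask_first(cpu_online_mask);
	kworker = kthread_run(worker_fn, NULL, "hwlatd-sketch");
	return PTR_ERR_OR_ZERO(kworker);
}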