#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tso.h>
#include <asm/unaligned.h>

/* Calculate expected number of TX descriptors */
int tso_count_descs(struct sk_buff *skb)
{
	/* The Marvell Way */
	return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
}
EXPORT_SYMBOL(tso_count_descs);

/* Build the headers for one TSO segment: copy the original MAC/IP/TCP
 * headers, then patch in the per-segment IP length/ID and TCP sequence
 * number.
 */
void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last)
{
	struct tcphdr *tcph;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int mac_hdr_len = skb_network_offset(skb);

	memcpy(hdr, skb->data, hdr_len);
	if (!tso->ipv6) {
		struct iphdr *iph = (void *)(hdr + mac_hdr_len);

		iph->id = htons(tso->ip_id);
		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
		tso->ip_id++;
	} else {
		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);

		iph->payload_len = htons(size + tcp_hdrlen(skb));
	}
	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
	put_unaligned_be32(tso->tcp_seq, &tcph->seq);

	if (!is_last) {
		/* Clear all special flags for not last packet */
		tcph->psh = 0;
		tcph->fin = 0;
		tcph->rst = 0;
	}
}
EXPORT_SYMBOL(tso_build_hdr);

/* Advance the payload cursor by 'size' bytes, stepping into the next
 * page fragment once the current one is exhausted.
 */
void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
{
	tso->tcp_seq += size;
	tso->size -= size;
	tso->data += size;

	if ((tso->size == 0) &&
	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];

		/* Move to next segment */
		tso->size = frag->size;
		tso->data = page_address(frag->page.p) + frag->page_offset;
		tso->next_frag_idx++;
	}
}
EXPORT_SYMBOL(tso_build_data);

/* Initialize TSO state from the skb: snapshot the IP ID and TCP sequence
 * number, and point the cursor at the first byte of payload.
 */
void tso_start(struct sk_buff *skb, struct tso_t *tso)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso->ip_id = ntohs(ip_hdr(skb)->id);
	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
	tso->next_frag_idx = 0;
	tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);

	/* Build first data */
	tso->size = skb_headlen(skb) - hdr_len;
	tso->data = skb->data + hdr_len;
	if ((tso->size == 0) &&
	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];

		/* Move to next segment */
		tso->size = frag->size;
		tso->data = page_address(frag->page.p) + frag->page_offset;
		tso->next_frag_idx++;
	}
}
EXPORT_SYMBOL(tso_start);
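
For context, here is a minimal sketch of how a NIC driver's TX path might drive these four helpers, modeled on the Marvell drivers that consume this API. Everything prefixed my_ (struct my_tx_queue, my_txq_space(), my_txq_hdr_buf(), my_queue_hdr_desc(), my_queue_data_desc()) is a hypothetical placeholder for driver-specific descriptor handling, not part of this file:

#include <linux/kernel.h>
#include <net/tso.h>

struct my_tx_queue;                               /* hypothetical TX ring */
int   my_txq_space(struct my_tx_queue *txq);      /* free descriptor count */
char *my_txq_hdr_buf(struct my_tx_queue *txq);    /* per-descriptor hdr buffer */
void  my_queue_hdr_desc(struct my_tx_queue *txq, char *hdr, int len);
void  my_queue_data_desc(struct my_tx_queue *txq, char *data, int size);

static int my_xmit_tso(struct my_tx_queue *txq, struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	/* The worst-case descriptor count must fit in the ring up front */
	if (my_txq_space(txq) < tso_count_descs(skb))
		return -EBUSY;

	tso_start(skb, &tso);

	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);
		char *hdr = my_txq_hdr_buf(txq);

		total_len -= data_left;

		/* One freshly patched header per segment */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		my_queue_hdr_desc(txq, hdr, hdr_len);

		/* Segment payload may span several frags: one descriptor
		 * per contiguous chunk, advancing the cursor each time.
		 */
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			my_queue_data_desc(txq, tso.data, size);
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
	return 0;
}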
author:    Thomas Gleixner <tglx@linutronix.de>  2017-01-31 19:03:21 +0100
committer: Thomas Gleixner <tglx@linutronix.de>  2017-01-31 20:22:18 +0100
commit:    aaaec6fc755447a1d056765b11b24d8ff2b81366
tree:      a7f4167960ee1df86739905b6ccdeb95465bfe5f
parent:    08d85f3ea99f1eeafc4e8507936190e86a16ee8c
x86/irq: Make irq activate operations symmetric
The recent commit which prevents double activation of interrupts
unearthed interesting code in x86. The code (ab)uses
irq_domain_activate_irq() to reconfigure an already activated interrupt.
That trips over the prevention code now.

Fix it by deactivating the interrupt before activating the new
configuration.

Fixes: 08d85f3ea99f1 ("irqdomain: Avoid activating interrupts more than once")
Reported-and-tested-by: Mike Galbraith <efault@gmx.de>
Reported-and-tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701311901580.3457@nanos
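
The fix the message describes amounts to making the activate/deactivate
calls symmetric at the reconfiguration site. A minimal sketch of that
pattern, assuming a caller that already holds the interrupt's irq_data:
my_reconfigure_irq() is illustrative only, while the irq_domain_*()
calls are the real irqdomain entry points (as of this kernel era,
void functions taking a struct irq_data pointer):

#include <linux/irqdomain.h>

/* Illustrative only: tear down the stale activation first, so that
 * activating the new configuration is once again a first activation,
 * which the double-activation prevention logic permits.
 */
static void my_reconfigure_irq(struct irq_data *irqd)
{
	irq_domain_deactivate_irq(irqd);
	irq_domain_activate_irq(irqd);
}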