/* net/core/tso.c: software TSO helpers for drivers without hardware TSO */
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tso.h>
#include <asm/unaligned.h>

/* Calculate expected number of TX descriptors */
int tso_count_descs(struct sk_buff *skb)
{
	/* The Marvell Way */
	return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
}
EXPORT_SYMBOL(tso_count_descs);

void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last)
{
	struct tcphdr *tcph;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int mac_hdr_len = skb_network_offset(skb);

	/* Copy the original headers, then patch the per-segment fields. */
	memcpy(hdr, skb->data, hdr_len);
	if (!tso->ipv6) {
		struct iphdr *iph = (void *)(hdr + mac_hdr_len);

		iph->id = htons(tso->ip_id);
		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
		tso->ip_id++;
	} else {
		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);

		iph->payload_len = htons(size + tcp_hdrlen(skb));
	}
	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
	put_unaligned_be32(tso->tcp_seq, &tcph->seq);

	if (!is_last) {
		/* Clear all special flags for not last packet */
		tcph->psh = 0;
		tcph->fin = 0;
		tcph->rst = 0;
	}
}
EXPORT_SYMBOL(tso_build_hdr);

void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
{
	tso->tcp_seq += size;
	tso->size -= size;
	tso->data += size;

	if ((tso->size == 0) &&
	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];

		/* Move to next segment */
		tso->size = frag->size;
		tso->data = page_address(frag->page.p) + frag->page_offset;
		tso->next_frag_idx++;
	}
}
EXPORT_SYMBOL(tso_build_data);

void tso_start(struct sk_buff *skb, struct tso_t *tso)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso->ip_id = ntohs(ip_hdr(skb)->id);
	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
	tso->next_frag_idx = 0;
	tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);

	/* Build first data */
	tso->size = skb_headlen(skb) - hdr_len;
	tso->data = skb->data + hdr_len;
	if ((tso->size == 0) &&
	    (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];

		/* Move to next segment */
		tso->size = frag->size;
		tso->data = page_address(frag->page.p) + frag->page_offset;
		tso->next_frag_idx++;
	}
}
EXPORT_SYMBOL(tso_start);
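The intended consumer of these helpers is a NIC driver's transmit path: reserve the worst case of descriptors up front (tso_count_descs() assumes one header plus one data descriptor per segment and one per page fragment, hence gso_segs * 2 + nr_frags), then emit one rebuilt header followed by one or more payload descriptors per MSS-sized segment. The sketch below shows that loop in the style of the Marvell drivers this API was written for; struct my_tx_queue, my_queue_free_descs(), my_queue_put_hdr() and my_queue_put_data() are hypothetical placeholders for driver-specific descriptor-ring operations, and only the tso_* calls are the real API.

/*
 * Hypothetical driver xmit path using the software TSO helpers.
 * The my_* queue operations are illustrative stand-ins for a real
 * driver's descriptor-ring handling.
 */
static int my_xmit_tso(struct my_tx_queue *q, struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;	/* TCP payload bytes left */
	int mss = skb_shinfo(skb)->gso_size;
	struct tso_t tso;

	/* Reserve the worst-case number of descriptors up front. */
	if (my_queue_free_descs(q) < tso_count_descs(skb))
		return -EBUSY;

	tso_start(skb, &tso);

	while (total_len > 0) {
		int data_left = min_t(int, mss, total_len);
		char hdr[TSO_HEADER_SIZE];	/* real drivers use DMA-able memory */

		total_len -= data_left;

		/* One rewritten L3/L4 header per segment ... */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		my_queue_put_hdr(q, hdr, hdr_len);

		/* ... then the payload, possibly spanning several frags. */
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			my_queue_put_data(q, tso.data, size);
			data_left -= size;

			/* Advance seq and data pointer, hop to the next frag. */
			tso_build_data(skb, &tso, size);
		}
	}
	return 0;
}

Note that tso_build_hdr() clears PSH/FIN/RST on all but the last segment and bumps the IPv4 ID per segment, so the driver only copies the header it is handed; in the Marvell drivers, checksum computation is left to the hardware.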
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-01-28 11:09:04 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-01-28 11:09:04 -0800
commit    dd553962675ab5747e887f89aea1ece90e6a802e (patch)
tree      d999368d0921e139b7b67ea0fc6a4e6ac548d8dc /net/ncsi/Kconfig
parent    64a172d265643b345007ddaafcc523f6e5373b69 (diff)
parent    2e38a37f23c98d7fad87ff022670060b8a0e2bf5 (diff)
Merge tag 'md/4.10-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD fixes from Shaohua Li:
 "This fixes several corner cases for raid5 cache, which is merged into
  this cycle"

* tag 'md/4.10-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md/r5cache: disable write back for degraded array
  md/r5cache: shift complex rmw from read path to write path
  md/r5cache: flush data only stripes in r5l_recovery_log()
  md/raid5: move comment of fetch_block to right location
  md/r5cache: read data into orig_page for prexor of cached data
  md/raid5-cache: delete meaningless code
Diffstat (limited to 'net/ncsi/Kconfig')