/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as xdr_skb_read_bits, but calculate a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc,
					 void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	/* Checksum the chunk as it is copied, then fold it into the
	 * running checksum at the correct byte position. */
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
				  struct xdr_skb_reader *desc,
				  xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		/* Split the offset into a page index and an offset
		 * within that page. */
		ppage += base >> PAGE_SHIFT;
		base &= ~PAGE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base,
				     len - base);
out:
	return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
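/*
 * Illustrative usage only (not part of the original file): a transport
 * receive path could drive xdr_partial_copy_from_skb() with
 * xdr_skb_read_bits as the copy actor, roughly as in the hypothetical
 * helper sketched below. The name demo_read_skb() and the assumption
 * that @xdr has already been sized for the incoming data are inventions
 * of this sketch, not kernel API.
 *
 *	static ssize_t demo_read_skb(struct xdr_buf *xdr, struct sk_buff *skb)
 *	{
 *		struct xdr_skb_reader desc = {
 *			.skb	= skb,
 *			.offset	= 0,
 *			.count	= skb->len,
 *		};
 *
 *		return xdr_partial_copy_from_skb(xdr, 0, &desc,
 *						 xdr_skb_read_bits);
 *	}
 */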
/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = 0;
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	/* Seed the running checksum with any partial checksum the
	 * device has already computed (desc.offset is 0 here). */
	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc,
				      xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;

		/* Checksum any trailing data that did not fit in @xdr. */
		csum2 = skb_checksum(skb, desc.offset,
				     skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !skb->csum_complete_sw)
		netdev_rx_csum_fault(skb->dev);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
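/*
 * Illustrative usage only (not part of the original file): the UDP
 * transport's datagram receive path is the natural caller of
 * csum_partial_copy_to_xdr(); a nonzero return means the checksum
 * failed or the copy came up short, and the reply should be dropped.
 * A minimal sketch, where demo_udp_receive() and its @req argument
 * are inventions of the example:
 *
 *	static int demo_udp_receive(struct rpc_rqst *req, struct sk_buff *skb)
 *	{
 *		struct xdr_buf *xdr = &req->rq_private_buf;
 *
 *		return csum_partial_copy_to_xdr(xdr, skb) ? -EBADMSG : 0;
 *	}
 */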