/* * netsniff-ng - the packet sniffing beast * Copyright 2011 - 2013 Daniel Borkmann. * Subject to the GPL, version 2. */ #include #include #include #include #include #include #include "pcap_io.h" #include "xmalloc.h" #include "built_in.h" #include "iosched.h" #include "ioops.h" static struct iovec iov[1024] __cacheline_aligned; static off_t iov_off_rd = 0, iov_slot = 0; static ssize_t pcap_sg_write(int fd, pcap_pkthdr_t *phdr, enum pcap_type type, const uint8_t *packet, size_t len) { ssize_t ret, hdrsize = pcap_get_hdr_length(phdr, type); if (unlikely(iov_slot == array_size(iov))) { ret = writev(fd, iov, array_size(iov)); if (ret < 0) panic("Writev I/O error: %s!\n", strerror(errno)); iov_slot = 0; } fmemcpy(iov[iov_slot].iov_base, &phdr->raw, hdrsize); iov[iov_slot].iov_len = hdrsize; fmemcpy(iov[iov_slot].iov_base + iov[iov_slot].iov_len, packet, len); ret = (iov[iov_slot].iov_len += len); iov_slot++; return ret; } static ssize_t __pcap_sg_inter_iov_hdr_read(int fd, pcap_pkthdr_t *phdr, enum pcap_type type, uint8_t *packet, size_t len, size_t hdrsize) { int ret; size_t offset = 0; ssize_t remainder; offset = iov[iov_slot].iov_len - iov_off_rd; remainder = hdrsize - offset; if (remainder < 0) remainder = 0; bug_on(offset + remainder != hdrsize); fmemcpy(&phdr->raw, iov[iov_slot].iov_base + iov_off_rd, offset); iov_off_rd = 0; iov_slot++; if (iov_slot == array_size(iov)) { iov_slot = 0; ret = readv(fd, iov, array_size(iov)); if (unlikely(ret <= 0)) return -EIO; } fmemcpy(&phdr->raw + offset, iov[iov_slot].iov_base + iov_off_rd, remainder); iov_off_rd += remainder; return hdrsize; } static ssize_t __pcap_sg_inter_iov_data_read(int fd, uint8_t *packet, size_t len, size_t hdrlen) { int ret; size_t offset = 0; ssize_t remainder; offset = iov[iov_slot].iov_len - iov_off_rd; remainder = hdrlen - offset; if (remainder < 0) remainder = 0; bug_on(offset + remainder != hdrlen); fmemcpy(packet, iov[iov_slot].iov_base + iov_off_rd, offset); iov_off_rd = 0; iov_slot++; if 
(iov_slot == array_size(iov)) { iov_slot = 0; ret = readv(fd, iov, array_size(iov)); if (unlikely(ret <= 0)) return -EIO; } fmemcpy(packet + offset, iov[iov_slot].iov_base + iov_off_rd, remainder); iov_off_rd += remainder; return hdrlen; } static ssize_t pcap_sg_read(int fd, pcap_pkthdr_t *phdr, enum pcap_type type, uint8_t *packet, size_t len) { ssize_t ret = 0; size_t hdrsize = pcap_get_hdr_length(phdr, type), hdrlen; if (likely(iov[iov_slot].iov_len - iov_off_rd >= hdrsize)) { fmemcpy(&phdr->raw, iov[iov_slot].iov_base + iov_off_rd, hdrsize); iov_off_rd += hdrsize; } else { ret = __pcap_sg_inter_iov_hdr_read(fd, phdr, type, packet, len, hdrsize); if (unlikely(ret < 0)) return ret; } hdrlen = pcap_get_length(phdr, type); if (unlikely(hdrlen == 0 || hdrlen > len)) return -EINVAL; if (likely(iov[iov_slot].iov_len - iov_off_rd >= hdrlen)) { fmemcpy(packet, iov[iov_slot].iov_base + iov_off_rd, hdrlen); iov_off_rd += hdrlen; } else { ret = __pcap_sg_inter_iov_data_read(fd, packet, len, hdrlen); if (unlikely(ret < 0)) return ret; } return hdrsize + hdrlen; } static void pcap_sg_fsync(int fd) { ssize_t ret = writev(fd, iov, iov_slot); if (ret < 0) panic("Writev I/O error: %s!\n", strerror(errno)); iov_slot = 0; fdatasync(fd); } static void pcap_sg_init_once(void) { set_ioprio_rt(); } static int pcap_sg_prepare_access(int fd, enum pcap_mode mode, bool jumbo) { int i, ret; size_t len = 0; iov_slot = 0; len = jumbo ? 
(PAGE_SIZE * 16) /* 64k max */ : (PAGE_SIZE * 3) /* 12k max */; for (i = 0; i < array_size(iov); ++i) { iov[i].iov_base = xzmalloc_aligned(len, 64); iov[i].iov_len = len; } if (mode == PCAP_MODE_RD) { ret = readv(fd, iov, array_size(iov)); if (ret <= 0) return -EIO; iov_off_rd = 0; iov_slot = 0; } return 0; } static void pcap_sg_prepare_close(int fd, enum pcap_mode mode) { int i; for (i = 0; i < array_size(iov); ++i) xfree(iov[i].iov_base); } const struct pcap_file_ops pcap_sg_ops = { .init_once_pcap = pcap_sg_init_once, .pull_fhdr_pcap = pcap_generic_pull_fhdr, .push_fhdr_pcap = pcap_generic_push_fhdr, .prepare_access_pcap = pcap_sg_prepare_access, .prepare_close_pcap = pcap_sg_prepare_close, .read_pcap = pcap_sg_read, .write_pcap = pcap_sg_write, .fsync_pcap = pcap_sg_fsync, }; iff/?h=nds-private-remove&id=fa19a769f82fb9a5ca000b83cacd13fcaeda51ac&id2=90f92c631b210c1e97080b53a9d863783281a932'>Diffstatselect>
mode:
author: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>, 2017-01-09 16:00:44 +0530
committer: J. Bruce Fields <bfields@redhat.com>, 2017-01-12 16:14:47 -0500
commit: ce1ca7d2d140a1f4aaffd297ac487f246963dd2f (patch)
tree: 5aeab5e0cb6ef404c894a18251588278b9432a2c /sound/isa/ad1816a/Makefile
parent: 546125d1614264d26080817d0c8cddb9b25081fa (diff)
svcrdma: avoid duplicate dma unmapping during error recovery
In rdma_read_chunk_frmr() when ib_post_send() fails, the error code path invokes ib_dma_unmap_sg() to unmap the sg list. It then invokes svc_rdma_put_frmr() which in turn tries to unmap the same sg list through ib_dma_unmap_sg() again. This second unmap is invalid and could lead to problems when the iova being unmapped is subsequently reused. Remove the call to unmap in rdma_read_chunk_frmr() and let svc_rdma_put_frmr() handle it. Fixes: 412a15c0fe53 ("svcrdma: Port to new memory registration API") Cc: stable@vger.kernel.org Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> Reviewed-by: Chuck Lever <chuck.lever@oracle.com> Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'sound/isa/ad1816a/Makefile')