/*
 * linux/fs/ufs/swab.h
 *
 * Copyright (C) 1997, 1998 Francois-Rene Rideau
 * Copyright (C) 1998 Jakub Jelinek
 * Copyright (C) 2001 Christoph Hellwig
 */

#ifndef _UFS_SWAB_H
#define _UFS_SWAB_H

/*
 * Notes:
 *    HERE WE ASSUME EITHER BIG OR LITTLE ENDIAN UFSes
 *    in case there are ufs implementations that have strange bytesexes,
 *    you'll need to modify code here as well as in ufs_super.c and ufs_fs.h
 *    to support them.
 */

enum {
	BYTESEX_LE,
	BYTESEX_BE
};

static inline u64 fs64_to_cpu(struct super_block *sbp, __fs64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le64_to_cpu((__force __le64)n);
	else
		return be64_to_cpu((__force __be64)n);
}

static inline __fs64 cpu_to_fs64(struct super_block *sbp, u64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs64)cpu_to_le64(n);
	else
		return (__force __fs64)cpu_to_be64(n);
}

static inline u32 fs32_to_cpu(struct super_block *sbp, __fs32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le32_to_cpu((__force __le32)n);
	else
		return be32_to_cpu((__force __be32)n);
}

static inline __fs32 cpu_to_fs32(struct super_block *sbp, u32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs32)cpu_to_le32(n);
	else
		return (__force __fs32)cpu_to_be32(n);
}

static inline void fs32_add(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		le32_add_cpu((__le32 *)n, d);
	else
		be32_add_cpu((__be32 *)n, d);
}

static inline void fs32_sub(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		le32_add_cpu((__le32 *)n, -d);
	else
		be32_add_cpu((__be32 *)n, -d);
}

static inline u16 fs16_to_cpu(struct super_block *sbp, __fs16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le16_to_cpu((__force __le16)n);
	else
		return be16_to_cpu((__force __be16)n);
}

static inline __fs16 cpu_to_fs16(struct super_block *sbp, u16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs16)cpu_to_le16(n);
	else
		return (__force __fs16)cpu_to_be16(n);
}

static inline void fs16_add(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		le16_add_cpu((__le16 *)n, d);
	else
		be16_add_cpu((__be16 *)n, d);
}

static inline void fs16_sub(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		le16_add_cpu((__le16 *)n, -d);
	else
		be16_add_cpu((__be16 *)n, -d);
}

#endif /* _UFS_SWAB_H */
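Every helper in this header follows the same pattern: branch once on the byte
order recorded at mount time in UFS_SB(sbp)->s_bytesex, then delegate to the
fixed-endian conversion macros. As an illustration only, here is a
self-contained userspace sketch of that dispatch pattern. All demo_* names are
hypothetical stand-ins; the real code depends on kernel types (u32, __fs32)
and macros (le32_to_cpu(), be32_to_cpu()) that are not available outside the
kernel tree.

#include <stdint.h>
#include <stdio.h>

enum { BYTESEX_LE, BYTESEX_BE };

/* Hypothetical stand-in for UFS_SB(sbp)->s_bytesex. */
struct demo_sb {
	int s_bytesex;
};

/* Userspace equivalents of le32_to_cpu()/be32_to_cpu(): assemble the
 * value byte by byte, so the result is host-order on any CPU. */
static uint32_t demo_le32_to_cpu(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t demo_be32_to_cpu(const uint8_t b[4])
{
	return (uint32_t)b[3] | (uint32_t)b[2] << 8 |
	       (uint32_t)b[1] << 16 | (uint32_t)b[0] << 24;
}

/* Same shape as fs32_to_cpu(): one branch on the recorded bytesex. */
static uint32_t demo_fs32_to_cpu(const struct demo_sb *sb, const uint8_t raw[4])
{
	if (sb->s_bytesex == BYTESEX_LE)
		return demo_le32_to_cpu(raw);
	else
		return demo_be32_to_cpu(raw);
}

int main(void)
{
	const uint8_t on_disk[4] = { 0x12, 0x34, 0x56, 0x78 };
	struct demo_sb le = { BYTESEX_LE }, be = { BYTESEX_BE };

	printf("LE: 0x%08x\n", demo_fs32_to_cpu(&le, on_disk)); /* 0x78563412 */
	printf("BE: 0x%08x\n", demo_fs32_to_cpu(&be, on_disk)); /* 0x12345678 */
	return 0;
}

The design choice this mirrors is that the byte order is decided once per
mount and stored in the in-memory superblock, so every field access pays only
a predictable branch instead of re-detecting the on-disk format.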
author	Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>	2017-01-09 16:00:44 +0530
committer	J. Bruce Fields <bfields@redhat.com>	2017-01-12 16:14:47 -0500
commit	ce1ca7d2d140a1f4aaffd297ac487f246963dd2f (patch)
tree	5aeab5e0cb6ef404c894a18251588278b9432a2c /fs
parent	546125d1614264d26080817d0c8cddb9b25081fa (diff)
svcrdma: avoid duplicate dma unmapping during error recovery
In rdma_read_chunk_frmr(), when ib_post_send() fails, the error code path
invokes ib_dma_unmap_sg() to unmap the sg list. It then invokes
svc_rdma_put_frmr(), which in turn tries to unmap the same sg list through
ib_dma_unmap_sg() again. This second unmap is invalid and could lead to
problems when the iova being unmapped is subsequently reused. Remove the
call to unmap in rdma_read_chunk_frmr() and let svc_rdma_put_frmr()
handle it.

Fixes: 412a15c0fe53 ("svcrdma: Port to new memory registration API")
Cc: stable@vger.kernel.org
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
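The shape of this bug is a general one: an error path releases a resource
itself and then calls a teardown helper whose contract already includes that
release. Below is a minimal, self-contained sketch of the pattern with
hypothetical demo_* names; this is not the svcrdma code, and malloc/free
merely stand in for DMA map/unmap.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the frmr that owns a mapped sg list. */
struct demo_frmr {
	void *sg;	/* the "mapped" scatter-gather list */
};

/* Teardown helper whose contract includes the unmap, analogous to
 * svc_rdma_put_frmr(): it releases the sg mapping itself. */
static void demo_put_frmr(struct demo_frmr *f)
{
	free(f->sg);	/* the one and only "unmap" */
	f->sg = NULL;
	free(f);
}

static int demo_post_send(void)
{
	return -1;	/* simulate the ib_post_send() failure */
}

static int demo_read_chunk(struct demo_frmr *f)
{
	if (demo_post_send() != 0) {
		/*
		 * The buggy version also did "free(f->sg);" right here,
		 * the double release: demo_put_frmr() frees f->sg again.
		 * The fix lets the helper own the cleanup entirely.
		 */
		demo_put_frmr(f);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_frmr *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->sg = malloc(64);
	if (demo_read_chunk(f) != 0)
		fprintf(stderr, "send failed, resources released once\n");
	return 0;
}

Centralizing the release in one helper gives the resource a single owner, so
callers on error paths cannot disagree with the helper about who unmaps.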