/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2016 Herbert Xu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _CRYPTO_CBC_H
#define _CRYPTO_CBC_H

#include <crypto/internal/skcipher.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Encrypt one walk chunk when source and destination are distinct buffers.
 * fn() encrypts a single block; the return value is the number of leftover
 * bytes (less than one block size).
 */
static inline int crypto_cbc_encrypt_segment(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_xor(iv, src, bsize);
                fn(tfm, iv, dst);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

/* Encrypt one walk chunk in place (ciphertext overwrites the plaintext). */
static inline int crypto_cbc_encrypt_inplace(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_xor(src, iv, bsize);
                fn(tfm, src, src);
                iv = src;

                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

/*
 * CBC-encrypt an entire skcipher request, using fn() as the single-block
 * encrypt primitive.
 */
static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
                                          void (*fn)(struct crypto_skcipher *,
                                                     const u8 *, u8 *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes) {
                if (walk.src.virt.addr == walk.dst.virt.addr)
                        err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
                else
                        err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
                err = skcipher_walk_done(&walk, err);
        }

        return err;
}

/* Decrypt one walk chunk when source and destination are distinct buffers. */
static inline int crypto_cbc_decrypt_segment(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                fn(tfm, src, dst);
                crypto_xor(dst, iv, bsize);
                iv = src;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

/*
 * Decrypt one walk chunk in place.  Blocks are processed from the last one
 * backwards, so the previous ciphertext block is still intact when it is
 * needed as the chaining value for the XOR.
 */
static inline int crypto_cbc_decrypt_inplace(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 last_iv[bsize];

        /* Start of the last block. */
        src += nbytes - (nbytes & (bsize - 1)) - bsize;
        memcpy(last_iv, src, bsize);

        for (;;) {
                fn(tfm, src, src);
                if ((nbytes -= bsize) < bsize)
                        break;
                crypto_xor(src, src - bsize, bsize);
                src -= bsize;
        }

        crypto_xor(src, walk->iv, bsize);
        memcpy(walk->iv, last_iv, bsize);

        return nbytes;
}

/* Dispatch one walk chunk to the in-place or separate-buffer decrypt helper. */
static inline int crypto_cbc_decrypt_blocks(
        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
{
        if (walk->src.virt.addr == walk->dst.virt.addr)
                return crypto_cbc_decrypt_inplace(walk, tfm, fn);
        else
                return crypto_cbc_decrypt_segment(walk, tfm, fn);
}

#endif /* _CRYPTO_CBC_H */
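A minimal sketch of how a cipher driver might consume these helpers (the mydrv_* names and the XOR placeholder "cipher" are assumptions for illustration, not part of this header): the driver supplies a callback with the fn() signature that encrypts exactly one block of crypto_skcipher_blocksize() bytes, and crypto_cbc_encrypt_walk() takes care of the walk, the XOR chaining and the IV bookkeeping.

/* Hypothetical driver glue; all mydrv_* identifiers are illustrative only. */
#include <crypto/cbc.h>
#include <crypto/internal/skcipher.h>

struct mydrv_ctx {
        u8 key[16];                     /* placeholder key material */
};

/*
 * Placeholder one-block primitive (XOR with the key); a real driver would
 * invoke its hardware engine or software block cipher here instead.
 */
static void mydrv_encrypt_block(struct mydrv_ctx *ctx, u8 *dst, const u8 *src)
{
        int i;

        for (i = 0; i < 16; i++)
                dst[i] = src[i] ^ ctx->key[i];
}

/* Adapter with the exact fn() signature expected by the CBC helpers. */
static void mydrv_encrypt_one(struct crypto_skcipher *tfm,
                              const u8 *src, u8 *dst)
{
        mydrv_encrypt_block(crypto_skcipher_ctx(tfm), dst, src);
}

/* skcipher .encrypt callback: all CBC chaining is done by the helper. */
static int mydrv_cbc_encrypt(struct skcipher_request *req)
{
        return crypto_cbc_encrypt_walk(req, mydrv_encrypt_one);
}

Note there is no crypto_cbc_decrypt_walk() counterpart in this header: a decrypt callback would drive skcipher_walk_virt()/skcipher_walk_done() itself and hand each chunk to crypto_cbc_decrypt_blocks().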
author    Douglas Miller <dougmill@linux.vnet.ibm.com>  2017-01-28 06:42:20 -0600
committer Tejun Heo <tj@kernel.org>  2017-01-28 07:49:42 -0500
commit    966d2b04e070bc040319aaebfec09e0144dc3341
tree      4b96156e3d1dd4dfd6039b7c219c9dc4616da52d
parent    1b1bc42c1692e9b62756323c675a44cb1a1f9dbd

percpu-refcount: fix reference leak during percpu-atomic transition

percpu_ref_tryget() and percpu_ref_tryget_live() should return "true" IFF
they acquire a reference. But the return value from
atomic_long_inc_not_zero() is a long and may have high bits set, e.g.
PERCPU_COUNT_BIAS, while the return value of the tryget routines is bool,
so the reference may actually be acquired even though the routines return
"false". This results in a reference leak, since the caller assumes it does
not need to do a corresponding percpu_ref_put().

This was seen when performing CPU hotplug during I/O, as hangs in
blk_mq_freeze_queue_wait where percpu_ref_kill (blk_mq_freeze_queue_start)
raced with percpu_ref_tryget (blk_mq_timeout_work). Sample stack trace:

    __switch_to+0x2c0/0x450
    __schedule+0x2f8/0x970
    schedule+0x48/0xc0
    blk_mq_freeze_queue_wait+0x94/0x120
    blk_mq_queue_reinit_work+0xb8/0x180
    blk_mq_queue_reinit_prepare+0x84/0xa0
    cpuhp_invoke_callback+0x17c/0x600
    cpuhp_up_callbacks+0x58/0x150
    _cpu_up+0xf0/0x1c0
    do_cpu_up+0x120/0x150
    cpu_subsys_online+0x64/0xe0
    device_online+0xb4/0x120
    online_store+0xb4/0xc0
    dev_attr_store+0x68/0xa0
    sysfs_kf_write+0x80/0xb0
    kernfs_fop_write+0x17c/0x250
    __vfs_write+0x6c/0x1e0
    vfs_write+0xd0/0x270
    SyS_write+0x6c/0x110
    system_call+0x38/0xe0

Examination of the queue showed a single reference (no PERCPU_COUNT_BIAS,
and __PERCPU_REF_DEAD, __PERCPU_REF_ATOMIC set) and no requests. However,
the conditions at the time of the race were a count of PERCPU_COUNT_BIAS + 0
with __PERCPU_REF_DEAD and __PERCPU_REF_ATOMIC set.

The fix is to make the tryget routines use an actual boolean internally
instead of the atomic long result truncated to an int.

Fixes: e625305b3907 ("percpu-refcount: make percpu_ref based on longs instead of ints")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=190751
Signed-off-by: Douglas Miller <dougmill@linux.vnet.ibm.com>
Reviewed-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: e625305b3907 ("percpu-refcount: make percpu_ref based on longs instead of ints")
Cc: stable@vger.kernel.org # v3.18+
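The truncation described above can be shown with a minimal userspace sketch (an illustration, not the kernel patch itself; it assumes an LP64 system with 64-bit long and 32-bit int, and uses 1L << 62 as a stand-in for a biased count such as PERCPU_COUNT_BIAS):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /* Stand-in for a biased percpu-ref count: only a high bit is set. */
        long count = 1L << 62;

        int as_int = count;     /* implementation-defined truncation: low 32 bits are 0 */
        bool as_bool = count;   /* C conversion to _Bool: any non-zero value becomes true */

        /*
         * Prints "as_int=0 as_bool=1" on typical LP64 systems: storing the
         * long in an int reports failure even though the reference was in
         * fact acquired, while a bool keeps the "non-zero means success"
         * meaning, which is the change the patch makes internally.
         */
        printf("as_int=%d as_bool=%d\n", as_int, as_bool);
        return 0;
}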