/*
 * Software async crypto daemon
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban
 *             Gabriele Paoloni
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H

#include <linux/kernel.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

struct cryptd_ablkcipher {
	struct crypto_ablkcipher base;
};

static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
	struct crypto_ablkcipher *tfm)
{
	return (struct cryptd_ablkcipher *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);

struct cryptd_skcipher {
	struct crypto_skcipher base;
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
/* Must be called without moving CPUs. */
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
void cryptd_free_skcipher(struct cryptd_skcipher *tfm);

struct cryptd_ahash {
	struct crypto_ahash base;
};

static inline struct cryptd_ahash *__cryptd_ahash_cast(
	struct crypto_ahash *tfm)
{
	return (struct cryptd_ahash *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
/* Must be called without moving CPUs. */
bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
void cryptd_free_ahash(struct cryptd_ahash *tfm);

struct cryptd_aead {
	struct crypto_aead base;
};

static inline struct cryptd_aead *__cryptd_aead_cast(
	struct crypto_aead *tfm)
{
	return (struct cryptd_aead *)tfm;
}

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
/* Must be called without moving CPUs. */
bool cryptd_aead_queued(struct cryptd_aead *tfm);
void cryptd_free_aead(struct cryptd_aead *tfm);

#endif
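As a usage sketch only (not taken from any in-tree caller; the function name, algorithm name, and error handling below are chosen for illustration): the cryptd_alloc_*() helpers take the name of the algorithm to be cryptd-ed, wrap it in the "cryptd(...)" template so requests run in cryptd's workqueue, and the corresponding *_child() accessor returns the underlying synchronous transform. Assuming a "sha256-generic" shash is available, a caller might do roughly this:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <crypto/cryptd.h>
#include <crypto/hash.h>

/*
 * Hypothetical caller: wrap the synchronous "sha256-generic" shash in a
 * cryptd instance, report the child transform, then free it again.
 */
static int cryptd_ahash_usage_sketch(void)
{
	struct cryptd_ahash *tfm;
	struct crypto_shash *child;

	/* alg_name is the algorithm to be cryptd-ed (see header comment). */
	tfm = cryptd_alloc_ahash("sha256-generic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The child is the underlying synchronous shash transform. */
	child = cryptd_ahash_child(tfm);
	pr_info("cryptd wraps %s\n",
		crypto_tfm_alg_driver_name(crypto_shash_tfm(child)));

	/* &tfm->base is a struct crypto_ahash usable for async requests. */

	cryptd_free_ahash(tfm);
	return 0;
}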
commit b4cfe3971f6eab542dd7ecc398bfa1aeec889934
author:    Jack Morgenstein <jackm@dev.mellanox.co.il>  2017-01-15 20:15:00 +0200
committer: Doug Ledford <dledford@redhat.com>           2017-01-27 14:29:04 -0500
tree:      c7ad49d05da0535170c8e7710cd44ae1cecc271f
parent:    2d4b21e0a2913612274a69a3ba1bfee4cffc6e77

RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled

If IPV6 has not been enabled in the underlying kernel, we must avoid
calling IPV6 procedures in rdma_cm.ko. This requires using
"IS_ENABLED(CONFIG_IPV6)" in "if" statements surrounding any code which
calls external IPV6 procedures.

In the instance fixed here, procedure cma_bind_addr() called
ipv6_addr_type() -- which resulted in calling external procedure
__ipv6_addr_type().

Fixes: 6c26a77124ff ("RDMA/cma: fix IPv6 address resolution")
Cc: <stable@vger.kernel.org> # v4.2+
Cc: Spencer Baugh <sbaugh@catern.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
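The mechanism relied on here is that IS_ENABLED(CONFIG_IPV6) evaluates to a compile-time constant 0 when IPv6 is disabled, so the compiler drops the guarded branch and the module ends up with no reference to the undefined __ipv6_addr_type() symbol. A hedged illustration of the pattern follows; it is not the actual cma_bind_addr() change, and the function and parameter names are invented for the example:

#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <net/ipv6.h>

/*
 * Illustration only -- not the real rdma_cm code.  When CONFIG_IPV6 is
 * not set, IS_ENABLED(CONFIG_IPV6) is 0, the branch is eliminated at
 * compile time, and no call to ipv6_addr_type() (and hence to the
 * external __ipv6_addr_type()) is emitted into the object file.
 */
static bool example_addr_is_v6_linklocal(const struct sockaddr *addr)
{
	if (IS_ENABLED(CONFIG_IPV6) && addr->sa_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 =
			(const struct sockaddr_in6 *)addr;

		return ipv6_addr_type(&sin6->sin6_addr) &
		       IPV6_ADDR_LINKLOCAL;
	}

	return false;
}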