/*
 * Software async crypto daemon
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H

#include <linux/kernel.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

struct cryptd_ablkcipher {
	struct crypto_ablkcipher base;
};

static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
	struct crypto_ablkcipher *tfm)
{
	return (struct cryptd_ablkcipher *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);

struct cryptd_skcipher {
	struct crypto_skcipher base;
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
/* Must be called without moving CPUs. */
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
void cryptd_free_skcipher(struct cryptd_skcipher *tfm);

struct cryptd_ahash {
	struct crypto_ahash base;
};

static inline struct cryptd_ahash *__cryptd_ahash_cast(
	struct crypto_ahash *tfm)
{
	return (struct cryptd_ahash *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
/* Must be called without moving CPUs. */
bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
void cryptd_free_ahash(struct cryptd_ahash *tfm);

struct cryptd_aead {
	struct crypto_aead base;
};

static inline struct cryptd_aead *__cryptd_aead_cast(
	struct crypto_aead *tfm)
{
	return (struct cryptd_aead *)tfm;
}

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
/* Must be called without moving CPUs. */
bool cryptd_aead_queued(struct cryptd_aead *tfm);
void cryptd_free_aead(struct cryptd_aead *tfm);

#endif
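The allocation/child/free pattern is the same for all four transform types above. A minimal usage sketch for the ahash variant follows; it is not part of the header, the function cryptd_ahash_example() is hypothetical, and "sha256-generic" is just an assumed example of a synchronous algorithm name to wrap:

#include <crypto/cryptd.h>
#include <linux/err.h>

static int cryptd_ahash_example(void)
{
	struct cryptd_ahash *tfm;
	struct crypto_shash *child;

	/*
	 * cryptd_alloc_ahash() instantiates "cryptd(sha256-generic)",
	 * i.e. the named synchronous hash wrapped by the async daemon.
	 */
	tfm = cryptd_alloc_ahash("sha256-generic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The child is the underlying synchronous shash transform. */
	child = cryptd_ahash_child(tfm);

	/* ... submit ahash_requests against &tfm->base as usual ... */

	cryptd_free_ahash(tfm);
	return 0;
}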
author	Thomas Gleixner <tglx@linutronix.de>	2017-01-31 09:37:34 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2017-01-31 21:47:58 +0100
commit	0becc0ae5b42828785b589f686725ff5bc3b9b25 (patch)
tree	be6d0e1f37c38ed0a7dd5da2d4b1e93f0fb43101 /include/dt-bindings/gpio
parent	24c2503255d35c269b67162c397a1a1c1e02f6ce (diff)
x86/mce: Make timer handling more robust
Erik reported that on preproduction hardware a CMCI storm triggers the
BUG_ON in add_timer_on(). The reason is that the per-CPU MCE timer is
started by the CMCI logic before the MCE CPU hotplug callback starts the
timer with add_timer_on(). So the timer is already queued, which triggers
the BUG.

Using add_timer_on() is pretty pointless in this code because the timer is
strictly per CPU, initialized as pinned, and all operations which arm the
timer happen on the CPU to which the timer belongs.

Simplify the whole machinery by using mod_timer() instead of add_timer_on(),
which avoids the problem because mod_timer() can handle already queued
timers.

Use __start_timer() everywhere so the earliest armed expiry time is
preserved.

Reported-by: Erik Veijola <erik.veijola@intel.com>
Tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701310936080.3457@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
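The shape of the helper described above can be sketched as follows. This is a reconstruction from the changelog, not necessarily a verbatim copy of the patch: the timer is armed via mod_timer(), and a pending timer is only re-armed when the new expiry would be earlier, so the earliest deadline wins:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * mod_timer() copes with an already-queued timer, which is
	 * exactly the case add_timer_on() BUG()s on. Re-arm only if the
	 * timer is idle or the new expiry is earlier than the pending
	 * one, preserving the earliest armed expiry time.
	 */
	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}

Because the timer is pinned and always armed from its own CPU, mod_timer() queues it on the right CPU without the add_timer_on() precondition that the timer must not already be pending.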
Diffstat (limited to 'include/dt-bindings/gpio')