/*
 * Software async crypto daemon
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban
 *             Gabriele Paoloni
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H

#include <linux/kernel.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

struct cryptd_ablkcipher {
	struct crypto_ablkcipher base;
};

static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
	struct crypto_ablkcipher *tfm)
{
	return (struct cryptd_ablkcipher *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);

struct cryptd_skcipher {
	struct crypto_skcipher base;
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
/* Must be called without moving CPUs. */
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
void cryptd_free_skcipher(struct cryptd_skcipher *tfm);

struct cryptd_ahash {
	struct crypto_ahash base;
};

static inline struct cryptd_ahash *__cryptd_ahash_cast(
	struct crypto_ahash *tfm)
{
	return (struct cryptd_ahash *)tfm;
}

/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
/* Must be called without moving CPUs. */
bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
void cryptd_free_ahash(struct cryptd_ahash *tfm);

struct cryptd_aead {
	struct crypto_aead base;
};

static inline struct cryptd_aead *__cryptd_aead_cast(
	struct crypto_aead *tfm)
{
	return (struct cryptd_aead *)tfm;
}

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
/* Must be called without moving CPUs. */
bool cryptd_aead_queued(struct cryptd_aead *tfm);
void cryptd_free_aead(struct cryptd_aead *tfm);

#endif
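The header above only declares the cryptd wrappers; the sketch below shows the typical allocate/child/free pattern for the ahash variant. It is a minimal illustration, assuming a kernel-module context and assuming the synchronous "sha256" shash is available to be wrapped; the cryptd_* function names come from the declarations above, while the example function itself is hypothetical.

#include <linux/err.h>
#include <linux/kernel.h>
#include <crypto/cryptd.h>

static int cryptd_ahash_example(void)
{
	struct cryptd_ahash *tfm;
	struct crypto_shash *child;

	/*
	 * Ask cryptd to wrap the named algorithm (internally "cryptd(sha256)");
	 * requests submitted to the resulting ahash are queued to cryptd's
	 * per-CPU workqueue and completed asynchronously in process context.
	 */
	tfm = cryptd_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The synchronous child transform that cryptd drives internally. */
	child = cryptd_ahash_child(tfm);
	pr_info("cryptd child: %s\n",
		crypto_tfm_alg_name(crypto_shash_tfm(child)));

	/*
	 * cryptd_ahash_queued() reports whether requests are still pending on
	 * this CPU (per the header, call it without migrating CPUs); callers
	 * should not free the tfm while requests are outstanding.
	 */
	cryptd_free_ahash(tfm);
	return 0;
}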
author:    Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>  2017-02-03 14:18:39 -0800
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2017-02-04 00:11:08 +0100
commit:    6e978b22efa1db9f6e71b24440b5f1d93e968ee3
tree:      c666f7a26b860674848949e39a610222b0723f89
parent:    3c223c19aea85d3dda1416c187915f4a30b04b1f
cpufreq: intel_pstate: Disable energy efficiency optimization
Some Kabylake desktop processors may not reach max turbo when running in
HWP mode, even if running under sustained 100% utilization.

This occurs when the HWP.EPP (Energy Performance Preference) is set to
"balance_power" (0x80) -- the default on most systems.

It occurs because the platform BIOS may erroneously enable an
energy-efficiency setting -- MSR_IA32_POWER_CTL BIT-EE, which is not
recommended to be enabled on this SKU.

On the failing systems, this BIOS issue was not discovered when the desktop
motherboard was tested with Windows, because the BIOS also neglects to
provide the ACPI/CPPC table that Windows requires to enable HWP, and so
Windows runs in legacy P-state mode, where this setting has no effect.

Linux's intel_pstate driver does not require ACPI/CPPC to enable HWP, and
so it runs in HWP mode, exposing this incorrect BIOS configuration.

There are several ways to address this problem.

First, Linux can also run in legacy P-state mode on this system. As
intel_pstate is how Linux enables HWP, booting with "intel_pstate=disable"
will run in acpi-cpufreq/ondemand legacy P-state mode.

Second, the "performance" governor can be used with intel_pstate, which
will modify HWP.EPP to 0.

Third, starting in 4.10, the
/sys/devices/system/cpu/cpufreq/policy*/energy_performance_preference
attribute can be updated from "balance_power" to "performance".

Fourth, apply this patch, which fixes the erroneous setting of
MSR_IA32_POWER_CTL BIT_EE on this model, allowing the default
configuration to function as designed.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Reviewed-by: Len Brown <len.brown@intel.com>
Cc: 4.6+ <stable@vger.kernel.org> # 4.6+
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
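The patch body is not shown on this page; the sketch below only illustrates the mechanism the changelog describes -- setting the "energy efficiency disable" bit in MSR_IA32_POWER_CTL so the BIOS-enabled optimization no longer applies. The bit position (19) and the helper name are assumptions for illustration, not taken from this page; rdmsrl_on_cpu()/wrmsrl_on_cpu() are the standard x86 per-CPU MSR accessors.

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/msr.h>

/* Assumption: BIT_EE is bit 19 of MSR_IA32_POWER_CTL; setting it disables
 * the energy-efficiency optimization that the BIOS left enabled. */
#define MSR_IA32_POWER_CTL_BIT_EE	19

static void intel_pstate_disable_ee(int cpu)
{
	u64 power_ctl;

	if (rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl))
		return;

	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
		/* The EE optimization is still enabled: set the disable bit. */
		pr_info("Disabling energy efficiency optimization\n");
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
	}
}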