/*
 *  net/dccp/ccid.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  CCID infrastructure
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/slab.h>

#include "ccid.h"
#include "ccids/lib/tfrc.h"

static struct ccid_operations *ccids[] = {
	&ccid2_ops,
#ifdef CONFIG_IP_DCCP_CCID3
	&ccid3_ops,
#endif
};

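/* Map a CCID identifier onto its operations table; NULL if the CCID is not built in */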
static struct ccid_operations *ccid_by_number(const u8 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		if (ccids[i]->ccid_id == id)
			return ccids[i];
	return NULL;
}

/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	while (array_len > 0)
		if (ccid_by_number(ccid_array[--array_len]) == NULL)
			return false;
	return true;
}

/**
 * ccid_get_builtin_ccids  -  Populate a list of built-in CCIDs
 * @ccid_array: pointer into which the array of CCID identifiers is copied
 * @array_len: where the number of entries in @ccid_array is returned
 *
 * This function allocates memory; the caller must ensure it is freed after use.
 */
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	*ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;

	for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1)
		(*ccid_array)[*array_len] = ccids[*array_len]->ccid_id;
	return 0;
}

int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	u8 *ccid_array, array_len;
	int err = 0;

	if (ccid_get_builtin_ccids(&ccid_array, &array_len))
		return -ENOBUFS;

	if (put_user(array_len, optlen))
		err = -EFAULT;
	else if (len > 0 && copy_to_user(optval, ccid_array,
					 len > array_len ? array_len : len))
		err = -EFAULT;

	kfree(ccid_array);
	return err;
}

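/*
 * Create a slab cache for per-socket CCID state. Each object holds the
 * generic struct ccid header followed by @obj_size bytes of CCID-private
 * data; the formatted cache name is written back into @slab_name_fmt.
 */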
static struct kmem_cache *ccid_kmem_cache_create(int obj_size,
						 char *slab_name_fmt,
						 const char *fmt, ...)
{
	struct kmem_cache *slab;
	va_list args;

	va_start(args, fmt);
	vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
	va_end(args);

	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
	return slab;
}

static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
	kmem_cache_destroy(slab);
}

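/* Set up the RX and TX slab caches of a built-in CCID at module-init time */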
static int __init ccid_activate(struct ccid_operations *ccid_ops)
{
	int err = -ENOBUFS;

	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
					       ccid_ops->ccid_hc_rx_slab_name,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_rx_slab == NULL)
		goto out;

	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
					       ccid_ops->ccid_hc_tx_slab_name,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_tx_slab == NULL)
		goto out_free_rx_slab;

	pr_info("DCCP: Activated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
	err = 0;
out:
	return err;
out_free_rx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;
	goto out;
}

static void ccid_deactivate(struct ccid_operations *ccid_ops)
{
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;

	pr_info("DCCP: Deactivated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
}

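/**
 * ccid_new  -  Allocate and initialise a CCID half-connection block
 * @id: CCID identifier of the requested congestion-control module
 * @sk: socket the CCID instance is created for
 * @rx: whether to build the receiver (RX) or the sender (TX) half
 *
 * The CCID-private state follows struct ccid in the same slab object and is
 * zeroed before the module's init hook is called. Returns NULL on failure.
 */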
struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
{
	struct ccid_operations *ccid_ops = ccid_by_number(id);
	struct ccid *ccid = NULL;

	if (ccid_ops == NULL)
		goto out;

	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
				     ccid_ops->ccid_hc_tx_slab, gfp_any());
	if (ccid == NULL)
		goto out;
	ccid->ccid_ops = ccid_ops;
	if (rx) {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
		if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
			goto out_free_ccid;
	} else {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
		if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
			goto out_free_ccid;
	}
out:
	return ccid;
out_free_ccid:
	kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
			ccid_ops->ccid_hc_tx_slab, ccid);
	ccid = NULL;
	goto out;
}

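/* ccid_hc_{rx,tx}_delete: free a half-connection block, calling the CCID's exit hook first */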
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	if (ccid != NULL) {
		if (ccid->ccid_ops->ccid_hc_rx_exit != NULL)
			ccid->ccid_ops->ccid_hc_rx_exit(sk);
		kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid);
	}
}

void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	if (ccid != NULL) {
		if (ccid->ccid_ops->ccid_hc_tx_exit != NULL)
			ccid->ccid_ops->ccid_hc_tx_exit(sk);
		kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid);
	}
}

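/* Bring up the TFRC library and all built-in CCIDs; called once at DCCP init time */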
int __init ccid_initialize_builtins(void)
{
	int i, err = tfrc_lib_init();

	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(ccids); i++) {
		err = ccid_activate(ccids[i]);
		if (err)
			goto unwind_registrations;
	}
	return 0;

unwind_registrations:
	while (--i >= 0)
		ccid_deactivate(ccids[i]);
	tfrc_lib_exit();
	return err;
}

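/* Counterpart of ccid_initialize_builtins(): tear down all built-in CCIDs and TFRC */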
void ccid_cleanup_builtins(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		ccid_deactivate(ccids[i]);
	tfrc_lib_exit();
}