#ifndef _NET_MRP_H
#define _NET_MRP_H

/* End mark terminating the message and vector-attribute lists in a PDU. */
#define MRP_END_MARK		0x0

/* PDU header: carries only the protocol version of the application. */
struct mrp_pdu_hdr {
	u8	version;
};

/* Message header: attribute type and length of each attribute value. */
struct mrp_msg_hdr {
	u8	attrtype;
	u8	attrlen;
};

/* Vector attribute header: lenflags packs the LeaveAll flag and the number
 * of attribute values, followed by the first attribute value.
 */
struct mrp_vecattr_hdr {
	__be16		lenflags;
	unsigned char	firstattrvalue[];
#define MRP_VECATTR_HDR_LEN_MASK cpu_to_be16(0x1FFF)
#define MRP_VECATTR_HDR_FLAG_LA cpu_to_be16(0x2000)
};

/* Attribute events encoded in the vector, three events per octet. */
enum mrp_vecattr_event {
	MRP_VECATTR_EVENT_NEW,
	MRP_VECATTR_EVENT_JOIN_IN,
	MRP_VECATTR_EVENT_IN,
	MRP_VECATTR_EVENT_JOIN_MT,
	MRP_VECATTR_EVENT_MT,
	MRP_VECATTR_EVENT_LV,
	__MRP_VECATTR_EVENT_MAX
};

/* Parse/build state kept in skb->cb while an MRP PDU is being processed. */
struct mrp_skb_cb {
	struct mrp_msg_hdr	*mh;
	struct mrp_vecattr_hdr	*vah;
	unsigned char		attrvalue[];
};

static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	return (struct mrp_skb_cb *)skb->cb;
}

/* Applicant state machine states as defined by IEEE 802.1Q MRP. */
enum mrp_applicant_state {
	MRP_APPLICANT_INVALID,
	MRP_APPLICANT_VO,
	MRP_APPLICANT_VP,
	MRP_APPLICANT_VN,
	MRP_APPLICANT_AN,
	MRP_APPLICANT_AA,
	MRP_APPLICANT_QA,
	MRP_APPLICANT_LA,
	MRP_APPLICANT_AO,
	MRP_APPLICANT_QO,
	MRP_APPLICANT_AP,
	MRP_APPLICANT_QP,
	__MRP_APPLICANT_MAX
};
#define MRP_APPLICANT_MAX	(__MRP_APPLICANT_MAX - 1)

/* Events fed into the applicant state machine: local requests (NEW, JOIN,
 * LV), transmit opportunities (TX), received attribute events (R_*) and
 * timer/redeclaration events.
 */
enum mrp_event {
	MRP_EVENT_NEW,
	MRP_EVENT_JOIN,
	MRP_EVENT_LV,
	MRP_EVENT_TX,
	MRP_EVENT_R_NEW,
	MRP_EVENT_R_JOIN_IN,
	MRP_EVENT_R_IN,
	MRP_EVENT_R_JOIN_MT,
	MRP_EVENT_R_MT,
	MRP_EVENT_R_LV,
	MRP_EVENT_R_LA,
	MRP_EVENT_REDECLARE,
	MRP_EVENT_PERIODIC,
	__MRP_EVENT_MAX
};
#define MRP_EVENT_MAX		(__MRP_EVENT_MAX - 1)

/* Actions the state machine requests for the next transmit opportunity. */
enum mrp_tx_action {
	MRP_TX_ACTION_NONE,
	MRP_TX_ACTION_S_NEW,
	MRP_TX_ACTION_S_JOIN_IN,
	MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	MRP_TX_ACTION_S_IN_OPTIONAL,
	MRP_TX_ACTION_S_LV,
};

/* One declared attribute, kept in the per-applicant rbtree. */
struct mrp_attr {
	struct rb_node	node;
	enum mrp_applicant_state	state;
	u8	type;
	u8	len;
	unsigned char	value[];
};

/* MRP applications supported by the kernel. */
enum mrp_applications {
	MRP_APPLICATION_MVRP,
	__MRP_APPLICATION_MAX
};
#define MRP_APPLICATION_MAX	(__MRP_APPLICATION_MAX - 1)

/* Static description of an MRP application (e.g. MVRP): highest attribute
 * type, receive packet type, group destination address and protocol version.
 */
struct mrp_application {
	enum mrp_applications	type;
	unsigned int	maxattr;
	struct packet_type	pkttype;
	unsigned char	group_address[ETH_ALEN];
	u8	version;
};

/* Per-device, per-application state: join/periodic timers, the queue of
 * PDUs awaiting transmission, the PDU currently being assembled and the
 * rbtree (mad) of declared attributes.
 */
struct mrp_applicant {
	struct mrp_application	*app;
	struct net_device	*dev;
	struct timer_list	join_timer;
	struct timer_list	periodic_timer;

	spinlock_t	lock;
	struct sk_buff_head	queue;
	struct sk_buff	*pdu;
	struct rb_root	mad;
	struct rcu_head	rcu;
};

/* Per-port array of applicants, indexed by application type. */
struct mrp_port {
	struct mrp_applicant __rcu	*applicants[MRP_APPLICATION_MAX + 1];
	struct rcu_head	rcu;
};

int mrp_register_application(struct mrp_application *app);
void mrp_unregister_application(struct mrp_application *app);
int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *app,
		     const void *value, u8 len, u8 type);
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *app,
		       const void *value, u8 len, u8 type);
#endif /* _NET_MRP_H */
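
For context, a minimal sketch of how an in-kernel MRP application might drive the API declared above. The application descriptor, EtherType, group address, attribute type and value encoding are illustrative assumptions modelled on MVRP (the in-tree user in net/8021q/); only the mrp_* calls come from this header.

/*
 * Illustrative sketch only: a hypothetical MVRP-like user of the MRP API.
 * mrp_register_application() must be called once (e.g. at module init)
 * before any per-device setup; error handling is trimmed for brevity.
 */
static struct mrp_application example_mrp_app __read_mostly = {
	.type		= MRP_APPLICATION_MVRP,
	.maxattr	= 1,			/* one attribute type used */
	.pkttype.type	= htons(0x88f5),	/* MVRP EtherType (assumed) */
	.group_address	= { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
	.version	= 0,
};

static int example_mrp_attach(struct net_device *dev, u16 vid)
{
	__be16 value = htons(vid);
	int err;

	/* One applicant per device and application. */
	err = mrp_init_applicant(dev, &example_mrp_app);
	if (err)
		return err;

	/* Declare (join) one attribute of type 1 with a 2-byte value. */
	return mrp_request_join(dev, &example_mrp_app, &value,
				sizeof(value), 1);
}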
cpufreq: intel_pstate: Disable energy efficiency optimization

Some Kabylake desktop processors may not reach max turbo when running in
HWP mode, even when running at sustained 100% utilization.
This occurs when HWP.EPP (Energy Performance Preference) is set to
"balance_power" (0x80) -- the default on most systems -- because the
platform BIOS may erroneously enable an energy-efficiency setting,
MSR_IA32_POWER_CTL bit EE, which is not recommended to be enabled on
this SKU.

On the failing systems, this BIOS issue was not discovered when the
desktop motherboard was tested with Windows, because the BIOS also
neglects to provide the ACPI/CPPC table that Windows requires to enable
HWP; Windows therefore runs in legacy P-state mode, where this setting
has no effect.

Linux's intel_pstate driver does not require ACPI/CPPC to enable HWP, so
it runs in HWP mode and exposes this incorrect BIOS configuration.
There are several ways to address this problem.

First, Linux can also run in legacy P-state mode on this system: since
intel_pstate is how Linux enables HWP, booting with "intel_pstate=disable"
makes the system run in acpi-cpufreq/ondemand legacy P-state mode.

Second, the "performance" governor can be used with intel_pstate, which
sets HWP.EPP to 0.

Third, starting in 4.10, the
/sys/devices/system/cpu/cpufreq/policy*/energy_performance_preference
attribute can be updated from "balance_power" to "performance".

Fourth, apply this patch, which fixes the erroneous setting of
MSR_IA32_POWER_CTL bit EE on this model, allowing the default
configuration to function as designed.
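
The patch itself is not reproduced here; the following is a hedged sketch of
what such a quirk typically looks like in the driver: read MSR_IA32_POWER_CTL
and set the energy-efficiency-disable bit if it is not already set. The bit
position (19) and the helper name are assumptions for illustration, based on
memory of the upstream intel_pstate code, not a quote of the actual change.

/* Illustrative sketch only: disable the energy-efficiency optimization by
 * setting the EE-disable bit in MSR_IA32_POWER_CTL on affected CPUs.
 */
#define MSR_IA32_POWER_CTL_BIT_EE	19

static void intel_pstate_disable_ee(int cpu)
{
	u64 power_ctl;

	if (rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl))
		return;

	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
		/* Not yet disabled: set the bit to turn the optimization off. */
		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
	}
}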
Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Reviewed-by: Len Brown <len.brown@intel.com>
Cc: 4.6+ <stable@vger.kernel.org> # 4.6+
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>