/*
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM clk

#if !defined(_TRACE_CLK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CLK_H

#include <linux/tracepoint.h>

struct clk_core;

DECLARE_EVENT_CLASS(clk,

	TP_PROTO(struct clk_core *core),

	TP_ARGS(core),

	TP_STRUCT__entry(
		__string(name, core->name)
	),

	TP_fast_assign(
		__assign_str(name, core->name);
	),

	TP_printk("%s", __get_str(name))
);

DEFINE_EVENT(clk, clk_enable,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_enable_complete,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_disable,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_disable_complete,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_prepare,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_prepare_complete,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_unprepare,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DEFINE_EVENT(clk, clk_unprepare_complete,
	TP_PROTO(struct clk_core *core),
	TP_ARGS(core)
);

DECLARE_EVENT_CLASS(clk_rate,

	TP_PROTO(struct clk_core *core, unsigned long rate),

	TP_ARGS(core, rate),

	TP_STRUCT__entry(
		__string(name, core->name)
		__field(unsigned long, rate)
	),

	TP_fast_assign(
		__assign_str(name, core->name);
		__entry->rate = rate;
	),

	TP_printk("%s %lu", __get_str(name), (unsigned long)__entry->rate)
);

DEFINE_EVENT(clk_rate, clk_set_rate,
	TP_PROTO(struct clk_core *core, unsigned long rate),
	TP_ARGS(core, rate)
);

DEFINE_EVENT(clk_rate, clk_set_rate_complete,
	TP_PROTO(struct clk_core *core, unsigned long rate),
	TP_ARGS(core, rate)
);

DECLARE_EVENT_CLASS(clk_parent,

	TP_PROTO(struct clk_core *core, struct clk_core *parent),

	TP_ARGS(core, parent),

	TP_STRUCT__entry(
		__string(name,  core->name)
		__string(pname, parent->name)
	),

	TP_fast_assign(
		__assign_str(name, core->name);
		__assign_str(pname, parent->name);
	),

	TP_printk("%s %s", __get_str(name), __get_str(pname))
);

DEFINE_EVENT(clk_parent, clk_set_parent,
	TP_PROTO(struct clk_core *core, struct clk_core *parent),
	TP_ARGS(core, parent)
);

DEFINE_EVENT(clk_parent, clk_set_parent_complete,
	TP_PROTO(struct clk_core *core, struct clk_core *parent),
	TP_ARGS(core, parent)
);

DECLARE_EVENT_CLASS(clk_phase,

	TP_PROTO(struct clk_core *core, int phase),

	TP_ARGS(core, phase),

	TP_STRUCT__entry(
		__string(name, core->name)
		__field(int,   phase)
	),

	TP_fast_assign(
		__assign_str(name, core->name);
		__entry->phase = phase;
	),

	TP_printk("%s %d", __get_str(name), (int)__entry->phase)
);

DEFINE_EVENT(clk_phase, clk_set_phase,
	TP_PROTO(struct clk_core *core, int phase),
	TP_ARGS(core, phase)
);

DEFINE_EVENT(clk_phase, clk_set_phase_complete,
	TP_PROTO(struct clk_core *core, int phase),
	TP_ARGS(core, phase)
);

#endif /* _TRACE_CLK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
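These events are emitted by the common clock framework core. The fragment below is a minimal sketch, loosely modeled on how drivers/clk/clk.c instantiates and uses the tracepoints; the real clk_core_enable() also takes the enable lock, enables the parent clock first, and handles error unwinding, all of which is omitted here.

/* Simplified sketch (not the verbatim kernel code): instantiate the
 * tracepoints declared above and bracket the hardware callback with
 * the clk_enable / clk_enable_complete pair. */
#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	if (core->enable_count == 0) {
		trace_clk_enable(core);			/* about to call ops->enable */

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete(core);	/* callback has returned */

		if (ret)
			return ret;
	}

	core->enable_count++;
	return 0;
}

The other event classes follow the same pattern: trace_clk_set_rate()/trace_clk_set_rate_complete() bracket a rate change, the clk_parent pair brackets a reparent operation, and the clk_phase pair brackets a phase adjustment.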
author:    Borislav Petkov <bp@suse.de>          2017-01-20 21:29:40 +0100
committer: Thomas Gleixner <tglx@linutronix.de>  2017-01-23 09:39:55 +0100
commit:    c26665ab5c49ad3e142e0f054ca3204f259ba09c
tree:      3bab11918e18e9d25ef7544dba05cdf39d1abec5 /tools/lib/api/fs
parent:    7a308bb3016f57e5be11a677d15b821536419d36
x86/microcode/intel: Drop stashed AP patch pointer optimization
This was meant to save us the scanning of the microcode container in the
initrd, since the first AP had already done that. But it can also hurt us:

Imagine a single hyperthreaded CPU (Intel(R) Atom(TM) CPU N270, for example)
which updates the microcode on the BSP. Because the microcode engine is
shared between the two threads, the update on CPU1 doesn't happen, as it has
already happened on CPU0, and we don't find a newer microcode revision on
CPU1. As a result, the intel_ucode_patch pointer never gets set, and at
initrd jettisoning time we don't save the microcode patch for later
application.

Now, when we suspend to RAM, the loaded microcode gets cleared, so we need to
reload it, but there is no patch saved in the cache.

Removing the optimization fixes this issue, and all is fine and dandy.

Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading")
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-2-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
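To make the failure sequence concrete, here is a small self-contained user-space model of the scenario the commit message describes. The names (stashed_patch, engine_rev, INITRD_REV, and so on) are purely illustrative and are not the actual arch/x86/kernel/cpu/microcode/intel.c symbols; with the optimization removed, the kernel's save path rescans the container itself instead of depending on a stash that may never have been populated.

/* Hypothetical model of the bug: one microcode engine shared by two HT
 * siblings, and an AP path that only stashes a patch it actually applied. */
#include <stdio.h>
#include <stdbool.h>

#define INITRD_REV	0x20a	/* revision shipped in the initrd container */
#define BOOT_REV	0x205	/* revision present in the CPU at reset     */

static int engine_rev = BOOT_REV;	/* shared microcode engine            */
static int stashed_patch;		/* 0 == nothing saved for later reload */

/* AP-side loading with the (now removed) stash optimization. */
static void load_ucode_ap_with_stash_optimization(void)
{
	if (stashed_patch)		/* reuse the stash, skip the initrd scan */
		return;

	/* CPU0 (the BSP) already updated the shared engine, so there is
	 * no *newer* revision to apply here ...                          */
	if (INITRD_REV <= engine_rev)
		return;			/* ... and stashed_patch stays unset.    */

	engine_rev = INITRD_REV;
	stashed_patch = INITRD_REV;
}

int main(void)
{
	/* BSP applies the initrd patch early; the engine is shared with CPU1. */
	engine_rev = INITRD_REV;

	/* CPU1, the HT sibling and only AP, comes up. */
	load_ucode_ap_with_stash_optimization();

	/* Initrd is jettisoned: only a stashed patch survives. */
	bool cached = stashed_patch != 0;

	/* Suspend-to-RAM clears the loaded microcode ... */
	engine_rev = BOOT_REV;

	/* ... and the resume path can only reapply what was cached. */
	if (cached)
		engine_rev = stashed_patch;

	printf("patch cached for resume: %s, running revision 0x%x\n",
	       cached ? "yes" : "no", engine_rev);
	return 0;
}

Running this prints "patch cached for resume: no, running revision 0x205", i.e. after resume the machine is back on the reset-time microcode, which is exactly the regression the commit fixes.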