/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
        if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
                pr_notice("Sorting __ex_table...\n");
                sort_extable(__start___ex_table, __stop___ex_table);
        }
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_extable(__start___ex_table, __stop___ex_table-1, addr);
        if (!e)
                e = search_module_extables(addr);
        return e;
}

static inline int init_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
                return 1;
        return 0;
}

int core_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
                return 1;

        if (system_state == SYSTEM_BOOTING &&
            init_kernel_text(addr))
                return 1;
        return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * for others. But will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
        if (addr >= (unsigned long)_sdata &&
            addr < (unsigned long)_edata)
                return 1;
        return 0;
}

int __kernel_text_address(unsigned long addr)
{
        if (core_kernel_text(addr))
                return 1;
        if (is_module_text_address(addr))
                return 1;
        if (is_ftrace_trampoline(addr))
                return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
         * backtraces (such as lockdep traces).
         *
         * Since we are after the module-symbols check, there's
         * no danger of address overlap:
         */
        if (init_kernel_text(addr))
                return 1;
        return 0;
}

int kernel_text_address(unsigned long addr)
{
        if (core_kernel_text(addr))
                return 1;
        if (is_module_text_address(addr))
                return 1;
        return is_ftrace_trampoline(addr);
}

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
        unsigned long addr;
        addr = (unsigned long) dereference_function_descriptor(ptr);
        if (core_kernel_text(addr))
                return 1;
        return is_module_text_address(addr);
}
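For context, a minimal sketch of how func_ptr_is_kernel_text() is typically used: sanity-checking a callback pointer at registration time (the kernel's notifier code performs a check of this shape). The struct hook and register_hook() names below are hypothetical, for illustration only; they are not part of this file.

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/errno.h>

/* Hypothetical callback record, for illustration only. */
struct hook {
        int (*fn)(void *data);
};

/*
 * Refuse to register a callback whose function pointer does not
 * resolve into core kernel or module text. On PPC64/IA64,
 * func_ptr_is_kernel_text() dereferences the function descriptor
 * before doing the address-range checks.
 */
static int register_hook(struct hook *h)
{
        if (unlikely(!func_ptr_is_kernel_text(h->fn))) {
                WARN(1, "hook callback %p is not in kernel text\n", h->fn);
                return -EINVAL;
        }
        /* ... link h into the hook list ... */
        return 0;
}

Checking the dereferenced address rather than the raw pointer is the point of the helper: on descriptor-based ABIs the raw pointer lands in a data section and a naive core_kernel_text() check would always fail.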
author    Thomas Gleixner <tglx@linutronix.de>    2017-01-31 23:58:38 +0100
committer Ingo Molnar <mingo@kernel.org>          2017-02-01 08:37:27 +0100
commit    dd86e373e09fb16b83e8adf5c48c421a4ca76468
tree      55703c2ea8584e303e342090614e0aab3509ab21
parent    0b3589be9b98994ce3d5aeca52445d1f5627c4ba
perf/x86/intel/rapl: Make package handling more robust
The package management code in RAPL relies on package mapping being
available before a CPU is started. This changed with:

  9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")

because the ACPI/BIOS information turned out to be unreliable, but that
left RAPL in a broken state. This was not noticed because on a regular
boot all CPUs are online before RAPL is initialized.

A possible fix would be to reintroduce the mess which allocates a package
data structure in CPU prepare and, when it turns out to already exist in
starting, throws it away later in the CPU online callback. But that's a
horrible hack and not required at all, because RAPL becomes functional for
perf only in the CPU online callback. That's correct because user space is
not yet informed about the CPU being onlined, so nothing can rely on RAPL
being available on that particular CPU.

Move the allocation to the CPU online callback and simplify the hotplug
handling. At this point the package mapping is established and correct.

This also adds a missing check for available package data in the
event_init() function.

Reported-by: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")
Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
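A minimal sketch of the hotplug pattern the changelog describes, under assumed names: struct pkg_data, pkgs[], MAX_PACKAGES and pkg_cpu_online() are illustrative, not the actual RAPL symbols. The per-package structure is allocated lazily in the CPU online callback, where topology_logical_package_id() is already valid.

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/topology.h>

#define MAX_PACKAGES 8          /* assumed bound, for illustration */

/* Illustrative per-package structure; the real driver's
 * struct rapl_pmu carries more state. */
struct pkg_data {
        unsigned int pkgid;
        /* ... per-package PMU state ... */
};

static struct pkg_data *pkgs[MAX_PACKAGES];     /* assumed bookkeeping */

/*
 * Online callback: runs after the package mapping is established,
 * so allocating here avoids the prepare/starting juggling the
 * changelog describes.
 */
static int pkg_cpu_online(unsigned int cpu)
{
        unsigned int pkgid = topology_logical_package_id(cpu);
        struct pkg_data *pd = pkgs[pkgid];

        if (!pd) {
                pd = kzalloc(sizeof(*pd), GFP_KERNEL);
                if (!pd)
                        return -ENOMEM;
                pd->pkgid = pkgid;
                pkgs[pkgid] = pd;
        }
        /* ... make this CPU the package's event collector if needed ... */
        return 0;
}

/* Registration at init time; the dynamic state and name are illustrative.
 * With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() returns the allocated
 * state number (> 0) on success, so map that to 0 for the initcall. */
static int __init pkg_hotplug_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pkg:online",
                                pkg_cpu_online, NULL);
        return ret < 0 ? ret : 0;
}

Because the online callback runs before user space learns that the CPU is up, the lazy allocation cannot race with perf opening events on that CPU, which is exactly the argument the changelog makes for dropping the prepare-time allocation.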