/*
 * mpx-hw.h: Intel MPX (Memory Protection Extensions) hardware layout.
 *
 * Describes the bounds-directory / bounds-table geometry so a selftest
 * can walk the same two-level structure the hardware walks for BNDLDX/
 * BNDSTX: a linear address is split into a directory index and a table
 * index, the directory entry (when valid) points at a bounds table.
 */
#ifndef _MPX_HW_H
#define _MPX_HW_H

#include <assert.h>	/* for assert() in __bd_entry_to_bounds_table() */

/* Describe the MPX Hardware Layout in here */

#define NR_MPX_BOUNDS_REGISTERS 4

#ifdef __i386__

/* 32-bit mode geometry */
#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES	16 /* 4 * 32-bits */
#define MPX_BOUNDS_TABLE_SIZE_BYTES		(1ULL << 14) /* 16k */
#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES		4
#define MPX_BOUNDS_DIR_SIZE_BYTES		(1ULL << 22) /* 4MB */

#define MPX_BOUNDS_TABLE_BOTTOM_BIT		2
#define MPX_BOUNDS_TABLE_TOP_BIT		11
#define MPX_BOUNDS_DIR_BOTTOM_BIT		12
#define MPX_BOUNDS_DIR_TOP_BIT			31

#else

/*
 * Linear Address of "pointer" (LAp), 64-bit mode:
 *	 0 ->  2: ignored
 *	 3 -> 19: index in to bounds table
 *	20 -> 47: index in to bounds directory
 *	48 -> 63: ignored
 */
#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES	32
#define MPX_BOUNDS_TABLE_SIZE_BYTES		(1ULL << 22) /* 4MB */
#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES		8
#define MPX_BOUNDS_DIR_SIZE_BYTES		(1ULL << 31) /* 2GB */

#define MPX_BOUNDS_TABLE_BOTTOM_BIT		3
#define MPX_BOUNDS_TABLE_TOP_BIT		19
#define MPX_BOUNDS_DIR_BOTTOM_BIT		20
#define MPX_BOUNDS_DIR_TOP_BIT			47

#endif

#define MPX_BOUNDS_DIR_NR_ENTRIES	\
	(MPX_BOUNDS_DIR_SIZE_BYTES/MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES)
#define MPX_BOUNDS_TABLE_NR_ENTRIES	\
	(MPX_BOUNDS_TABLE_SIZE_BYTES/MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES)

/* Bit 0 of a bounds-directory entry: entry points at a valid table. */
#define MPX_BOUNDS_TABLE_ENTRY_VALID_BIT	0x1

/* One bounds-directory entry: raw bytes overlaid with the stored pointer. */
struct mpx_bd_entry {
	union {
		char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
		void *contents[1];
	};
} __attribute__((packed));

/* One bounds-table entry: raw bytes overlaid with its word contents. */
struct mpx_bt_entry {
	union {
		char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
		unsigned long contents[1];
	};
} __attribute__((packed));

struct mpx_bounds_dir {
	struct mpx_bd_entry entries[MPX_BOUNDS_DIR_NR_ENTRIES];
} __attribute__((packed));

struct mpx_bounds_table {
	struct mpx_bt_entry entries[MPX_BOUNDS_TABLE_NR_ENTRIES];
} __attribute__((packed));

/*
 * Extract bits [bottombit, topbit] (both INCLUSIVE) from 'val'.
 *
 * BUGFIX: the mask width was 'topbit - bottombit', which silently
 * dropped the topmost bit of every field.  The LAp comment above and
 * the entry counts demand inclusive ranges: the 64-bit table index is
 * bits 3..19 (17 bits -> 2^17 == MPX_BOUNDS_TABLE_NR_ENTRIES) and the
 * directory index is bits 20..47 (28 bits -> 2^28 ==
 * MPX_BOUNDS_DIR_NR_ENTRIES), so the width must be topbit-bottombit+1.
 */
static inline unsigned long GET_BITS(unsigned long val, int bottombit, int topbit)
{
	int total_nr_bits = topbit - bottombit + 1;
	unsigned long mask = (1UL << total_nr_bits) - 1;

	return (val >> bottombit) & mask;
}

/* Bounds-table index field of a linear address. */
static inline unsigned long __vaddr_bounds_table_index(void *vaddr)
{
	return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_TABLE_BOTTOM_BIT,
					      MPX_BOUNDS_TABLE_TOP_BIT);
}

/* Bounds-directory index field of a linear address. */
static inline unsigned long __vaddr_bounds_directory_index(void *vaddr)
{
	return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_DIR_BOTTOM_BIT,
					      MPX_BOUNDS_DIR_TOP_BIT);
}

/* Locate the directory entry covering 'vaddr' inside 'bounds_dir'. */
static inline struct mpx_bd_entry *mpx_vaddr_to_bd_entry(void *vaddr,
		struct mpx_bounds_dir *bounds_dir)
{
	unsigned long index = __vaddr_bounds_directory_index(vaddr);

	return &bounds_dir->entries[index];
}

/*
 * Does this directory entry point at a valid bounds table?
 *
 * BUGFIX: 'contents' is an array, so the old cast
 * '(unsigned long)bounds_dir_entry->contents' decayed to the *address*
 * of the entry rather than the pointer value stored in it; since
 * entries are naturally aligned, bit 0 of the address is always clear
 * and the valid bit could never be observed.  Read contents[0] instead.
 */
static inline int bd_entry_valid(struct mpx_bd_entry *bounds_dir_entry)
{
	unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents[0];

	return (__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT);
}

/*
 * Strip the valid bit from a (known-valid) directory entry and return
 * the bounds table it points at.  Caller must ensure validity; the
 * assert() documents and enforces that contract in debug builds.
 */
static inline struct mpx_bounds_table *
__bd_entry_to_bounds_table(struct mpx_bd_entry *bounds_dir_entry)
{
	/* Same array-decay fix as bd_entry_valid(): read the stored value. */
	unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents[0];

	assert(__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT);
	__bd_entry &= ~MPX_BOUNDS_TABLE_ENTRY_VALID_BIT;
	return (struct mpx_bounds_table *)__bd_entry;
}

/*
 * Full two-level walk: directory entry -> bounds table -> table entry
 * for 'vaddr'.  Asserts (via __bd_entry_to_bounds_table) that the
 * directory entry is valid.
 */
static inline struct mpx_bt_entry *
mpx_vaddr_to_bt_entry(void *vaddr, struct mpx_bounds_dir *bounds_dir)
{
	struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(vaddr, bounds_dir);
	struct mpx_bounds_table *bt = __bd_entry_to_bounds_table(bde);
	unsigned long index = __vaddr_bounds_table_index(vaddr);

	return &bt->entries[index];
}

#endif /* _MPX_HW_H */
authorThomas Gleixner <tglx@linutronix.de>2017-01-31 23:58:38 +0100
committerIngo Molnar <mingo@kernel.org>2017-02-01 08:37:27 +0100
commitdd86e373e09fb16b83e8adf5c48c421a4ca76468 (patch)
tree55703c2ea8584e303e342090614e0aab3509ab21 /tools/perf/util/build-id.c
parent0b3589be9b98994ce3d5aeca52445d1f5627c4ba (diff)
perf/x86/intel/rapl: Make package handling more robust
The package management code in RAPL relies on package mapping being available before a CPU is started. This changed with: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") because the ACPI/BIOS information turned out to be unreliable, but that left RAPL in broken state. This was not noticed because on a regular boot all CPUs are online before RAPL is initialized. A possible fix would be to reintroduce the mess which allocates a package data structure in CPU prepare and when it turns out to already exist in starting throw it away later in the CPU online callback. But that's a horrible hack and not required at all because RAPL becomes functional for perf only in the CPU online callback. That's correct because user space is not yet informed about the CPU being onlined, so nothing can rely on RAPL being available on that particular CPU. Move the allocation to the CPU online callback and simplify the hotplug handling. At this point the package mapping is established and correct. This also adds a missing check for available package data in the event_init() function. Reported-by: Yasuaki Ishimatsu <yasu.isimatu@gmail.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sebastian Siewior <bigeasy@linutronix.de> Cc: Stephane Eranian <eranian@google.com> Cc: Vince Weaver <vincent.weaver@maine.edu> Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools/perf/util/build-id.c')