/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have
 * to track the space used by the log vectors separately to prevent log space
 * hangs due to inaccurate accounting (i.e. a leak) of the used log space
 * through the CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}
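/*
 * Illustrative sketch, not part of the original header: how a log item
 * format routine would typically use xlog_prepare_iovec()/xlog_finish_iovec()
 * via xlog_copy_iovec() above. "xfs_foo_log_format" and "XLOG_REG_TYPE_FOO"
 * are hypothetical placeholder names used only for this example.
 */
static inline void
xlog_example_format(struct xfs_log_vec *lv, struct xfs_foo_log_format *ffp)
{
	struct xfs_log_iovec	*vecp = NULL;

	/*
	 * Copy the format structure into the next region of lv->lv_buf.
	 * xlog_copy_iovec() accounts the raw length in lv_bytes and the
	 * 64-bit rounded length in lv_buf_len, matching the padding rules
	 * described in the comment above xlog_finish_iovec().
	 */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO, ffp, sizeof(*ffp));
}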
perf/x86/intel/rapl: Make package handling more robust
The package management code in RAPL relies on package mapping being
available before a CPU is started. This changed with:

  9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")

because the ACPI/BIOS information turned out to be unreliable, but that
left RAPL in a broken state. This was not noticed because on a regular
boot all CPUs are online before RAPL is initialized.

A possible fix would be to reintroduce the mess which allocates a package
data structure in CPU prepare and, when it turns out to already exist in
starting, throws it away later in the CPU online callback. But that's a
horrible hack and not required at all because RAPL becomes functional for
perf only in the CPU online callback. That's correct because user space is
not yet informed about the CPU being onlined, so nothing can rely on RAPL
being available on that particular CPU.

Move the allocation to the CPU online callback and simplify the hotplug
handling. At this point the package mapping is established and correct.

This also adds a missing check for available package data in the
event_init() function.

Reported-by: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")
Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
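For illustration only, not the patch itself: a minimal sketch of the shape
such a CPU online callback takes, assuming driver-internal names such as
struct rapl_pmu, cpu_to_rapl_pmu() and rapl_cpu_mask as placeholders.

	static int rapl_cpu_online(unsigned int cpu)
	{
		struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);	/* assumed helper */
		unsigned int target;

		/*
		 * Allocate the per-package data here: by the time the online
		 * callback runs, the package mapping is established.
		 */
		if (!pmu) {
			pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
			if (!pmu)
				return -ENOMEM;
			/* ... initialize pmu and link it to its package ... */
		}

		/* If another CPU in the package already collects events, done. */
		target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
		if (target < nr_cpu_ids)
			return 0;

		cpumask_set_cpu(cpu, &rapl_cpu_mask);
		pmu->cpu = cpu;
		return 0;
	}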