/*
 * Copyright (C) International Business Machines Corp., 2000-2002
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _H_JFS_METAPAGE
#define _H_JFS_METAPAGE

#include <linux/pagemap.h>

struct metapage {
	/* Common logsyncblk prefix (see jfs_logmgr.h) */
	u16 xflag;
	u16 unused;
	lid_t lid;
	int lsn;
	struct list_head synclist;
	/* End of logsyncblk prefix */

	unsigned long flag;	/* See below */
	unsigned long count;	/* Reference count */
	void *data;		/* Data pointer */
	sector_t index;		/* block address of page */
	wait_queue_head_t wait;

	/* implementation */
	struct page *page;
	unsigned int logical_size;

	/* Journal management */
	int clsn;
	int nohomeok;
	struct jfs_log *log;
};

/* metapage flag bits */
#define META_locked	0
#define META_dirty	2
#define META_sync	3
#define META_discard	4
#define META_forcewrite	5
#define META_io		6

#define mark_metapage_dirty(mp) set_bit(META_dirty, &(mp)->flag)

/* function prototypes */
extern int metapage_init(void);
extern void metapage_exit(void);
extern struct metapage *__get_metapage(struct inode *inode,
				       unsigned long lblock, unsigned int size,
				       int absolute, unsigned long new);

#define read_metapage(inode, lblock, size, absolute)\
	__get_metapage(inode, lblock, size, absolute, false)

#define get_metapage(inode, lblock, size, absolute)\
	__get_metapage(inode, lblock, size, absolute, true)

extern void release_metapage(struct metapage *);
extern void grab_metapage(struct metapage *);
extern void force_metapage(struct metapage *);

/*
 * hold_metapage and put_metapage are used in conjunction.  The page lock
 * is not dropped between the two, so no other thread can get or release
 * the metapage.
 */
extern void hold_metapage(struct metapage *);
extern void put_metapage(struct metapage *);

static inline void write_metapage(struct metapage *mp)
{
	set_bit(META_dirty, &mp->flag);
	release_metapage(mp);
}

static inline void flush_metapage(struct metapage *mp)
{
	set_bit(META_sync, &mp->flag);
	write_metapage(mp);
}

static inline void discard_metapage(struct metapage *mp)
{
	clear_bit(META_dirty, &mp->flag);
	set_bit(META_discard, &mp->flag);
	release_metapage(mp);
}

static inline void metapage_nohomeok(struct metapage *mp)
{
	struct page *page = mp->page;

	lock_page(page);
	if (!mp->nohomeok++) {
		/* First nohomeok hold: mark the metapage dirty, pin the
		 * page, and wait for any in-flight writeback to finish. */
		mark_metapage_dirty(mp);
		get_page(page);
		wait_on_page_writeback(page);
	}
	unlock_page(page);
}
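/*
 * Editor's sketch, not part of the original header: a minimal,
 * hypothetical illustration of the hold_metapage()/put_metapage()
 * pairing documented above.  The page lock is held across the pair,
 * so the flag word can be sampled without another thread getting or
 * releasing the metapage in between.  The name
 * "example_metapage_is_dirty" is an assumption for illustration only.
 */
static inline int example_metapage_is_dirty(struct metapage *mp)
{
	int dirty;

	hold_metapage(mp);	/* page lock taken; metapage pinned */
	dirty = test_bit(META_dirty, &mp->flag);
	put_metapage(mp);	/* page lock dropped */

	return dirty;
}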
updating imap & dmap */ static inline void metapage_wait_for_io(struct metapage *mp) { if (test_bit(META_io, &mp->flag)) wait_on_page_writeback(mp->page); } /* * This is called when already holding the metapage */ static inline void _metapage_homeok(struct metapage *mp) { if (!--mp->nohomeok) put_page(mp->page); } static inline void metapage_homeok(struct metapage *mp) { hold_metapage(mp); _metapage_homeok(mp); put_metapage(mp); } extern const struct address_space_operations jfs_metapage_aops; /* * This routines invalidate all pages for an extent. */ extern void __invalidate_metapages(struct inode *, s64, int); #define invalidate_pxd_metapages(ip, pxd) \ __invalidate_metapages((ip), addressPXD(&(pxd)), lengthPXD(&(pxd))) #define invalidate_dxd_metapages(ip, dxd) \ __invalidate_metapages((ip), addressDXD(&(dxd)), lengthDXD(&(dxd))) #define invalidate_xad_metapages(ip, xad) \ __invalidate_metapages((ip), addressXAD(&(xad)), lengthXAD(&(xad))) #endif /* _H_JFS_METAPAGE */ option>space:mode: