/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _MISC_CXL_BASE_H
#define _MISC_CXL_BASE_H

#include <misc/cxl.h>

#ifdef CONFIG_CXL_BASE

#define CXL_IRQ_RANGES 4

struct cxl_irq_ranges {
        irq_hw_number_t offset[CXL_IRQ_RANGES];
        irq_hw_number_t range[CXL_IRQ_RANGES];
};

/* Count of contexts currently using CXL; manipulated via the helpers below. */
extern atomic_t cxl_use_count;

static inline bool cxl_ctx_in_use(void)
{
        return (atomic_read(&cxl_use_count) != 0);
}

static inline void cxl_ctx_get(void)
{
        atomic_inc(&cxl_use_count);
}

static inline void cxl_ctx_put(void)
{
        atomic_dec(&cxl_use_count);
}

struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
void cxl_afu_put(struct cxl_afu *afu);
void cxl_slbia(struct mm_struct *mm);
bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
void cxl_pci_disable_device(struct pci_dev *dev);
int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);

#else /* CONFIG_CXL_BASE */

/* Stub versions used when CXL support is not built in. */
static inline bool cxl_ctx_in_use(void) { return false; }
static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
static inline void cxl_afu_put(struct cxl_afu *afu) {}
static inline void cxl_slbia(struct mm_struct *mm) {}
static inline bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) { return false; }
static inline void cxl_pci_disable_device(struct pci_dev *dev) {}
static inline int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { return -ENODEV; }
static inline void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) {}

#endif /* CONFIG_CXL_BASE */

#endif
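The cxl_ctx_get()/cxl_ctx_put()/cxl_ctx_in_use() helpers above are a plain use count. As a hypothetical illustration only (the example_* functions below are invented, assume this header is included, and are not from the kernel tree), a caller would pair them like this:

/* Hypothetical caller of the use-count helpers declared above. */
static int example_start_context(void)
{
        cxl_ctx_get();          /* mark a CXL context as live */
        /* ... set up and run the AFU context ... */
        return 0;
}

static void example_stop_context(void)
{
        /* ... tear down the AFU context ... */
        cxl_ctx_put();          /* drop the reference taken above */
}

static void example_hot_path(void)
{
        if (!cxl_ctx_in_use())
                return;         /* no contexts: skip CXL-specific work */
        /* ... CXL-specific handling ... */
}

A cheap cxl_ctx_in_use() check like this lets callers skip CXL-specific work entirely whenever no context currently holds a reference.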
author     Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-01-30 19:27:10 -0500
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-01-31 09:13:49 -0500
commit     79c6f448c8b79c321e4a1f31f98194e4f6b6cae7
tree       370efda701f03cccf21e02bb1fdd3b852547d75c
parent     0c744ea4f77d72b3dcebb7a8f2684633ec79be88
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread when the tracer is started. The thread is pinned to a single CPU and moves to the next CPU after each period of running. If the user modifies the thread's affinity, the tracer stops migrating it.

The original code created the thread the first time the tracer was used; it was later changed to destroy the thread when the tracer was finished and to create it again the next time the tracer was started. However, the code that initialized the affinity was only called on the initial instantiation of the tracer. After that it was not re-initialized, and the previously saved affinity did not match that of the newly created thread, making it appear that the user had modified the thread's affinity when they had not, so the thread failed to migrate again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
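As a standalone illustration of the migration scheme described above, the following userspace sketch (not the hwlat tracer code; allowed_mask and the other names are invented, and sched_setaffinity() stands in for the kernel's mechanism) shows the round-robin pattern and why the "next CPU" bookkeeping must be reset every time the worker is started:

/*
 * Userspace sketch of the round-robin migration pattern described in
 * the commit message above.  Illustration only, not the hwlat tracer
 * code: a worker pins itself to one CPU, "samples" for a period, then
 * moves to the next CPU allowed by a mask.  The bug being fixed
 * corresponds to forgetting to reset next_cpu each time the worker is
 * started.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static cpu_set_t allowed_mask;  /* stand-in for tracing_cpumask */
static int next_cpu = -1;       /* must be reset on every start */

static void move_to_next_allowed_cpu(void)
{
        cpu_set_t pin;
        int ncpus = sysconf(_SC_NPROCESSORS_ONLN);

        /* Find the next allowed CPU, wrapping around at the end. */
        do {
                next_cpu = (next_cpu + 1) % ncpus;
        } while (!CPU_ISSET(next_cpu, &allowed_mask));

        CPU_ZERO(&pin);
        CPU_SET(next_cpu, &pin);
        if (sched_setaffinity(0, sizeof(pin), &pin) == 0)
                printf("now sampling on CPU %d\n", next_cpu);
}

int main(void)
{
        int i, ncpus = sysconf(_SC_NPROCESSORS_ONLN);

        /* Allow every online CPU, like the default tracing_cpumask. */
        CPU_ZERO(&allowed_mask);
        for (i = 0; i < ncpus; i++)
                CPU_SET(i, &allowed_mask);

        next_cpu = -1;                  /* the reset the fix restores */
        for (i = 0; i < 4; i++) {       /* four sampling "periods" */
                move_to_next_allowed_cpu();
                sleep(1);               /* stand-in for one period */
        }
        return 0;
}

If the bookkeeping (and the saved affinity used to detect user changes) is initialized only once, a second start of the worker inherits stale state, the comparison wrongly concludes the user pinned the thread, and migration stops.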