#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-mapping.h>

void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * If the page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * Because dom0 is mapped 1:1, calling pfn_valid on a foreign mfn
	 * will always return false. If the page is local we can safely
	 * call the native dma_ops function, otherwise we call the
	 * Xen-specific function.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
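The local-vs-foreign test in xen_dma_map_page above is just a range check: the page is local when the device address's Xen frame number falls inside the compound page that starts at page_to_xen_pfn(page). Below is a minimal standalone sketch of that check; the frame numbers are made up, and XEN_PFN_PER_PAGE is modeled as a plain constant (in the kernel it depends on the Linux-to-Xen page-size ratio), so this only illustrates the arithmetic, not the kernel helpers.

#include <stdbool.h>
#include <stdio.h>

/* Assumed constant for illustration: one Linux page spanning
 * 16 Xen (4K) frames, as on a 64K-page kernel. */
#define XEN_PFN_PER_PAGE 16UL

/* Is dev_pfn backed by the same (compound) Linux page that starts
 * at page_pfn?  Mirrors the 'local' computation in xen_dma_map_page. */
static bool page_is_local(unsigned long page_pfn, unsigned long dev_pfn,
			  unsigned int compound_order)
{
	unsigned long compound_pages =
		(1UL << compound_order) * XEN_PFN_PER_PAGE;

	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
}

int main(void)
{
	/* Order-0 page starting at Xen frame 0x1000: frames
	 * 0x1000..0x100f are local, anything past that is foreign. */
	printf("%d\n", page_is_local(0x1000, 0x100f, 0)); /* 1: local   */
	printf("%d\n", page_is_local(0x1000, 0x1010, 0)); /* 0: foreign */
	return 0;
}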
author		Borislav Petkov <bp@suse.de>		2017-01-20 21:29:40 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2017-01-23 09:39:55 +0100
commit		c26665ab5c49ad3e142e0f054ca3204f259ba09c (patch)
tree		3bab11918e18e9d25ef7544dba05cdf39d1abec5 /net/nfc/nci/spi.c
parent		7a308bb3016f57e5be11a677d15b821536419d36 (diff)
x86/microcode/intel: Drop stashed AP patch pointer optimization
This was meant to save us the scanning of the microcode container in the initrd since the first AP had already done that, but it can also hurt us:

Imagine a single hyperthreaded CPU (Intel(R) Atom(TM) CPU N270, for example) which updates the microcode on the BSP. Because the microcode engine is shared between the two threads, the update on CPU1 doesn't happen as it has already happened on CPU0, and we don't find a newer microcode revision on CPU1. As a result, the intel_ucode_patch pointer never gets set, and at initrd jettisoning time we don't save the microcode patch for later application.

Now, when we suspend to RAM, the loaded microcode gets cleared, so we need to reload, but there's no patch saved in the cache. Removing the optimization fixes this issue and all is fine and dandy.

Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading")
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-2-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
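To make the failure mode concrete, here is a minimal, self-contained C model of the scenario the message describes. It is not the kernel code: scan_container() and the revision numbers are invented stand-ins, and only the intel_ucode_patch name comes from the commit message. It shows why a "scan only if the stash is empty, stash only when a newer patch is found" short-circuit leaves the cache empty on a sibling thread that shares its microcode engine with the BSP.

#include <stdio.h>

/* Illustrative model, not kernel code: one hyperthreaded core means
 * one shared microcode engine for two logical CPUs. */
struct ucode_patch { unsigned int rev; };

static struct ucode_patch initrd_patch = { .rev = 2 }; /* in the initrd */
static struct ucode_patch *intel_ucode_patch;          /* the AP stash  */
static unsigned int engine_rev = 1;                    /* shared engine */

/* Stand-in for scanning the initrd container: the scan only returns
 * a patch whose revision is newer than the one already running. */
static struct ucode_patch *scan_container(void)
{
	return initrd_patch.rev > engine_rev ? &initrd_patch : NULL;
}

int main(void)
{
	/* BSP (CPU0): early update, the shared engine now runs rev 2. */
	struct ucode_patch *p = scan_container();
	if (p)
		engine_rev = p->rev;

	/* AP (CPU1), with the dropped optimization in place: scan only
	 * if the stash is empty.  The shared engine already runs rev 2,
	 * so no "newer" patch is found and the stash is never set. */
	if (!intel_ucode_patch)
		intel_ucode_patch = scan_container(); /* NULL here */

	/* Initrd gets jettisoned: only the stash would survive, and a
	 * suspend/resume cycle must reload the microcode from it. */
	printf("patch cached for resume: %s\n",
	       intel_ucode_patch ? "yes" : "no -> lost after suspend");
	return 0;
}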
Diffstat (limited to 'net/nfc/nci/spi.c')