#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)

#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	const struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	PHYS_PFN(paddr)
#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif
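For illustration, a minimal sketch of how the conversions above compose into
a physical-address/struct-page round trip. phys_addr_to_page() and
page_to_phys_addr() are hypothetical helpers invented for this example, not
part of the header:

#include <linux/mm.h>	/* pulls in asm/page.h and, via it, this header */

/* Hypothetical helper: physical address -> PFN -> struct page. */
static struct page *phys_addr_to_page(phys_addr_t paddr)
{
	unsigned long pfn = __phys_to_pfn(paddr);	/* PHYS_PFN: paddr >> PAGE_SHIFT */

	return pfn_to_page(pfn);	/* resolved by the configured memory model */
}

/* Hypothetical helper: struct page -> PFN -> physical address. */
static phys_addr_t page_to_phys_addr(struct page *page)
{
	return __pfn_to_phys(page_to_pfn(page));	/* PFN_PHYS: pfn << PAGE_SHIFT */
}

Only the pfn_to_page()/page_to_pfn() step differs between the memory models;
the PFN/physical-address conversions are simple shifts in every case.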
author		Paul Burton <paul.burton@imgtec.com>	2016-08-19 18:07:15 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2016-08-22 18:37:51 +0200
commit		2564970a381651865364974ea414384b569cb9c0 (patch)
tree		4bfbdd4a519afcf1712dd025cd5041ae085314d5
parent		6a33fa2b87513fee44cb8f0cd17b1acd6316bc6b (diff)
irqchip/mips-gic: Implement activate op for device domain
If an IRQ is set up using __setup_irq(), which is used by the request_irq()
family of functions, and we are using an SMP kernel, then the affinity of the
IRQ will be set via setup_affinity() immediately after the IRQ is enabled.
This call to gic_set_affinity() will lead to the interrupt being mapped to a
VPE.

However, there are other ways to use IRQs which don't cause affinity to be
set, for example when an IRQ is used to chain to another IRQ controller with
irq_set_chained_handler_and_data(). The irq_set_chained_handler_and_data()
code path will enable the IRQ, but will not trigger a call to
gic_set_affinity(). In this case nothing maps the interrupt to a VPE, meaning
the interrupt is never received.

Fix this by implementing the activate operation for the GIC device IRQ
domain, using gic_shared_irq_domain_map() to map the interrupt to the correct
pin of cpu 0.

Fixes: c98c1822ee13 ("irqchip/mips-gic: Add device hierarchy domain")
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20160819170715.27820-2-paul.burton@imgtec.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
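A minimal sketch of what the described fix could look like in
drivers/irqchip/irq-mips-gic.c. The void-returning .activate signature
matches irq_domain_ops of this kernel era, but the exact argument list of
gic_shared_irq_domain_map() (in particular the trailing CPU/VPE selector) is
an assumption for illustration, not a verbatim copy of the patch:

/*
 * Sketch of the activate op for the GIC device domain: ensure the
 * interrupt is routed to a VPE even when nothing ever calls
 * gic_set_affinity(), e.g. the irq_set_chained_handler_and_data() path.
 */
static void gic_dev_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	/* Map the shared interrupt to the relevant pin of cpu 0
	 * (assumed: final argument selects the VPE). */
	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}

static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate		= gic_dev_domain_xlate,
	.alloc		= gic_dev_domain_alloc,
	.free		= gic_dev_domain_free,
	.activate	= gic_dev_domain_activate,	/* the fix */
};

Because the irq_domain core invokes .activate whenever an IRQ in the domain
is started up, this covers both the request_irq() path and the chained
handler path, regardless of whether gic_set_affinity() is ever called.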