/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and
 * this allocation is deep inside the MM code and can thus easily fail on
 * memory pressure. To guarantee progress we fall back to single table
 * freeing; see the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
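/*
 * Worked example of the limits above (illustrative only; assumes a 4K
 * PAGE_SIZE and 64-bit pointers, so sizeof(struct mmu_gather_batch) is
 * 16 bytes):
 *
 *	MAX_GATHER_BATCH	= (4096 - 16) / 8 = 510 pages per batch
 *	MAX_GATHER_BATCH_COUNT	= 10000 / 510    = 19 batches
 *
 * i.e. at most about 19 * 510 = 9690 pages are freed between two
 * opportunities to reschedule, keeping us under the ~10K target.
 */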
/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int			page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
		    unsigned long end);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about page size change, just update
	 * mmu_gather page size here so that debug checks
	 * don't throw false warnings.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
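/*
 * A minimal sketch of how a caller drives this API when tearing down a
 * range of user mappings (hypothetical pseudocode, modelled on the zap
 * path in mm/memory.c; for_each_pte_in_range is not a real helper):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for_each_pte_in_range(ptep, addr, start, end) {
 *		pte = ptep_get_and_clear(mm, addr, ptep);
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		if (__tlb_remove_page(&tlb, page))
 *			tlb_flush_mmu(&tlb);
 *	}
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * __tlb_remove_page() returning true means the gather batch is full, so
 * the caller must flush before queueing more pages, which is exactly
 * what tlb_remove_page_size() above does on the caller's behalf.
 */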
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */
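/*
 * A minimal sketch of how an architecture hooks into this file
 * (illustrative only; the exact set of hooks varies per arch). An arch
 * header defines its overrides before pulling in the generic code, e.g.:
 *
 *	#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)
 *	#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
 *	#include <asm-generic/tlb.h>
 *
 * Anything left undefined (tlb_start_vma, tlb_end_vma, ...) falls back
 * to the generic no-op or range-flush versions above.
 */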