#ifndef _PGTABLE_NOPUD_H
#define _PGTABLE_NOPUD_H

#ifndef __ASSEMBLY__

#define __PAGETABLE_PUD_FOLDED

/*
 * Having the pud type consist of a pgd gets the size right, and allows
 * us to conceptually access the pgd entry that this pud is folded into
 * without casting.
 */
typedef struct { pgd_t pgd; } pud_t;

#define PUD_SHIFT	PGDIR_SHIFT
#define PTRS_PER_PUD	1
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgd)	{ }
#define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))

#define pgd_populate(mm, pgd, pud)		do { } while (0)
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pgd(pgdptr, pgdval)	set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;
}

#define pud_val(x)				(pgd_val((x).pgd))
#define __pud(x)				((pud_t) { __pgd(x) } )

#define pgd_page(pgd)				(pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd)			(pud_page_vaddr((pud_t){ pgd }))

/*
 * allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address)		NULL
#define pud_free(mm, x)				do { } while (0)
#define __pud_free_tlb(tlb, x, a)		do { } while (0)

#undef pud_addr_end
#define pud_addr_end(addr, end)			(end)

#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPUD_H */
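To illustrate what the folding buys, here is a minimal sketch of a caller; walk_to_pud() is a hypothetical helper, not part of the header. Generic walk code written against the pgd/pud API compiles unchanged on a folded configuration, because pud_offset() is just a cast and the pgd_none()/pgd_bad() checks become constant-false.

/*
 * Hypothetical illustration, not from this header: a generic
 * page-table walk down to the pud level. On a folded-pud setup,
 * pud_offset() simply returns the pgd slot cast to pud_t *.
 */
static pud_t *walk_to_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* arch-provided top-level lookup */

	if (pgd_none(*pgd) || pgd_bad(*pgd))	/* always false when folded */
		return NULL;
	return pud_offset(pgd, addr);		/* (pud_t *)pgd when folded */
}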
path: root/fs/nfs/nfs4namespace.c
author    Chuck Lever <chuck.lever@oracle.com>            2013-10-17 14:14:10 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2013-10-28 15:31:55 -0400
commit    0625c2dd6ae32a221871ffc04bb752216b823001
tree      ade4401e23b075b947ca9d5453d2b538f37203a3    /fs/nfs/nfs4namespace.c
parent    cd3fadece2b97462583500371a3eb9b091c49603
NFS: Fix possible endless state recovery wait
In nfs4_wait_clnt_recover(), hold a reference to the clp being waited on. The state manager can reduce clp->cl_count to 1, in which case the nfs_put_client() in nfs4_run_state_manager() can free *clp before wait_on_bit() returns and allows nfs4_wait_clnt_recover() to run again.

The behavior at that point is non-deterministic. If the waited-on bit still happens to be zero, wait_on_bit() will wake the waiter as expected. If the bit is set again (say, if the memory was poisoned when freed), wait_on_bit() can leave the waiter asleep.

This is a narrow fix which ensures the safety of accessing *clp in nfs4_wait_clnt_recover(), but it does not address the continued use of a possibly freed *clp after nfs4_wait_clnt_recover() returns (see nfs_end_delegation_return(), for example).

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
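A sketch of the pattern the message describes (the shape of the fix, not necessarily the verbatim patch): pin the nfs_client before sleeping, and drop the reference on every exit path, so the state manager's final nfs_put_client() cannot free *clp out from under the waiter.

/*
 * Sketch of the fix described above: take a reference on clp before
 * wait_on_bit() so nfs4_run_state_manager()'s nfs_put_client() cannot
 * free *clp while this task sleeps.
 */
int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	atomic_inc(&clp->cl_count);	/* hold clp across the wait */
	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		goto out;
	if (clp->cl_cons_state < 0)
		res = clp->cl_cons_state;
out:
	nfs_put_client(clp);		/* drop our reference on every path */
	return res;
}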
Diffstat (limited to 'fs/nfs/nfs4namespace.c')