/*
 * Cache operations for Coda.
 * For Linux 2.1: (C) 1997 Carnegie Mellon University
 * For Linux 2.3: (C) 2000 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project http://www.coda.cs.cmu.edu/ <coda@cs.cmu.edu>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_linux.h"
#include "coda_cache.h"

static atomic_t permission_epoch = ATOMIC_INIT(0);

/* replace or extend an acl cache hit */
void coda_cache_enter(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);

	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch);
	if (!uid_eq(cii->c_uid, current_fsuid())) {
		/* a different fsuid owns the cache slot: replace it */
		cii->c_uid = current_fsuid();
		cii->c_cached_perm = mask;
	} else
		/* same fsuid: extend the set of cached permission bits */
		cii->c_cached_perm |= mask;
	spin_unlock(&cii->c_lock);
}

/* remove cached acl from an inode */
void coda_cache_clear_inode(struct inode *inode)
{
	struct coda_inode_info *cii = ITOC(inode);

	/* backdating the inode's epoch guarantees the next
	 * coda_cache_check() on it misses */
	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
	spin_unlock(&cii->c_lock);
}

/* remove all acl caches */
void coda_cache_clear_all(struct super_block *sb)
{
	/* bumping the global epoch invalidates every cached acl at once,
	 * without touching each inode */
	atomic_inc(&permission_epoch);
}

/* check if the mask has been matched against the acl already */
int coda_cache_check(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);
	int hit;

	spin_lock(&cii->c_lock);
	hit = (mask & cii->c_cached_perm) == mask &&
	      uid_eq(cii->c_uid, current_fsuid()) &&
	      cii->c_cached_epoch == atomic_read(&permission_epoch);
	spin_unlock(&cii->c_lock);

	return hit;
}


/* Purging dentries and children */

/*
 * The following routines drop dentries which are not in use and flag
 * dentries which are in use to be zapped later.
 *
 * The flags are detected by:
 * - coda_dentry_revalidate (for lookups) if the flag is C_PURGE
 * - coda_dentry_delete: to remove dentry from the cache when d_count
 *   falls to zero
 * - an inode method coda_revalidate (for attributes) if the flag is C_VATTR
 */

/* this won't do any harm: just flag all children */
static void coda_flag_children(struct dentry *parent, int flag)
{
	struct dentry *de;

	spin_lock(&parent->d_lock);
	list_for_each_entry(de, &parent->d_subdirs, d_child) {
		/* don't know what to do with negative dentries */
		if (d_inode(de))
			coda_flag_inode(d_inode(de), flag);
	}
	spin_unlock(&parent->d_lock);
}

void coda_flag_inode_children(struct inode *inode, int flag)
{
	struct dentry *alias_de;

	if (!inode || !S_ISDIR(inode->i_mode))
		return;

	alias_de = d_find_alias(inode);
	if (!alias_de)
		return;

	coda_flag_children(alias_de, flag);
	shrink_dcache_parent(alias_de);
	dput(alias_de);
}
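
/*
 * Usage sketch (not part of this file): how the acl cache above is
 * typically driven from the permission path.  example_permission() below
 * is a simplified, hypothetical caller modelled on coda_permission() in
 * fs/coda/dir.c; venus_access() and coda_i2f() belong to the Coda
 * client's upcall interface and may differ in detail from this sketch.
 * A cache miss falls through to an upcall to the userspace cache manager
 * (Venus), and only a successful answer is entered into the cache:
 */
#if 0
static int example_permission(struct inode *inode, int mask)
{
	int error;

	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (!mask)
		return 0;

	/* fast path: this mask was already granted to this fsuid
	 * during the current permission_epoch */
	if (coda_cache_check(inode, mask))
		return 0;

	/* slow path: ask Venus, and cache only a positive answer;
	 * any later coda_cache_clear_all() bumps the epoch and makes
	 * this cached result stale automatically */
	error = venus_access(inode->i_sb, coda_i2f(inode), mask);
	if (!error)
		coda_cache_enter(inode, mask);

	return error;
}
#endif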