/* cnode related routines for the coda kernel code
   (C) 1996 Peter Braam
   */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/time.h>

#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include <linux/pagemap.h>
#include "coda_linux.h"

/* compare two Coda file identifiers for equality */
static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
{
	return memcmp(fid1, fid2, sizeof(*fid1)) == 0;
}

static const struct inode_operations coda_symlink_inode_operations = {
	.get_link	= page_get_link,
	.setattr	= coda_setattr,
};

/* cnode.c */
static void coda_fill_inode(struct inode *inode, struct coda_vattr *attr)
{
	coda_vattr_to_iattr(inode, attr);

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &coda_file_inode_operations;
		inode->i_fop = &coda_file_operations;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &coda_dir_inode_operations;
		inode->i_fop = &coda_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &coda_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_data.a_ops = &coda_symlink_aops;
		inode->i_mapping = &inode->i_data;
	} else
		init_special_inode(inode, inode->i_mode,
				   huge_decode_dev(attr->va_rdev));
}

/* match callback for iget5_locked()/ilookup5(): compare fids */
static int coda_test_inode(struct inode *inode, void *data)
{
	struct CodaFid *fid = (struct CodaFid *)data;
	struct coda_inode_info *cii = ITOC(inode);

	return coda_fideq(&cii->c_fid, fid);
}

/* init callback for iget5_locked(): store the fid in the new inode */
static int coda_set_inode(struct inode *inode, void *data)
{
	struct CodaFid *fid = (struct CodaFid *)data;
	struct coda_inode_info *cii = ITOC(inode);

	cii->c_fid = *fid;
	return 0;
}

struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid,
			struct coda_vattr *attr)
{
	struct inode *inode;
	struct coda_inode_info *cii;
	unsigned long hash = coda_f2i(fid);

	inode = iget5_locked(sb, hash, coda_test_inode, coda_set_inode, fid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		cii = ITOC(inode);
		/* we still need to set i_ino for things like stat(2) */
		inode->i_ino = hash;
		/* inode is locked and unique, no need to grab cii->c_lock */
		cii->c_mapcount = 0;
		unlock_new_inode(inode);
	}

	/* always replace the attributes, type might have changed */
	coda_fill_inode(inode, attr);
	return inode;
}

/* this is effectively coda_iget:
 * - get attributes (might be cached)
 * - get the inode for the fid using vfs iget
 * - link the two up if this is needed
 * - fill in the attributes
 */
struct inode *coda_cnode_make(struct CodaFid *fid, struct super_block *sb)
{
	struct coda_vattr attr;
	struct inode *inode;
	int error;

	/* We get inode numbers from Venus -- see venus source */
	error = venus_getattr(sb, fid, &attr);
	if (error)
		return ERR_PTR(error);

	inode = coda_iget(sb, fid, &attr);
	if (IS_ERR(inode))
		pr_warn("%s: coda_iget failed\n", __func__);
	return inode;
}

/* Although we treat Coda file identifiers as immutable, there is one
 * special case for files created during a disconnection where they may
 * not be globally unique. When an identifier collision is detected we
 * first try to flush the cached inode from the kernel and finally
 * resort to renaming/rehashing in-place. Userspace remembers both old
 * and new values of the identifier to handle any in-flight upcalls.
 * The real solution is to use globally unique UUIDs as identifiers, but
 * retrofitting the existing userspace code for this is non-trivial.
 */
void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid,
		      struct CodaFid *newfid)
{
	struct coda_inode_info *cii = ITOC(inode);
	unsigned long hash = coda_f2i(newfid);

	BUG_ON(!coda_fideq(&cii->c_fid, oldfid));

	/* replace fid and rehash inode */
	/* XXX we probably need to hold some lock here! */
	remove_inode_hash(inode);
	cii->c_fid = *newfid;
	inode->i_ino = hash;
	__insert_inode_hash(inode, hash);
}

/* convert a fid to an inode. */
struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb)
{
	struct inode *inode;
	unsigned long hash = coda_f2i(fid);

	if (!sb) {
		pr_warn("%s: no sb!\n", __func__);
		return NULL;
	}

	inode = ilookup5(sb, hash, coda_test_inode, fid);
	if (!inode)
		return NULL;

	/* we should never see newly created inodes because we intentionally
	 * fail in the initialization callback */
	BUG_ON(inode->i_state & I_NEW);

	return inode;
}

/* the CONTROL inode is made without asking attributes from Venus */
struct inode *coda_cnode_makectl(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = CTL_INO;
		inode->i_op = &coda_ioctl_inode_operations;
		inode->i_fop = &coda_ioctl_operations;
		inode->i_mode = 0444;
		return inode;
	}
	return ERR_PTR(-ENOMEM);
}