#ifndef __ASM_GENERIC_SHMBUF_H
#define __ASM_GENERIC_SHMBUF_H

#include <asm/bitsperlong.h>

/*
 * The shmid64_ds structure for x86 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * shmid64_ds was originally meant to be architecture specific, but
 * everyone just ended up making identical copies without specific
 * optimizations, so we may just as well all use the same one.
 *
 * 64 bit architectures typically define a 64 bit __kernel_time_t,
 * so they do not need the first two padding words.
 * On big-endian systems, the padding is in the wrong place.
 *
 * Pad space is left for:
 * - 64-bit time_t to solve y2038 problem
 * - 2 miscellaneous 32-bit values
 */

struct shmid64_ds {
	struct ipc64_perm	shm_perm;	/* operation perms */
	size_t			shm_segsz;	/* size of segment (bytes) */
	__kernel_time_t		shm_atime;	/* last attach time */
#if __BITS_PER_LONG != 64
	unsigned long		__unused1;
#endif
	__kernel_time_t		shm_dtime;	/* last detach time */
#if __BITS_PER_LONG != 64
	unsigned long		__unused2;
#endif
	__kernel_time_t		shm_ctime;	/* last change time */
#if __BITS_PER_LONG != 64
	unsigned long		__unused3;
#endif
	__kernel_pid_t		shm_cpid;	/* pid of creator */
	__kernel_pid_t		shm_lpid;	/* pid of last operator */
	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
	__kernel_ulong_t	__unused4;
	__kernel_ulong_t	__unused5;
};

struct shminfo64 {
	__kernel_ulong_t	shmmax;
	__kernel_ulong_t	shmmin;
	__kernel_ulong_t	shmmni;
	__kernel_ulong_t	shmseg;
	__kernel_ulong_t	shmall;
	__kernel_ulong_t	__unused1;
	__kernel_ulong_t	__unused2;
	__kernel_ulong_t	__unused3;
	__kernel_ulong_t	__unused4;
};

#endif /* __ASM_GENERIC_SHMBUF_H */
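
/*
 * Illustrative sketch, not part of this header: a minimal userspace
 * program, assuming <sys/shm.h> and the C library's struct shmid_ds
 * (which mirrors the shmid64_ds layout above), showing how the segment
 * state described by these fields is read back with shmctl(IPC_STAT).
 * The 4096-byte size and 0600 mode are arbitrary example values.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Create a private 4 KiB segment readable/writable by the owner. */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	struct shmid_ds ds;

	if (id < 0)
		return 1;

	/* IPC_STAT copies the kernel's per-segment state into ds. */
	if (shmctl(id, IPC_STAT, &ds) == 0)
		printf("size=%zu attaches=%lu cpid=%d\n",
		       ds.shm_segsz, (unsigned long)ds.shm_nattch,
		       (int)ds.shm_cpid);

	/* Mark the segment for removal; it is freed after the last detach. */
	shmctl(id, IPC_RMID, NULL);
	return 0;
}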