/*
 * sys_ipc() is the old de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly, and new architectures should just wire up
 * the individual syscalls instead.
 */
#include <linux/unistd.h>

#ifdef __ARCH_WANT_SYS_IPC
#include <linux/errno.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second,
				      (const struct timespec __user *)fifth);

	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL: {
		unsigned long arg;

		if (!ptr)
			return -EINVAL;
		if (get_user(arg, (unsigned long __user *) ptr))
			return -EFAULT;
		return sys_semctl(first, second, third, arg);
	}

	case MSGSND:
		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
				  second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			struct ipc_kludge tmp;

			if (!ptr)
				return -EINVAL;
			if (copy_from_user(&tmp,
					   (struct ipc_kludge __user *) ptr,
					   sizeof(tmp)))
				return -EFAULT;
			return sys_msgrcv(first, tmp.msgp, second,
					  tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv(first,
					  (struct msgbuf __user *) ptr,
					  second, fifth, third);
		}
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return sys_msgctl(first, second, (struct msqid_ds __user *)ptr);

	case SHMAT:
		switch (version) {
		default: {
			unsigned long raddr;

			ret = do_shmat(first, (char __user *)ptr,
				       second, &raddr, SHMLBA);
			if (ret)
				return ret;
			return put_user(raddr, (unsigned long __user *) third);
		}
		case 1:
			/*
			 * This was the entry point for kernel-originating calls
			 * from iBCS2 in 2.2 days.
			 */
			return -EINVAL;
		}
	case SHMDT:
		return sys_shmdt((char __user *)ptr);
	case SHMGET:
		return sys_shmget(first, second, third);
	case SHMCTL:
		return sys_shmctl(first, second,
				  (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}

#endif
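For context, a minimal user-space sketch of how this multiplexer is reached; it is an illustration added here, not part of the kernel file above. On an architecture that still wires up the multiplexed syscall (32-bit x86, for example), libc routines such as semget() and semop() end up issuing SYS_ipc with the sub-call number in 'call'. The literals 2 (SEMGET) and 1 (SEMOP) are the sub-call values from the kernel's <linux/ipc.h>; the SYS_ipc guard keeps the sketch building on architectures without the multiplexer.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
#ifdef SYS_ipc	/* only defined where the multiplexed syscall exists */
	/* SEMGET: first = key, second = nsems, third = semflg */
	long semid = syscall(SYS_ipc, 2 /* SEMGET */, IPC_PRIVATE, 1,
			     IPC_CREAT | 0600, NULL, 0L);
	if (semid < 0) {
		perror("SYS_ipc SEMGET");
		return 1;
	}

	/* SEMOP: first = semid, second = nsops, ptr = sops */
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	if (syscall(SYS_ipc, 1 /* SEMOP */, (int)semid, 1, 0UL, &op, 0L) < 0) {
		perror("SYS_ipc SEMOP");
		return 1;
	}
	printf("semaphore %ld incremented via sys_ipc()\n", semid);
#endif
	return 0;
}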
author    Borislav Petkov <bp@suse.de>          2017-01-20 21:29:40 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2017-01-23 09:39:55 +0100
commit    c26665ab5c49ad3e142e0f054ca3204f259ba09c (patch)
tree      3bab11918e18e9d25ef7544dba05cdf39d1abec5 /sound/firewire
parent    7a308bb3016f57e5be11a677d15b821536419d36 (diff)
x86/microcode/intel: Drop stashed AP patch pointer optimization
This was meant to save us the scanning of the microcode container in the
initrd since the first AP had already done that, but it can also hurt us:

Imagine a single hyperthreaded CPU (an Intel(R) Atom(TM) CPU N270, for
example) which updates the microcode on the BSP. Because the microcode
engine is shared between the two threads, the update on CPU1 doesn't happen,
as it has already happened on CPU0, and we don't find a newer microcode
revision on CPU1. As a result, the intel_ucode_patch pointer never gets set,
and at initrd jettisoning time we don't save the microcode patch for later
application.

Now, when we suspend to RAM, the loaded microcode gets cleared, so we need
to reload it, but there is no patch saved in the cache.

Removing the optimization fixes this issue and all is fine and dandy.

Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading")
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-2-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
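To make the failure mode concrete, here is a small illustrative sketch of a stashed-pointer shortcut of the kind this patch removes; the type and helper names below are assumptions made for the example and are not the actual arch/x86/kernel/cpu/microcode/intel.c code.

#include <stdio.h>

struct ucode_patch { unsigned int rev; };

/* Assumed cache slot: set by the first AP that scans the initrd. */
static struct ucode_patch *stashed_patch;

/* Stub: pretend the initrd holds nothing newer than the running revision. */
static struct ucode_patch *scan_initrd(unsigned int cur_rev)
{
	(void)cur_rev;
	return NULL;
}

static void apply_patch(struct ucode_patch *p)
{
	printf("applying microcode rev 0x%x\n", p->rev);
}

/* Per-AP early load with the shortcut: reuse whatever a sibling stashed. */
static void load_ucode_ap(unsigned int cur_rev)
{
	if (stashed_patch) {
		apply_patch(stashed_patch);
		return;
	}

	/*
	 * The failure mode from the commit message: on a hyperthreaded
	 * single core the sibling already runs the updated revision, so
	 * this scan finds nothing newer, stashed_patch stays NULL, nothing
	 * is saved before the initrd is jettisoned, and resume from RAM
	 * then has no patch to reapply.
	 */
	stashed_patch = scan_initrd(cur_rev);
	if (stashed_patch)
		apply_patch(stashed_patch);
}

int main(void)
{
	load_ucode_ap(0x20a);	/* CPU1 on the hyperthreaded core: no-op */
	return 0;
}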