/*
 * xen-acpi-pad.c - Xen pad interface
 *
 * Copyright (c) 2012, Intel Corporation.
 * Author: Liu, Jinsong
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypercall.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS         "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME   "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY        0x80

static DEFINE_MUTEX(xen_cpu_lock);

/* Ask the hypervisor to park @idle_nums CPUs */
static int xen_acpi_pad_idle_cpus(unsigned int idle_nums)
{
        struct xen_platform_op op;

        op.cmd = XENPF_core_parking;
        op.u.core_parking.type = XEN_CORE_PARKING_SET;
        op.u.core_parking.idle_nums = idle_nums;

        return HYPERVISOR_platform_op(&op);
}

/* Query how many CPUs the hypervisor currently keeps parked */
static int xen_acpi_pad_idle_cpus_num(void)
{
        struct xen_platform_op op;

        op.cmd = XENPF_core_parking;
        op.u.core_parking.type = XEN_CORE_PARKING_GET;

        return HYPERVISOR_platform_op(&op)
               ?: op.u.core_parking.idle_nums;
}

/*
 * Query firmware how many CPUs should be idle
 * return -1 on failure
 */
static int acpi_pad_pur(acpi_handle handle)
{
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
        int num = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
                return num;

        if (!buffer.length || !buffer.pointer)
                return num;

        package = buffer.pointer;

        if (package->type == ACPI_TYPE_PACKAGE &&
            package->package.count == 2 &&
            package->package.elements[0].integer.value == 1) /* rev 1 */
                num = package->package.elements[1].integer.value;

        kfree(buffer.pointer);
        return num;
}

static void acpi_pad_handle_notify(acpi_handle handle)
{
        int idle_nums;
        struct acpi_buffer param = {
                .length = 4,
                .pointer = (void *)&idle_nums,
        };

        mutex_lock(&xen_cpu_lock);
        idle_nums = acpi_pad_pur(handle);
        if (idle_nums < 0) {
                mutex_unlock(&xen_cpu_lock);
                return;
        }

        idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
                    ?: xen_acpi_pad_idle_cpus_num();
        /* On success, report the number of parked CPUs back to firmware via _OST */
        if (idle_nums >= 0)
                acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY,
                                  0, &param);
        mutex_unlock(&xen_cpu_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
                            void *data)
{
        switch (event) {
        case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
                acpi_pad_handle_notify(handle);
                break;
        default:
                pr_warn("Unsupported event [0x%x]\n", event);
                break;
        }
}

static int acpi_pad_add(struct acpi_device *device)
{
        acpi_status status;

        strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

        status = acpi_install_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        return 0;
}

static int acpi_pad_remove(struct acpi_device *device)
{
        /* Unpark all CPUs before the driver goes away */
        mutex_lock(&xen_cpu_lock);
        xen_acpi_pad_idle_cpus(0);
        mutex_unlock(&xen_cpu_lock);

        acpi_remove_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify);

        return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
        {"ACPI000C", 0},
        {"", 0},
};

static struct acpi_driver acpi_pad_driver = {
        .name = "processor_aggregator",
        .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
        .ids = pad_device_ids,
        .ops = {
                .add = acpi_pad_add,
                .remove = acpi_pad_remove,
        },
};

static int __init xen_acpi_pad_init(void)
{
        /* Only DOM0 is responsible for Xen acpi pad */
        if (!xen_initial_domain())
                return -ENODEV;

        /* Only Xen 4.2 or later supports Xen acpi pad */
        if (!xen_running_on_version_or_later(4, 2))
                return -ENODEV;

        return acpi_bus_register_driver(&acpi_pad_driver);
}
subsys_initcall(xen_acpi_pad_init);
author    Thomas Gleixner <tglx@linutronix.de>  2017-01-31 09:37:34 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2017-01-31 21:47:58 +0100
commit    0becc0ae5b42828785b589f686725ff5bc3b9b25
tree      be6d0e1f37c38ed0a7dd5da2d4b1e93f0fb43101
parent    24c2503255d35c269b67162c397a1a1c1e02f6ce

x86/mce: Make timer handling more robust
Erik reported that on preproduction hardware a CMCI storm triggers the
BUG_ON in add_timer_on(). The reason is that the per-CPU MCE timer is
started by the CMCI logic before the MCE CPU hotplug callback starts the
timer with add_timer_on(). So the timer is already queued, which triggers
the BUG.

Using add_timer_on() is pretty pointless in this code because the timer is
strictly per CPU, initialized as pinned, and all operations which arm the
timer happen on the CPU to which the timer belongs. Simplify the whole
machinery by using mod_timer() instead of add_timer_on(), which avoids the
problem because mod_timer() can handle already queued timers. Use
__start_timer() everywhere so the earliest armed expiry time is preserved.

Reported-by: Erik Veijola <erik.veijola@intel.com>
Tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701310936080.3457@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
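The fix is easiest to see as a code pattern. The sketch below is a minimal
illustration of what the message describes, assuming a pinned per-CPU
struct timer_list: the helper name __start_timer() is taken from the
message above, but the body shown here is an assumption rather than the
literal mce.c change.

/*
 * Illustrative sketch, not the actual mce.c patch: arm a pinned per-CPU
 * timer with mod_timer(), which copes with an already-queued timer,
 * instead of add_timer_on(), which BUGs if the timer is pending.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/irqflags.h>

static void __start_timer(struct timer_list *t, unsigned long interval)
{
        unsigned long when = jiffies + interval;
        unsigned long flags;

        local_irq_save(flags);

        /*
         * Arm the timer, or pull an already-queued timer's expiry earlier;
         * never push it out, so the earliest armed deadline is preserved.
         */
        if (!timer_pending(t) || time_before(when, t->expires))
                mod_timer(t, round_jiffies(when));

        local_irq_restore(flags);
}

With a helper like this, both the CMCI storm logic and the CPU hotplug
callback can arm the timer through the same path: mod_timer() simply
re-arms a timer that is already queued, where add_timer_on() would trip
its BUG_ON, and the time_before() check keeps the earliest pending expiry.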