#include "../../util/kvm-stat.h" #include #include #include define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS); define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS); static struct kvm_events_ops exit_events = { .is_begin_event = exit_event_begin, .is_end_event = exit_event_end, .decode_key = exit_event_decode_key, .name = "VM-EXIT" }; const char *vcpu_id_str = "vcpu_id"; const int decode_str_len = 20; const char *kvm_exit_reason = "exit_reason"; const char *kvm_entry_trace = "kvm:kvm_entry"; const char *kvm_exit_trace = "kvm:kvm_exit"; /* * For the mmio events, we treat: * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...). */ static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) { key->key = perf_evsel__intval(evsel, sample, "gpa"); key->info = perf_evsel__intval(evsel, sample, "type"); } #define KVM_TRACE_MMIO_READ_UNSATISFIED 0 #define KVM_TRACE_MMIO_READ 1 #define KVM_TRACE_MMIO_WRITE 2 static bool mmio_event_begin(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) { /* MMIO read begin event in kernel. */ if (kvm_exit_event(evsel)) return true; /* MMIO write begin event in kernel. */ if (!strcmp(evsel->name, "kvm:kvm_mmio") && perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { mmio_event_get_key(evsel, sample, key); return true; } return false; } static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) { /* MMIO write end event in kernel. */ if (kvm_entry_event(evsel)) return true; /* MMIO read end event in kernel.*/ if (!strcmp(evsel->name, "kvm:kvm_mmio") && perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { mmio_event_get_key(evsel, sample, key); return true; } return false; } static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char *decode) { scnprintf(decode, decode_str_len, "%#lx:%s", (unsigned long)key->key, key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R"); } static struct kvm_events_ops mmio_events = { .is_begin_event = mmio_event_begin, .is_end_event = mmio_event_end, .decode_key = mmio_event_decode_key, .name = "MMIO Access" }; /* The time of emulation pio access is from kvm_pio to kvm_entry. */ static void ioport_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) { key->key = perf_evsel__intval(evsel, sample, "port"); key->info = perf_evsel__intval(evsel, sample, "rw"); } static bool ioport_event_begin(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key) { if (!strcmp(evsel->name, "kvm:kvm_pio")) { ioport_event_get_key(evsel, sample, key); return true; } return false; } static bool ioport_event_end(struct perf_evsel *evsel, struct perf_sample *sample __maybe_unused, struct event_key *key __maybe_unused) { return kvm_entry_event(evsel); } static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char *decode) { scnprintf(decode, decode_str_len, "%#llx:%s", (unsigned long long)key->key, key->info ? 
"POUT" : "PIN"); } static struct kvm_events_ops ioport_events = { .is_begin_event = ioport_event_begin, .is_end_event = ioport_event_end, .decode_key = ioport_event_decode_key, .name = "IO Port Access" }; const char *kvm_events_tp[] = { "kvm:kvm_entry", "kvm:kvm_exit", "kvm:kvm_mmio", "kvm:kvm_pio", NULL, }; struct kvm_reg_events_ops kvm_reg_events_ops[] = { { .name = "vmexit", .ops = &exit_events }, { .name = "mmio", .ops = &mmio_events }, { .name = "ioport", .ops = &ioport_events }, { NULL, NULL }, }; const char * const kvm_skip_events[] = { "HLT", NULL, }; int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid) { if (strstr(cpuid, "Intel")) { kvm->exit_reasons = vmx_exit_reasons; kvm->exit_reasons_isa = "VMX"; } else if (strstr(cpuid, "AMD")) { kvm->exit_reasons = svm_exit_reasons; kvm->exit_reasons_isa = "SVM"; } else return -ENOTSUP; return 0; } option value='25'>25space:mode: