#include "evlist.h" #include "evsel.h" #include "parse-events.h" #include "tests.h" #include "debug.h" static int perf_evsel__roundtrip_cache_name_test(void) { char name[128]; int type, op, err = 0, ret = 0, i, idx; struct perf_evsel *evsel; struct perf_evlist *evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { /* skip invalid cache type */ if (!perf_evsel__is_cache_op_valid(type, op)) continue; for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { __perf_evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); err = parse_events(evlist, name, NULL); if (err) ret = err; } } } idx = 0; evsel = perf_evlist__first(evlist); for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { /* skip invalid cache type */ if (!perf_evsel__is_cache_op_valid(type, op)) continue; for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { __perf_evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); if (evsel->idx != idx) continue; ++idx; if (strcmp(perf_evsel__name(evsel), name)) { pr_debug("%s != %s\n", perf_evsel__name(evsel), name); ret = -1; } evsel = perf_evsel__next(evsel); } } } perf_evlist__delete(evlist); return ret; } static int __perf_evsel__name_array_test(const char *names[], int nr_names) { int i, err; struct perf_evsel *evsel; struct perf_evlist *evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; for (i = 0; i < nr_names; ++i) { err = parse_events(evlist, names[i], NULL); if (err) { pr_debug("failed to parse event '%s', err %d\n", names[i], err); goto out_delete_evlist; } } err = 0; evlist__for_each_entry(evlist, evsel) { if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { --err; pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); } } out_delete_evlist: perf_evlist__delete(evlist); return err; } #define perf_evsel__name_array_test(names) \ __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused) { int err = 0, ret = 0; err = perf_evsel__name_array_test(perf_evsel__hw_names); if (err) ret = err; err = __perf_evsel__name_array_test(perf_evsel__sw_names, PERF_COUNT_SW_DUMMY + 1); if (err) ret = err; err = perf_evsel__roundtrip_cache_name_test(); if (err) ret = err; return ret; } ass='path'>path: root/net/netrom/nr_route.c