#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H

#include <linux/perf_event.h>
#include <sys/types.h>
#include <stdbool.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include "event.h"
#include "env.h"

enum {
	HEADER_RESERVED		= 0,	/* always cleared */
	HEADER_FIRST_FEATURE	= 1,
	HEADER_TRACING_DATA	= 1,
	HEADER_BUILD_ID,
	HEADER_HOSTNAME,
	HEADER_OSRELEASE,
	HEADER_VERSION,
	HEADER_ARCH,
	HEADER_NRCPUS,
	HEADER_CPUDESC,
	HEADER_CPUID,
	HEADER_TOTAL_MEM,
	HEADER_CMDLINE,
	HEADER_EVENT_DESC,
	HEADER_CPU_TOPOLOGY,
	HEADER_NUMA_TOPOLOGY,
	HEADER_BRANCH_STACK,
	HEADER_PMU_MAPPINGS,
	HEADER_GROUP_DESC,
	HEADER_AUXTRACE,
	HEADER_STAT,
	HEADER_CACHE,
	HEADER_LAST_FEATURE,
	HEADER_FEAT_BITS	= 256,
};

enum perf_header_version {
	PERF_HEADER_VERSION_1,
	PERF_HEADER_VERSION_2,
};

struct perf_file_section {
	u64 offset;
	u64 size;
};

struct perf_file_header {
	u64				magic;
	u64				size;
	u64				attr_size;
	struct perf_file_section	attrs;
	struct perf_file_section	data;
	/* event_types is ignored */
	struct perf_file_section	event_types;
	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};

struct perf_pipe_file_header {
	u64				magic;
	u64				size;
};

struct perf_header;

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd);

struct perf_header {
	enum perf_header_version	version;
	bool				needs_swap;
	u64				data_offset;
	u64				data_size;
	u64				feat_offset;
	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
	struct perf_env 		env;
};

struct perf_evlist;
struct perf_session;

int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit);
int perf_header__write_pipe(int fd);

void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);

int perf_header__set_cmdline(int argc, const char **argv);

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data));

int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process);
int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_session *session,
				 perf_event__handler_t process);
int perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					     struct perf_evsel *evsel,
					     perf_event__handler_t process);
int perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					      struct perf_evsel *evsel,
					      perf_event__handler_t process);
int perf_event__synthesize_event_update_name(struct perf_tool *tool,
					     struct perf_evsel *evsel,
					     perf_event__handler_t process);
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					     struct perf_evsel *evsel,
					     perf_event__handler_t process);
int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
			     struct perf_evlist **pevlist);
int perf_event__process_event_update(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_evlist **pevlist);
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);

int perf_event__synthesize_tracing_data(struct perf_tool *tool,
					int fd, struct perf_evlist *evlist,
					perf_event__handler_t process);
int perf_event__process_tracing_data(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_session *session);

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine);
int perf_event__process_build_id(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session);
bool is_perf_magic(u64 magic);

#define NAME_ALIGN 64

int write_padded(int fd, const void *bf, size_t count, size_t count_aligned);

/*
 * arch specific callback
 */
int get_cpuid(char *buffer, size_t sz);

char *get_cpuid_str(void);
#endif /* __PERF_HEADER_H */
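The feature-bit helpers declared above (perf_header__set_feat() and friends) operate on the adds_features bitmap embedded in struct perf_header. A minimal sketch of how such helpers can be implemented on top of the bitmap API follows; the bodies are an illustration of the intended semantics, not necessarily the exact perf sources.

#include <linux/bitmap.h>
#include <stdbool.h>

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);		/* mark the feature section as present */
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);		/* drop the feature section */
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);	/* query before reading a section */
}

A writer would typically set the bits it intends to emit, for example perf_header__set_feat(&session->header, HEADER_BUILD_ID), before calling perf_session__write_header(); a reader checks them with perf_header__has_feat() while walking the on-disk sections.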
author		Thomas Gleixner <tglx@linutronix.de>	2017-01-31 09:37:34 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2017-01-31 21:47:58 +0100
commit		0becc0ae5b42828785b589f686725ff5bc3b9b25 (patch)
tree		be6d0e1f37c38ed0a7dd5da2d4b1e93f0fb43101 /fs/befs/datastream.h
parent		24c2503255d35c269b67162c397a1a1c1e02f6ce (diff)
x86/mce: Make timer handling more robust
Erik reported that on pre-production hardware a CMCI storm triggers the BUG_ON() in add_timer_on(). The reason is that the per-CPU MCE timer is started by the CMCI logic before the MCE CPU hotplug callback starts the timer with add_timer_on(). So the timer is already queued, which triggers the BUG.

Using add_timer_on() is pretty pointless in this code because the timer is strictly per CPU, initialized as pinned, and all operations which arm the timer happen on the CPU to which the timer belongs.

Simplify the whole machinery by using mod_timer() instead of add_timer_on(), which avoids the problem because mod_timer() can handle already queued timers.

Use __start_timer() everywhere so that the earliest armed expiry time is preserved.

Reported-by: Erik Veijola <erik.veijola@intel.com>
Tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701310936080.3457@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
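The change described above boils down to a small helper that arms the per-CPU timer via mod_timer() and only ever moves the expiry earlier. A rough sketch of such a helper is shown below; the name __start_timer comes from the commit message, while the details of the interval handling are assumptions.

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/irqflags.h>

static void __start_timer(struct timer_list *t, unsigned long interval)
{
	unsigned long when = jiffies + interval;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * mod_timer() copes with an already queued timer, so unlike
	 * add_timer_on() this cannot hit a BUG_ON() when the CMCI logic
	 * armed the timer before the hotplug callback ran.  Only rearm
	 * when the timer is idle or the new expiry is earlier, so the
	 * earliest armed expiry time is preserved.
	 */
	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));

	local_irq_restore(flags);
}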