/*
 * iommu trace points
 *
 * Copyright (C) 2013 Shuah Khan
 *
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iommu

#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/pci.h>

struct device;

DECLARE_EVENT_CLASS(iommu_group_event,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev),

	TP_STRUCT__entry(
		__field(int, gid)
		__string(device, dev_name(dev))
	),

	TP_fast_assign(
		__entry->gid = group_id;
		__assign_str(device, dev_name(dev));
	),

	TP_printk("IOMMU: groupID=%d device=%s",
			__entry->gid, __get_str(device)
	)
);

DEFINE_EVENT(iommu_group_event, add_device_to_group,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev)
);

DEFINE_EVENT(iommu_group_event, remove_device_from_group,

	TP_PROTO(int group_id, struct device *dev),

	TP_ARGS(group_id, dev)
);

DECLARE_EVENT_CLASS(iommu_device_event,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
	),

	TP_fast_assign(
		__assign_str(device, dev_name(dev));
	),

	TP_printk("IOMMU: device=%s", __get_str(device)
	)
);

DEFINE_EVENT(iommu_device_event, attach_device_to_domain,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev)
);

DEFINE_EVENT(iommu_device_event, detach_device_from_domain,

	TP_PROTO(struct device *dev),

	TP_ARGS(dev)
);

TRACE_EVENT(map,

	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),

	TP_ARGS(iova, paddr, size),

	TP_STRUCT__entry(
		__field(u64, iova)
		__field(u64, paddr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
	),

	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
			__entry->iova, __entry->paddr, __entry->size
	)
);

TRACE_EVENT(unmap,

	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),

	TP_ARGS(iova, size, unmapped_size),

	TP_STRUCT__entry(
		__field(u64, iova)
		__field(size_t, size)
		__field(size_t, unmapped_size)
	),

	TP_fast_assign(
		__entry->iova = iova;
		__entry->size = size;
		__entry->unmapped_size = unmapped_size;
	),

	TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
			__entry->iova, __entry->size, __entry->unmapped_size
	)
);

DECLARE_EVENT_CLASS(iommu_error,

	TP_PROTO(struct device *dev, unsigned long iova, int flags),

	TP_ARGS(dev, iova, flags),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(u64, iova)
		__field(int, flags)
	),

	TP_fast_assign(
		__assign_str(device, dev_name(dev));
		__assign_str(driver, dev_driver_string(dev));
		__entry->iova = iova;
		__entry->flags = flags;
	),

	TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
			__get_str(driver), __get_str(device),
			__entry->iova, __entry->flags
	)
);

DEFINE_EVENT(iommu_error, io_page_fault,

	TP_PROTO(struct device *dev, unsigned long iova, int flags),

	TP_ARGS(dev, iova, flags)
);

#endif /* _TRACE_IOMMU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
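For context on how a header like this is consumed (illustrative, not part of the file): each TRACE_EVENT()/DEFINE_EVENT() above expands into a trace_<name>() helper, and exactly one .c file in the IOMMU core instantiates the events with CREATE_TRACE_POINTS before including the header. The sketch below assumes a hypothetical call site; only the trace_*() helper names and the CREATE_TRACE_POINTS convention follow from the macros above.

/* In exactly one compilation unit the events are instantiated: */
#define CREATE_TRACE_POINTS
#include <trace/events/iommu.h>

/* Hypothetical call sites showing how the events defined above are emitted: */
static void example_emit_iommu_events(struct device *dev, unsigned long iova,
				      phys_addr_t paddr, size_t size)
{
	trace_attach_device_to_domain(dev);	/* iommu_device_event class */
	trace_map(iova, paddr, size);		/* TRACE_EVENT(map) */
	trace_unmap(iova, size, size);		/* TRACE_EVENT(unmap) */
}

Once compiled in, the events can be enabled at runtime through tracefs, e.g. echo 1 > /sys/kernel/tracing/events/iommu/enable.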
author    Valentin Rothberg <valentinrothberg@gmail.com>    2016-10-05 07:57:26 +0200
committer Michael Ellerman <mpe@ellerman.id.au>             2016-10-27 21:52:59 +1100
commit    39715bf972ed4fee18fe5409609a971fb16b1771
tree      1d423c94bc8d3199a42f16b6296c9d96e929982b
parent    09b7e37b18eecc1e347f4b1a3bc863f32801f634
powerpc/process: Fix CONFIG_ALIVEC typo in restore_tm_state()
It should be ALTIVEC, not ALIVEC.

Cyril explains: If a thread performs a transaction with altivec and then
gets preempted for whatever reason, this bug may cause the kernel to not
re-enable altivec when that thread runs again. This will result in an
altivec unavailable fault; when that fault happens inside a user
transaction the kernel has no choice but to enable altivec and doom the
transaction.

The result is that transactions using altivec may get aborted more often
than they should.

The difficulty in catching this with a selftest is my deliberate use of
the word "may" above. Optimisations to avoid FPU/altivec/VSX faults mean
that the kernel will always leave them on for 255 switches. This code
prevents the kernel turning it off if it got to the 256th switch (and
userspace was transactional).

Fixes: dc16b553c949 ("powerpc: Always restore FPU/VEC/VSX if hardware transactional memory in use")
Reviewed-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
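For reference, the shape of the fix (a sketch, not the verbatim diff): the guard in restore_tm_state() in arch/powerpc/kernel/process.c tested a Kconfig symbol that does not exist, so the preprocessor dropped the altivec-restore branch entirely. Only the CONFIG_ALIVEC -> CONFIG_ALTIVEC rename is taken from the commit; the statements inside the block are illustrative.

	/* was: #ifdef CONFIG_ALIVEC -- no such symbol exists, so this block
	 * compiled away and altivec could stay disabled after a preemption */
#ifdef CONFIG_ALTIVEC
	if (msr_diff & MSR_VEC) {
		/* reload the thread's altivec (VMX) state and re-enable
		 * MSR_VEC so the transactional thread does not take an
		 * altivec-unavailable fault on its next use */
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif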