#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/inotify.h>
#include <linux/fanotify.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/exportfs.h>

#include "inotify/inotify.h"
#include "../fs/mount.h"

#if defined(CONFIG_PROC_FS)

#if defined(CONFIG_INOTIFY_USER) || defined(CONFIG_FANOTIFY)

/* Walk all marks attached to the group and emit one fdinfo line per mark. */
static void show_fdinfo(struct seq_file *m, struct file *f,
			void (*show)(struct seq_file *m,
				     struct fsnotify_mark *mark))
{
	struct fsnotify_group *group = f->private_data;
	struct fsnotify_mark *mark;

	mutex_lock(&group->mark_mutex);
	list_for_each_entry(mark, &group->marks_list, g_list) {
		show(m, mark);
		if (seq_has_overflowed(m))
			break;
	}
	mutex_unlock(&group->mark_mutex);
}

#if defined(CONFIG_EXPORTFS)
/* Encode the watched inode as an opaque file handle and print it in hex. */
static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
{
	struct {
		struct file_handle handle;
		u8 pad[MAX_HANDLE_SZ];
	} f;
	int size, ret, i;

	f.handle.handle_bytes = sizeof(f.pad);
	size = f.handle.handle_bytes >> 2;

	ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
	if ((ret == FILEID_INVALID) || (ret < 0)) {
		WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
		return;
	}

	f.handle.handle_type = ret;
	f.handle.handle_bytes = size * sizeof(u32);

	seq_printf(m, "fhandle-bytes:%x fhandle-type:%x f_handle:",
		   f.handle.handle_bytes, f.handle.handle_type);

	for (i = 0; i < f.handle.handle_bytes; i++)
		seq_printf(m, "%02x", (int)f.handle.f_handle[i]);
}
#else
static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
{
}
#endif

#ifdef CONFIG_INOTIFY_USER

static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
{
	struct inotify_inode_mark *inode_mark;
	struct inode *inode;

	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE) ||
	    !(mark->flags & FSNOTIFY_MARK_FLAG_INODE))
		return;

	inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
	inode = igrab(mark->inode);
	if (inode) {
		/*
		 * IN_ALL_EVENTS represents all of the mask bits
		 * that we expose to userspace. There is at
		 * least one bit (FS_EVENT_ON_CHILD) which is
		 * used only internally to the kernel.
		 */
		u32 mask = mark->mask & IN_ALL_EVENTS;
		seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:%x ",
			   inode_mark->wd, inode->i_ino, inode->i_sb->s_dev,
			   mask, mark->ignored_mask);
		show_mark_fhandle(m, inode);
		seq_putc(m, '\n');
		iput(inode);
	}
}

void inotify_show_fdinfo(struct seq_file *m, struct file *f)
{
	show_fdinfo(m, f, inotify_fdinfo);
}

#endif /* CONFIG_INOTIFY_USER */

#ifdef CONFIG_FANOTIFY

static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
{
	unsigned int mflags = 0;
	struct inode *inode;

	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE))
		return;

	if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)
		mflags |= FAN_MARK_IGNORED_SURV_MODIFY;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = igrab(mark->inode);
		if (!inode)
			return;
		seq_printf(m, "fanotify ino:%lx sdev:%x mflags:%x mask:%x ignored_mask:%x ",
			   inode->i_ino, inode->i_sb->s_dev,
			   mflags, mark->mask, mark->ignored_mask);
		show_mark_fhandle(m, inode);
		seq_putc(m, '\n');
		iput(inode);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT) {
		struct mount *mnt = real_mount(mark->mnt);

		seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
			   mnt->mnt_id, mflags, mark->mask, mark->ignored_mask);
	}
}

void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct fsnotify_group *group = f->private_data;
	unsigned int flags = 0;

	switch (group->priority) {
	case FS_PRIO_0:
		flags |= FAN_CLASS_NOTIF;
		break;
	case FS_PRIO_1:
		flags |= FAN_CLASS_CONTENT;
		break;
	case FS_PRIO_2:
		flags |= FAN_CLASS_PRE_CONTENT;
		break;
	}

	if (group->max_events == UINT_MAX)
		flags |= FAN_UNLIMITED_QUEUE;

	if (group->fanotify_data.max_marks == UINT_MAX)
		flags |= FAN_UNLIMITED_MARKS;

	seq_printf(m, "fanotify flags:%x event-flags:%x\n",
		   flags, group->fanotify_data.f_flags);

	show_fdinfo(m, f, fanotify_fdinfo);
}

#endif /* CONFIG_FANOTIFY */

#endif /* CONFIG_INOTIFY_USER || CONFIG_FANOTIFY */

#endif /* CONFIG_PROC_FS */
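The lines produced by the handlers above end up in /proc/<pid>/fdinfo/<fd> for the corresponding inotify or fanotify descriptor. The following minimal userspace sketch (an illustration of mine, not part of the kernel file) creates an inotify watch and dumps its fdinfo entry; which fields appear depends on the kernel configuration (for example, the fhandle part requires CONFIG_EXPORTFS).

/*
 * Illustration only: create an inotify watch on /tmp and print the
 * /proc/self/fdinfo/<fd> entry generated by inotify_show_fdinfo() above.
 */
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
	char path[64], line[256];
	int fd = inotify_init();
	FILE *fp;

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
		perror("inotify");
		return 1;
	}

	/* Each watch contributes one "inotify wd:... ino:... mask:..." line. */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	fp = fopen(path, "r");
	if (!fp) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}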
author    Iago Abal <mail@iagoabal.eu>        2017-01-11 14:00:21 +0100
committer Vinod Koul <vinod.koul@intel.com>   2017-01-25 15:35:11 +0530
commit    91539eb1fda2d530d3b268eef542c5414e54bf1a
tree      960f5ca6342ad20837aff18aad6e8ecd7da32fd6
parent    6610d0edf6dc7ee97e46ab3a538a565c79d26199
dmaengine: pl330: fix double lock
The static bug finder EBA (http://www.iagoabal.eu/eba/) reported the
following double-lock bug:

    Double lock:
    1. spin_lock_irqsave(pch->lock, flags) at pl330_free_chan_resources:2236;
    2. call to function `pl330_release_channel' immediately after;
    3. call to function `dma_pl330_rqcb' in line 1753;
    4. spin_lock_irqsave(pch->lock, flags) at dma_pl330_rqcb:1505.

I have fixed it as suggested by Marek Szyprowski.

First, I have replaced `pch->lock' with `pl330->lock' in functions
`pl330_alloc_chan_resources' and `pl330_free_chan_resources'. This avoids
the double-lock by acquiring a different lock than `dma_pl330_rqcb'.

NOTE that, as a result, `pl330_free_chan_resources' executes
`list_splice_tail_init' on `pch->work_list' under lock `pl330->lock',
whereas in the rest of the code `pch->work_list' is protected by
`pch->lock'. I don't know if this may cause race conditions. Similarly,
`pch->cyclic' is written by `pl330_alloc_chan_resources' under
`pl330->lock' but read by `pl330_tx_submit' under `pch->lock'.

Second, I have removed locking from the `pl330_request_channel' and
`pl330_release_channel' functions. Function `pl330_request_channel' is
only called from `pl330_alloc_chan_resources', so the lock is already
held. Function `pl330_release_channel' is called from
`pl330_free_chan_resources', which already holds the lock, and from
`pl330_del'. Function `pl330_del' is called in an error path of
`pl330_probe' and at the end of `pl330_remove', but I assume that there
cannot be concurrent accesses to the protected data at those points.

Signed-off-by: Iago Abal <mail@iagoabal.eu>
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
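The shape of the bug and of the fix can be sketched in isolation. The sketch below is a hypothetical userspace analogy, not the real pl330 driver code: `my_dev', `my_chan', `my_rqcb' and the other names are invented, and pthread error-checking mutexes stand in for the kernel spinlocks so that the buggy path reports EDEADLK instead of hanging.

/*
 * Illustration only: the buggy path holds the per-channel lock while a
 * callback re-acquires it; the fixed path serializes on a device-wide
 * lock instead, as the commit above does with pl330->lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct my_dev {				/* plays the role of pl330 */
	pthread_mutex_t lock;		/* "pl330->lock" */
};

struct my_chan {			/* plays the role of pch */
	pthread_mutex_t lock;		/* "pch->lock" */
	struct my_dev *dev;
};

/* Analogous to dma_pl330_rqcb(): always takes the per-channel lock. */
static void my_rqcb(struct my_chan *ch)
{
	int err = pthread_mutex_lock(&ch->lock);

	if (err)
		printf("callback: lock failed: %s\n", strerror(err));
	else
		pthread_mutex_unlock(&ch->lock);
}

/* Analogous to pl330_release_channel(), which ends up running the callback. */
static void my_release_channel(struct my_chan *ch)
{
	my_rqcb(ch);
}

/* Buggy shape: hold ch->lock, then call a path that re-locks ch->lock. */
static void free_chan_resources_buggy(struct my_chan *ch)
{
	pthread_mutex_lock(&ch->lock);
	my_release_channel(ch);		/* double lock reported here */
	pthread_mutex_unlock(&ch->lock);
}

/* Fixed shape: take the device-wide lock, so ch->lock is free for the callback. */
static void free_chan_resources_fixed(struct my_chan *ch)
{
	pthread_mutex_lock(&ch->dev->lock);
	my_release_channel(ch);
	pthread_mutex_unlock(&ch->dev->lock);
}

int main(void)
{
	pthread_mutexattr_t attr;
	struct my_dev dev;
	struct my_chan ch = { .dev = &dev };

	/* Error-checking mutexes turn the self-deadlock into EDEADLK. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&dev.lock, &attr);
	pthread_mutex_init(&ch.lock, &attr);

	free_chan_resources_buggy(&ch);	/* prints the deadlock error */
	free_chan_resources_fixed(&ch);	/* completes normally */
	return 0;
}

Built with `gcc -pthread', the buggy path prints the deadlock error while the fixed path, which acquires a different lock than the callback, completes normally.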