/* * Scatter-Gather buffer * * Copyright (c) by Takashi Iwai * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include #include #include #include #include /* table entries are align to 32 */ #define SGBUF_TBL_ALIGN 32 #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN) int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab) { struct snd_sg_buf *sgbuf = dmab->private_data; struct snd_dma_buffer tmpb; int i; if (! sgbuf) return -EINVAL; vunmap(dmab->area); dmab->area = NULL; tmpb.dev.type = SNDRV_DMA_TYPE_DEV; tmpb.dev.dev = sgbuf->dev; for (i = 0; i < sgbuf->pages; i++) { if (!(sgbuf->table[i].addr & ~PAGE_MASK)) continue; /* continuous pages */ tmpb.area = sgbuf->table[i].buf; tmpb.addr = sgbuf->table[i].addr & PAGE_MASK; tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT; snd_dma_free_pages(&tmpb); } kfree(sgbuf->table); kfree(sgbuf->page_table); kfree(sgbuf); dmab->private_data = NULL; return 0; } #define MAX_ALLOC_PAGES 32 void *snd_malloc_sgbuf_pages(struct device *device, size_t size, struct snd_dma_buffer *dmab, size_t *res_size) { struct snd_sg_buf *sgbuf; unsigned int i, pages, chunk, maxpages; struct snd_dma_buffer tmpb; struct snd_sg_page *table; struct page **pgtable; dmab->area = NULL; dmab->addr = 0; dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); if (! 
sgbuf) return NULL; sgbuf->dev = device; pages = snd_sgbuf_aligned_pages(size); sgbuf->tblsize = sgbuf_align_table(pages); table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL); if (!table) goto _failed; sgbuf->table = table; pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL); if (!pgtable) goto _failed; sgbuf->page_table = pgtable; /* allocate pages */ maxpages = MAX_ALLOC_PAGES; while (pages > 0) { chunk = pages; /* don't be too eager to take a huge chunk */ if (chunk > maxpages) chunk = maxpages; chunk <<= PAGE_SHIFT; if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device, chunk, &tmpb) < 0) { if (!sgbuf->pages) goto _failed; if (!res_size) goto _failed; size = sgbuf->pages * PAGE_SIZE; break; } chunk = tmpb.bytes >> PAGE_SHIFT; for (i = 0; i < chunk; i++) { table->buf = tmpb.area; table->addr = tmpb.addr; if (!i) table->addr |= chunk; /* mark head */ table++; *pgtable++ = virt_to_page(tmpb.area); tmpb.area += PAGE_SIZE; tmpb.addr += PAGE_SIZE; } sgbuf->pages += chunk; pages -= chunk; if (chunk < maxpages) maxpages = chunk; } sgbuf->size = size; dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL); if (! dmab->area) goto _failed; if (res_size) *res_size = sgbuf->size; return dmab->area; _failed: snd_free_sgbuf_pages(dmab); /* free the table */ return NULL; } /* * compute the max chunk size with continuous pages on sg-buffer */ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, unsigned int ofs, unsigned int size) { struct snd_sg_buf *sg = dmab->private_data; unsigned int start, end, pg; start = ofs >> PAGE_SHIFT; end = (ofs + size - 1) >> PAGE_SHIFT; /* check page continuity */ pg = sg->table[start].addr >> PAGE_SHIFT; for (;;) { start++; if (start > end) break; pg++; if ((sg->table[start].addr >> PAGE_SHIFT) != pg) return (start << PAGE_SHIFT) - ofs; } /* ok, all on continuous pages */ return size; } EXPORT_SYMBOL(snd_sgbuf_get_chunk_size); tion>mode:
author	Marc Zyngier <marc.zyngier@arm.com>	2017-01-17 16:00:48 +0000
committer	Thomas Gleixner <tglx@linutronix.de>	2017-01-30 15:18:56 +0100
commit	08d85f3ea99f1eeafc4e8507936190e86a16ee8c (patch)
tree	410bb1acd0cd7dcfaad37ae7b63ff243b7fa4bee	/sound/soc/codecs/twl6040.c
parent	566cf877a1fcb6d6dc0126b076aad062054c2637 (diff)
irqdomain: Avoid activating interrupts more than once
Since commit f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early"), we can end up activating a PCI/MSI twice (once at allocation time, and once at startup time). This is normally of no consequence, except that there is some HW out there that may misbehave if activate is used more than once (the GICv3 ITS, for example, uses the activate callback to issue the MAPVI command, and the architecture spec says that "If there is an existing mapping for the EventID-DeviceID combination, behavior is UNPREDICTABLE"). While this could be worked around in each individual driver, it may make more sense to tackle the issue at the core level. In order to avoid getting in that situation, let's have a per-interrupt flag to remember if we have already activated that interrupt or not. Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early") Reported-and-tested-by: Andre Przywara <andre.przywara@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1484668848-24361-1-git-send-email-marc.zyngier@arm.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'sound/soc/codecs/twl6040.c')