/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/export.h>

#include "rds.h"

struct rds_page_remainder {
	struct page	*r_page;
	unsigned long	r_offset;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder,
				     rds_page_remainders);

/*
 * returns 0 on success or -errno on failure.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough).
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user)
{
	unsigned long ret;
	void *addr;

	addr = kmap(page);
	if (to_user) {
		rds_stats_add(s_copy_to_user, bytes);
		ret = copy_to_user(ptr, addr + offset, bytes);
	} else {
		rds_stats_add(s_copy_from_user, bytes);
		ret = copy_from_user(addr + offset, ptr, bytes);
	}
	kunmap(page);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);

/**
 * rds_page_remainder_alloc - build up regions of a message.
 *
 * @scat: Scatter list for message
 * @bytes: the number of bytes needed.
 * @gfp: the waiting behaviour of the allocation
 *
 * @gfp is always ored with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is a partial page then this stores the unused region of the
 * page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the transmit
 * path passes read-only page regions down to devices.  They hold a page
 * reference until they are done with the region.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp)
{
	struct rds_page_remainder *rem;
	unsigned long flags;
	struct page *page;
	int ret;

	gfp |= __GFP_HIGHMEM;

	/* jump straight to allocation if we're trying for a huge page */
	if (bytes >= PAGE_SIZE) {
		page = alloc_page(gfp);
		if (!page) {
			ret = -ENOMEM;
		} else {
			sg_set_page(scat, page, PAGE_SIZE, 0);
			ret = 0;
		}
		goto out;
	}

	rem = &per_cpu(rds_page_remainders, get_cpu());
	local_irq_save(flags);

	while (1) {
		/* avoid a tiny region getting stuck by tossing it */
		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
			rds_stats_inc(s_page_remainder_miss);
			__free_page(rem->r_page);
			rem->r_page = NULL;
		}

		/* hand out a fragment from the cached page */
		if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
			sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
			get_page(sg_page(scat));

			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

			rem->r_offset += ALIGN(bytes, 8);
			if (rem->r_offset >= PAGE_SIZE) {
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
			ret = 0;
			break;
		}

		/* alloc if there is nothing for us to use */
		local_irq_restore(flags);
		put_cpu();

		page = alloc_page(gfp);

		rem = &per_cpu(rds_page_remainders, get_cpu());
		local_irq_save(flags);

		if (!page) {
			ret = -ENOMEM;
			break;
		}

		/* did someone race to fill the remainder before us? */
		if (rem->r_page) {
			__free_page(page);
			continue;
		}

		/* otherwise install our page and loop around to alloc */
		rem->r_page = page;
		rem->r_offset = 0;
	}

	local_irq_restore(flags);
	put_cpu();
out:
	rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
		 ret ? NULL : sg_page(scat),
		 ret ? 0 : scat->offset, ret ? 0 : scat->length);

	return ret;
}
EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);

void rds_page_exit(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct rds_page_remainder *rem;

		rem = &per_cpu(rds_page_remainders, cpu);
		rdsdebug("cpu %u\n", cpu);

		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
	}
}
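The allocator above is easiest to see from a caller's point of view. The sketch below is not part of net/rds/page.c; it is a hypothetical caller (the helper name build_payload_sgs and its error-handling policy are assumptions) that fills a scatterlist one region at a time and lets rds_page_remainder_alloc() decide between a whole page and a slice of the cached per-cpu remainder.

/*
 * Hypothetical caller sketch (not from net/rds/page.c): fill up to
 * 'nents' scatterlist entries with 'total' bytes of payload.  Whole-page
 * requests come straight from alloc_page(); sub-page requests may be
 * carved out of the per-cpu remainder cached by rds_page_remainder_alloc().
 */
static int build_payload_sgs(struct scatterlist *sg, unsigned int nents,
			     unsigned long total, gfp_t gfp)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nents && total; i++) {
		unsigned long want = min_t(unsigned long, total, PAGE_SIZE);

		ret = rds_page_remainder_alloc(&sg[i], want, gfp);
		if (ret)
			return ret;	/* caller must drop the pages already taken */

		total -= sg[i].length;	/* length was set by sg_set_page() */
	}

	return total ? -ENOMEM : 0;
}

Because each fragment takes its own page reference (get_page() in the allocator), the caller can free or reuse the scatterlist entries independently of whoever still holds the cached remainder page.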
dmaengine: cppi41: Fix runtime PM timeouts with USB mass storage
Commit fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support")
added runtime PM support for cppi41, but had corner case issues. Some of
the issues were fixed with commit 098de42ad670 ("dmaengine: cppi41: Fix
unpaired pm runtime when only a USB hub is connected").

That fix however caused a new regression where we can get error -115
messages with USB on BeagleBone when connecting a USB mass storage
device to a hub. This is because when connecting a USB mass storage
device to a hub, the initial DMA transfers can take over 200ms to
complete and the cppi41 autosuspend delay times out.

To fix the issue, we want to implement refcounting for the chan_busy
array that contains the active DMA transfers. Increasing the autosuspend
delay won't help, as the delay could potentially be seconds, and it's
best to let the USB subsystem deal with the timeouts on errors.

The earlier attempt at runtime PM was buggy, as the pm_runtime_get/put()
calls could easily become unpaired because they did not follow the state
of the chan_busy array, as described in commit 098de42ad670 ("dmaengine:
cppi41: Fix unpaired pm runtime when only a USB hub is connected").

Let's fix the issue by adding pm_runtime_get() where a new transfer is
added to the chan_busy array, and calling pm_runtime_put() where a
chan_busy array entry is cleared. This prevents any autosuspend timeouts
from happening while DMA transfers are active.

Fixes: 098de42ad670 ("dmaengine: cppi41: Fix unpaired pm runtime when only a USB hub is connected")
Fixes: fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support")
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Bin Liu <b-liu@ti.com>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Kevin Hilman <khilman@baylibre.com>
Cc: Patrick Titiano <ptitiano@baylibre.com>
Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
Tested-by: Bin Liu <b-liu@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
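The diff itself is not reproduced on this page, so the following is only a rough sketch of the idea the log describes: take a runtime PM reference when a descriptor is added to chan_busy[] and drop it when the slot is cleared. The struct layout and helper names (cppi41_dd, push_desc_slot, pop_desc_slot) are assumptions for illustration, not the driver's actual code.

#include <linux/pm_runtime.h>

/* Sketch only: hold a runtime PM reference for as long as a transfer
 * occupies a chan_busy[] slot, so the autosuspend timer cannot expire
 * while DMA is still in flight.  Types and names are illustrative. */
static void push_desc_slot(struct cppi41_dd *cdd, unsigned int slot,
			   struct cppi41_channel *c)
{
	pm_runtime_get(cdd->ddev.dev);	/* async get; a transfer is starting */
	cdd->chan_busy[slot] = c;
}

static void pop_desc_slot(struct cppi41_dd *cdd, unsigned int slot)
{
	cdd->chan_busy[slot] = NULL;	/* transfer completed or was aborted */
	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);	/* idle timer restarts now */
}

Pairing the put with the autosuspend variant keeps the device powered for a short grace period after the last slot is cleared, so back-to-back transfers such as those issued while enumerating a mass storage device do not bounce the controller in and out of suspend.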