/*
 *  linux/mm/mmzone.c
 *
 *  management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Node walkers backing for_each_online_pgdat() */
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	/* Advance within this node's zones, then move to the next node */
	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return false;

	if (page_zone(page) != zone)
		return false;

	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	/* Retry until the cmpxchg on page->flags lands without interference */
	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif
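These walkers exist only to back the node and zone iterator macros. For reference, the corresponding definitions in include/linux/mmzone.h of this era look roughly like the following (quoted from the kernel headers; verify against your tree):

#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))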
author    Christoph Hellwig <hch@lst.de>  2017-02-05 18:10:35 +0100
committer Jens Axboe <axboe@fb.com>       2017-02-06 09:34:46 -0700
commit    eeeefd41843218c55a8782a6920f044d9bf6207a
tree      c342eac46626e62745aa8e1a982626efca8b121d
parent    c14024dbb156c8392908aaa822097d27c6af8ec8
block: don't try Write Same from __blkdev_issue_zeroout
Write Same can return an error asynchronously if it turns out the underlying SCSI device does not support Write Same, which makes a proper fallback to other methods in __blkdev_issue_zeroout impossible. Thus only issue a Write Same from blkdev_issue_zeroout and don't try it at all from __blkdev_issue_zeroout, as a non-invasive workaround.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Fixes: e73c23ff ("block: add async variant of blkdev_issue_zeroout")
Tested-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
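For illustration, the resulting split can be sketched as below. This is a reconstruction, not the literal patch: it assumes this era's block-layer helpers bdev_write_same(), blkdev_issue_write_same(), submit_bio_wait(), and the bio-chaining __blkdev_issue_zeroout() added by e73c23ff.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative sketch only -- not the literal patch. */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct bio *bio = NULL;
	int ret;

	/*
	 * Write Same is only attempted in this synchronous wrapper: if
	 * the device rejects it (possibly asynchronously), the error is
	 * observed here and we can still fall back to writing zero pages.
	 */
	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	/*
	 * The async helper no longer tries Write Same at all: an async
	 * Write Same failure would arrive too late for it to switch to
	 * another zeroing method.
	 */
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
				     &bio, discard);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}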