/*
 * linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return false;

	if (page_zone(page) != zone)
		return false;

	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif
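For context, first_online_pgdat(), next_online_pgdat() and next_zone() exist to back the node/zone iteration macros in include/linux/mmzone.h. The sketch below paraphrases the shape of those macros from memory rather than quoting a specific kernel version, and count_present_pages() is a hypothetical caller added only to show how the iterator is used:

/* Walk every online node's pg_data_t, starting from the first online node. */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/* Walk every zone of every online node; next_zone() hops to the next
 * node's zones once the current node's array is exhausted. */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

/* Hypothetical caller: sum present pages across all zones of all nodes. */
static unsigned long count_present_pages(void)
{
	struct zone *zone;
	unsigned long present = 0;

	for_each_zone(zone)
		present += zone->present_pages;
	return present;
}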
author     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>  2017-01-09 16:00:44 +0530
committer  J. Bruce Fields <bfields@redhat.com>                        2017-01-12 16:14:47 -0500
commit     ce1ca7d2d140a1f4aaffd297ac487f246963dd2f
tree       5aeab5e0cb6ef404c894a18251588278b9432a2c /net/openvswitch/vport-geneve.c
parent     546125d1614264d26080817d0c8cddb9b25081fa
svcrdma: avoid duplicate dma unmapping during error recovery
In rdma_read_chunk_frmr(), when ib_post_send() fails, the error code path
invokes ib_dma_unmap_sg() to unmap the sg list. It then invokes
svc_rdma_put_frmr(), which in turn tries to unmap the same sg list through
ib_dma_unmap_sg() again. This second unmap is invalid and could lead to
problems when the iova being unmapped is subsequently reused. Remove the
call to unmap in rdma_read_chunk_frmr() and let svc_rdma_put_frmr() handle
it.

Fixes: 412a15c0fe53 ("svcrdma: Port to new memory registration API")
Cc: stable@vger.kernel.org
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
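The shape of the bug is generic: an error path releases a resource directly and then calls a cleanup helper that releases it again. The self-contained userspace C sketch below models that before/after structure only; toy_sg, toy_frmr, toy_unmap_sg() and toy_put_frmr() are illustrative stand-ins, not the actual svcrdma structures or functions.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for a DMA-mapped sg list and the frmr that owns it. */
struct toy_sg { void *mapping; };
struct toy_frmr { struct toy_sg *sg; };

/* Models the unmap: releasing an already-released mapping is invalid. */
static void toy_unmap_sg(struct toy_sg *sg)
{
	if (!sg->mapping) {
		fprintf(stderr, "BUG: sg list unmapped twice\n");
		abort();
	}
	free(sg->mapping);
	sg->mapping = NULL;
}

/* Models the cleanup helper, which unmaps the sg list itself. */
static void toy_put_frmr(struct toy_frmr *frmr)
{
	toy_unmap_sg(frmr->sg);
}

/* Error path before the fix: explicit unmap, then the helper unmaps again. */
static void error_path_before(struct toy_frmr *frmr)
{
	toy_unmap_sg(frmr->sg);		/* first unmap */
	toy_put_frmr(frmr);		/* second unmap of the same sg list */
}

/* Error path after the fix: the cleanup helper is the single owner of the unmap. */
static void error_path_after(struct toy_frmr *frmr)
{
	toy_put_frmr(frmr);
}

int main(void)
{
	struct toy_sg sg = { .mapping = malloc(16) };
	struct toy_frmr frmr = { .sg = &sg };

	error_path_after(&frmr);	/* mapping released exactly once */
	(void)error_path_before;	/* calling this instead would trip the double-unmap check */
	return 0;
}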