/*
 *  linux/mm/mmzone.c
 *
 * management codes for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
bool memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return false;

	if (page_zone(page) != zone)
		return false;

	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif
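
For context, these helpers exist to back the node and zone iteration macros in include/linux/mmzone.h: first_online_pgdat()/next_online_pgdat() drive for_each_online_pgdat(), and next_zone() drives for_each_zone(). The sketch below paraphrases those macros rather than quoting the header verbatim.

	/* Paraphrased from include/linux/mmzone.h -- illustrative, not a verbatim copy. */

	/* Walk every online node's pg_data_t. */
	#define for_each_online_pgdat(pgdat)                    \
		for (pgdat = first_online_pgdat();              \
		     pgdat;                                     \
		     pgdat = next_online_pgdat(pgdat))

	/* Walk every zone on every online node, in node order. */
	#define for_each_zone(zone)                             \
		for (zone = (first_online_pgdat())->node_zones; \
		     zone;                                      \
		     zone = next_zone(zone))

A caller such as the page allocator or vmstat code then just writes for_each_zone(zone) { ... } and never touches the pgdat bookkeeping directly.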
author    David S. Miller <davem@davemloft.net>  2017-01-30 14:28:22 -0800
committer David S. Miller <davem@davemloft.net>  2017-01-30 14:28:22 -0800
commit    54791b276b4000b307339f269d3bf7db877d536f (patch)
tree      1c2616bd373ce5ea28aac2a53e32f5b5834901ce /net/netlabel/netlabel_domainhash.c
parent    5d0e7705774dd412a465896d08d59a81a345c1e4 (diff)
parent    047487241ff59374fded8c477f21453681f5995c (diff)
Merge branch 'sparc64-non-resumable-user-error-recovery'
Liam R. Howlett says:

====================
sparc64: Recover from userspace non-resumable PIO & MEM errors

A non-resumable error from userspace is able to cause a kernel panic or
trap loop due to the setup and handling of the queued traps once in the
kernel. This patch series addresses both of these issues.

The queues are fixed by simply zeroing the memory before use.

PIO errors from userspace will result in a SIGBUS being sent to the user
process.

MEM errors from userspace will result in a SIGKILL and also cause the
offending pages to be claimed so they are no longer used in future tasks.
SIGKILL is used to ensure that the process does not try to coredump and
result in an attempt to read the memory again from within kernel space.

Although there is an HV call to scrub the memory (mem_scrub), there is no
easy way to guarantee that the real memory address(es) are not used by
other tasks. Clearing the error with mem_scrub would zero the memory and
cause the other processes to proceed with bad data.

The handling of other non-resumable errors remains unchanged and will
cause a panic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
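
The recovery policy described above boils down to a small decision: trap-queue pages are zeroed when they are set up, a userspace PIO error turns into SIGBUS, and a userspace MEM error turns into SIGKILL plus "claiming" the bad pages. The kernel-style sketch below only illustrates that policy and is not the code merged here: handle_user_nonresumable(), claim_bad_page() and the nonresumable_kind classification are hypothetical names, while send_sig(), panic(), pfn_valid(), pfn_to_page() and get_page() are real kernel APIs.

	/*
	 * Illustrative sketch of the policy in the merge message above.
	 * Not the actual sparc64 patch; hypothetical names are marked.
	 */
	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/signal.h>

	/* Hypothetical classification; the real queue entries carry
	 * attribute bits distinguishing PIO from MEM errors. */
	enum nonresumable_kind { NR_ERR_PIO, NR_ERR_MEM, NR_ERR_OTHER };

	/* Hypothetical helper: pin the physical page so the allocator never
	 * hands it out again. One plausible way to "claim" a page; the merged
	 * patches may implement this differently. */
	static void claim_bad_page(unsigned long paddr)
	{
		unsigned long pfn = paddr >> PAGE_SHIFT;

		if (pfn_valid(pfn))
			get_page(pfn_to_page(pfn));	/* leak a reference on purpose */
	}

	/* Hypothetical handler for a non-resumable error taken from userspace.
	 * The trap queue pages themselves are zeroed at allocation time
	 * elsewhere in the series, so stale entries never reach this point. */
	static void handle_user_nonresumable(enum nonresumable_kind kind,
					     unsigned long paddr)
	{
		switch (kind) {
		case NR_ERR_PIO:
			/* PIO error from userspace: SIGBUS to the current task. */
			send_sig(SIGBUS, current, 0);
			break;
		case NR_ERR_MEM:
			/*
			 * MEM error from userspace: SIGKILL so the task neither
			 * resumes nor coredumps (a coredump would re-read the
			 * bad memory from kernel space), then claim the page
			 * so it is never handed to another task.
			 */
			send_sig(SIGKILL, current, 0);
			claim_bad_page(paddr);
			break;
		default:
			/* All other non-resumable errors keep the old behaviour. */
			panic("Unexpected non-resumable error from userspace\n");
		}
	}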
Diffstat (limited to 'net/netlabel/netlabel_domainhash.c')