/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not aligned.  percpu-km code will whine about it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end)
{
	return 0;
}

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	/* nada */
}

static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irq(&pcpu_lock);

	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n",
			alloc_pages - nr_pages);

	return 0;
}
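The waste warning at the end of pcpu_verify_alloc_info() is easiest to see with concrete numbers. The following stand-alone sketch redoes the same arithmetic in plain C; the unit count and unit size are made up for illustration and are not taken from any real configuration.

#include <stdio.h>

int main(void)
{
	const size_t page_size = 4096;		/* hypothetical 4 KiB pages */
	const size_t nr_units = 6;		/* hypothetical */
	const size_t unit_size = 64 * 1024;	/* hypothetical 64 KiB units */

	/* Pages actually needed by the chunk: 6 * 64 KiB / 4 KiB = 96. */
	size_t nr_pages = nr_units * unit_size / page_size;

	/* alloc_pages() hands out power-of-two blocks, so the chunk is
	 * rounded up the same way roundup_pow_of_two() does: 96 -> 128. */
	size_t alloc_pages = 1;

	while (alloc_pages < nr_pages)
		alloc_pages <<= 1;

	if (alloc_pages > nr_pages)
		printf("wasting %zu pages per chunk\n", alloc_pages - nr_pages);
	return 0;
}

With these numbers the sketch prints "wasting 32 pages per chunk", which is exactly the situation the header comment warns about when the chunk size is not a power-of-two multiple of PAGE_SIZE.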
net/mac80211 commit log (age, commit message, author, files changed, lines changed):
2017-02-06  mac80211: Allocate a sync skcipher explicitly for FILS AEAD (Jouni Malinen; 1 file changed, +2/-2 lines)
The skcipher could have been of the async variant which may return from skcipher_encrypt() with -EINPROGRESS after having queued the request. The FILS AEAD implementation here does not have code for dealing with that possibility, so allocate a sync cipher explicitly to avoid potential issues with hardware accelerators. This is based on the patch sent out by Ard.

Fixes: 39404feee691 ("mac80211: FILS AEAD protection for station mode association frames")
Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Jouni Malinen <jouni@qca.qualcomm.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
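As a rough sketch of the kind of change this commit describes (the actual call site in net/mac80211/fils_aead.c is not reproduced here, and the helper name below is hypothetical), the crypto API lets a caller exclude asynchronous implementations by passing CRYPTO_ALG_ASYNC in the mask argument of crypto_alloc_skcipher():

#include <crypto/skcipher.h>

/* Hypothetical helper, illustrative only: request a synchronous
 * "ctr(aes)" transform by masking out async implementations, instead
 * of accepting whatever (possibly async) implementation the crypto
 * API would pick with a mask of 0. */
static struct crypto_skcipher *fils_alloc_ctr_aes(void)
{
	/* With mask = 0 the API may return an async tfm whose
	 * skcipher_encrypt() can come back with -EINPROGRESS.
	 * Requiring a sync tfm avoids that code path entirely. */
	return crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
}

The design point is that the FILS code has no completion handling, so rather than adding async request plumbing, the simpler fix is to constrain allocation to synchronous transforms.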
2017-02-06  mac80211: Fix FILS AEAD protection in Association Request frame (Jouni Malinen; 1 file changed, +1/-1 lines)
An incorrect num_elem parameter value (1 instead of 5) was used in the aes_siv_encrypt() call. This resulted in only the first of the five AAD vectors to SIV being included in the calculation, which does not protect all the contents correctly and would not interoperate with a standard-compliant implementation. Fix this by using the correct number. A matching fix is needed on the AP side (hostapd) to get FILS authentication working properly.

Fixes: 39404feee691 ("mac80211: FILS AEAD protection for station mode association frames")
Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Jouni Malinen <jouni@qca.qualcomm.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
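To see why the vector count matters, here is a toy, self-contained sketch of how an S2V-style construction walks its associated-data vectors. The absorb() stand-in and the generic "aad1".."aad5" strings are hypothetical; they are not the real AES-CMAC code or the real frame fields, they only make the effect of num_elem observable.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for one CMAC pass over a single AAD vector. */
static uint32_t absorb(uint32_t acc, const uint8_t *data, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		acc = acc * 31 + data[i];
	return acc;
}

/* Toy "tag" over the first num_elem AAD vectors: one pass per vector,
 * so vectors beyond num_elem never influence the result. */
static uint32_t toy_s2v(size_t num_elem, const uint8_t *addr[], const size_t *len)
{
	uint32_t acc = 0;
	size_t i;

	for (i = 0; i < num_elem; i++)
		acc = absorb(acc, addr[i], len[i]);
	return acc;
}

int main(void)
{
	/* Five generic AAD vectors standing in for the frame fields. */
	const uint8_t *aad[5] = {
		(const uint8_t *)"aad1", (const uint8_t *)"aad2",
		(const uint8_t *)"aad3", (const uint8_t *)"aad4",
		(const uint8_t *)"aad5",
	};
	const size_t aad_len[5] = { 4, 4, 4, 4, 4 };

	printf("num_elem=1 -> %08x\n", toy_s2v(1, aad, aad_len));	/* buggy count */
	printf("num_elem=5 -> %08x\n", toy_s2v(5, aad, aad_len));	/* fixed count */
	return 0;
}

The two printed tags differ, which mirrors the interoperability problem in the commit: a peer that authenticates all five vectors computes a different SIV value than an implementation that only covered the first one.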