#include #include #include #include #include #include #include #include #include #include #include int nr_allocated; int preempt_count; struct kmem_cache { pthread_mutex_t lock; int size; int nr_objs; void *objs; void (*ctor)(void *); }; void *mempool_alloc(mempool_t *pool, int gfp_mask) { return pool->alloc(gfp_mask, pool->data); } void mempool_free(void *element, mempool_t *pool) { pool->free(element, pool->data); } mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { mempool_t *ret = malloc(sizeof(*ret)); ret->alloc = alloc_fn; ret->free = free_fn; ret->data = pool_data; return ret; } void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) { struct radix_tree_node *node; if (flags & __GFP_NOWARN) return NULL; pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs) { cachep->nr_objs--; node = cachep->objs; cachep->objs = node->private_data; pthread_mutex_unlock(&cachep->lock); node->private_data = NULL; } else { pthread_mutex_unlock(&cachep->lock); node = malloc(cachep->size); if (cachep->ctor) cachep->ctor(node); } uatomic_inc(&nr_allocated); return node; } void kmem_cache_free(struct kmem_cache *cachep, void *objp) { assert(objp); uatomic_dec(&nr_allocated); pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs > 10) { memset(objp, POISON_FREE, cachep->size); free(objp); } else { struct radix_tree_node *node = objp; cachep->nr_objs++; node->private_data = cachep->objs; cachep->objs = node; } pthread_mutex_unlock(&cachep->lock); } void *kmalloc(size_t size, gfp_t gfp) { void *ret = malloc(size); uatomic_inc(&nr_allocated); return ret; } void kfree(void *p) { if (!p) return; uatomic_dec(&nr_allocated); free(p); } struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t offset, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *ret = malloc(sizeof(*ret)); pthread_mutex_init(&ret->lock, NULL); ret->size = size; ret->nr_objs = 0; ret->objs = NULL; ret->ctor = ctor; return 
ret; } c?h=nds-private-remove&id=c26665ab5c49ad3e142e0f054ca3204f259ba09c'>diff
diff options
context:
space:
mode:
authorBorislav Petkov <bp@suse.de>2017-01-20 21:29:40 +0100
committerThomas Gleixner <tglx@linutronix.de>2017-01-23 09:39:55 +0100
commitc26665ab5c49ad3e142e0f054ca3204f259ba09c (patch)
tree3bab11918e18e9d25ef7544dba05cdf39d1abec5 /sound/usb/caiaq/device.c
parent7a308bb3016f57e5be11a677d15b821536419d36 (diff)
x86/microcode/intel: Drop stashed AP patch pointer optimization
This was meant to save us the scanning of the microcode container in the initrd since the first AP had already done that but it can also hurt us: Imagine a single hyperthreaded CPU (Intel(R) Atom(TM) CPU N270, for example) which updates the microcode on the BSP but since the microcode engine is shared between the two threads, the update on CPU1 doesn't happen because it has already happened on CPU0 and we don't find a newer microcode revision on CPU1. Which doesn't set the intel_ucode_patch pointer and at initrd jettisoning time we don't save the microcode patch for later application. Now, when we suspend to RAM, the loaded microcode gets cleared so we need to reload but there's no patch saved in the cache. Removing the optimization fixes this issue and all is fine and dandy. Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading") Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170120202955.4091-2-bp@alien8.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'sound/usb/caiaq/device.c')