/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009, 2010 Daniel Borkmann.
 * Subject to the GPL, version 2.
 */

#include "hash.h"
#include "xmalloc.h"

/* Hash table implementation from the GIT project. */
/* Copyright 2008 (C) Linus Torvalds, GPL version 2 */

/*
 * Look up a hash entry in the hash table. Return the pointer to
 * the existing entry, or the empty slot if none existed. The caller
 * can then look at the (*ptr) to see whether it existed or not.
 */
static struct hash_table_entry *lookup_hash_entry(unsigned int hash,
						  const struct hash_table *table)
{
	unsigned int size = table->size, nr = hash % size;
	struct hash_table_entry *array = table->array;

	while (array[nr].ptr) {
		if (array[nr].hash == hash)
			break;
		nr++;
		if (nr >= size)
			nr = 0;
	}
	return array + nr;
}

/*
 * Insert a new hash entry pointer into the table.
 *
 * If that hash entry already existed, return the pointer to
 * the existing entry (and the caller can create a list of the
 * pointers or do anything else). If it didn't exist, return
 * NULL (and the caller knows the pointer has been inserted).
 */
static void **insert_hash_entry(unsigned int hash, void *ptr,
				struct hash_table *table)
{
	struct hash_table_entry *entry = lookup_hash_entry(hash, table);

	if (!entry->ptr) {
		entry->ptr = ptr;
		entry->hash = hash;
		table->nr++;
		return NULL;
	}
	return &entry->ptr;
}

/*
 * Remove a hash entry pointer from the table.
 *
 * If that hash does not exist, NULL is returned. If the hash exists
 * and ptr is the entry stored in that slot, the slot is overwritten
 * with ptr_next and NULL is returned. Otherwise the stored pointer is
 * returned and the caller must maintain the remaining list itself.
 */
static void *remove_hash_entry(unsigned int hash, void *ptr, void *ptr_next,
			       struct hash_table *table)
{
	struct hash_table_entry *entry = lookup_hash_entry(hash, table);

	if (!entry->ptr)
		return NULL;
	else if (entry->ptr == ptr) {
		entry->ptr = ptr_next;
		entry->hash = hash;
		if (!ptr_next)
			table->nr--;
		return NULL;
	} else
		return entry->ptr;
}

static void grow_hash_table(struct hash_table *table)
{
	unsigned int i;
	unsigned int old_size = table->size, new_size;
	struct hash_table_entry *old_array = table->array, *new_array;

	new_size = alloc_nr(old_size);
	new_array = xzmalloc(sizeof(struct hash_table_entry) * new_size);

	table->size = new_size;
	table->array = new_array;
	table->nr = 0;

	for (i = 0; i < old_size; i++) {
		unsigned int hash = old_array[i].hash;
		void *ptr = old_array[i].ptr;

		if (ptr)
			insert_hash_entry(hash, ptr, table);
	}
	free(old_array);
}

void *lookup_hash(unsigned int hash, const struct hash_table *table)
{
	if (!table->array)
		return NULL;
	return lookup_hash_entry(hash, table)->ptr;
}

void *remove_hash(unsigned int hash, void *ptr, void *ptr_next,
		  struct hash_table *table)
{
	if (!table->array)
		return NULL;
	return remove_hash_entry(hash, ptr, ptr_next, table);
}

void **insert_hash(unsigned int hash, void *ptr, struct hash_table *table)
{
	unsigned int nr = table->nr;

	if (nr >= table->size / 2)
		grow_hash_table(table);
	return insert_hash_entry(hash, ptr, table);
}

int for_each_hash(const struct hash_table *table, int (*fn)(void *))
{
	int sum = 0;
	unsigned int i;
	unsigned int size = table->size;
	struct hash_table_entry *array = table->array;

	for (i = 0; i < size; i++) {
		void *ptr = array->ptr;

		array++;
		if (ptr) {
			int val = fn(ptr);

			if (val < 0)
				return val;
			sum += val;
		}
	}
	return sum;
}

int for_each_hash_int(const struct hash_table *table, int (*fn)(void *, int),
		      int arg)
{
	int sum = 0;
	unsigned int i;
	unsigned int size = table->size;
	struct hash_table_entry *array = table->array;

	for (i = 0; i < size; i++) {
		void *ptr = array->ptr;

		array++;
		if (ptr) {
			int val = fn(ptr, arg);

			if (val < 0)
				return val;
			sum += val;
		}
	}
	return sum;
}

void free_hash(struct hash_table *table)
{
	free(table->array);

	table->array = NULL;
	table->size = 0;
	table->nr = 0;
}
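
/*
 * A minimal usage sketch, not part of the original file: it is only
 * compiled when HASH_EXAMPLE is defined, which is a hypothetical flag
 * rather than a real netsniff-ng build option. It assumes hash.h
 * declares the public functions above and that a zero-filled
 * struct hash_table is a valid empty table, which insert_hash() and
 * grow_hash_table() handle by allocating the array on first insert.
 * str_hash() is an arbitrary toy hash chosen for the example; any
 * function yielding an unsigned int works, since the table only ever
 * compares hash values and never looks at the keys themselves, so a
 * caller needing true key equality chains its own objects through the
 * void ** slot that insert_hash() returns on a hash collision.
 */
#ifdef HASH_EXAMPLE
#include <stdio.h>
#include <string.h>

static unsigned int str_hash(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char) *s++;
	return h;
}

static int print_entry(void *ptr)
{
	printf("  %s\n", (const char *) ptr);
	return 1;
}

int main(void)
{
	struct hash_table table;
	char *devs[] = { "eth0", "eth1", "wlan0" };
	char *hit;
	void **pos;
	unsigned int i;

	/* All-zero table: array == NULL, size == 0, nr == 0 */
	memset(&table, 0, sizeof(table));

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		pos = insert_hash(str_hash(devs[i]), devs[i], &table);
		if (pos) {
			/* This hash value is already occupied: a real
			 * caller would chain its own objects through
			 * *pos instead of just reporting it. */
			printf("collision on %s\n", devs[i]);
		}
	}

	printf("%u entries stored:\n", table.nr);
	for_each_hash(&table, print_entry);

	hit = lookup_hash(str_hash("eth1"), &table);
	printf("lookup eth1 -> %s\n", hit ? hit : "(not found)");

	free_hash(&table);
	return 0;
}
#endif /* HASH_EXAMPLE */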