/* * Copyright (C) 2016 Red Hat, Inc. * Author: Michael S. Tsirkin * This work is licensed under the terms of the GNU GPL, version 2. * * Common macros and functions for ring benchmarking. */ #ifndef MAIN_H #define MAIN_H #include extern bool do_exit; #if defined(__x86_64__) || defined(__i386__) #include "x86intrin.h" static inline void wait_cycles(unsigned long long cycles) { unsigned long long t; t = __rdtsc(); while (__rdtsc() - t < cycles) {} } #define VMEXIT_CYCLES 500 #define VMENTRY_CYCLES 500 #elif defined(__s390x__) static inline void wait_cycles(unsigned long long cycles) { asm volatile("0: brctg %0,0b" : : "d" (cycles)); } /* tweak me */ #define VMEXIT_CYCLES 200 #define VMENTRY_CYCLES 200 #else static inline void wait_cycles(unsigned long long cycles) { _Exit(5); } #define VMEXIT_CYCLES 0 #define VMENTRY_CYCLES 0 #endif static inline void vmexit(void) { if (!do_exit) return; wait_cycles(VMEXIT_CYCLES); } static inline void vmentry(void) { if (!do_exit) return; wait_cycles(VMENTRY_CYCLES); } /* implemented by ring */ void alloc_ring(void); /* guest side */ int add_inbuf(unsigned, void *, void *); void *get_buf(unsigned *, void **); void disable_call(); bool used_empty(); bool enable_call(); void kick_available(); /* host side */ void disable_kick(); bool avail_empty(); bool enable_kick(); bool use_buf(unsigned *, void **); void call_used(); /* implemented by main */ extern bool do_sleep; void kick(void); void wait_for_kick(void); void call(void); void wait_for_call(void); extern unsigned ring_size; /* Compiler barrier - similar to what Linux uses */ #define barrier() asm volatile("" ::: "memory") /* Is there a portable way to do this? 
*/ #if defined(__x86_64__) || defined(__i386__) #define cpu_relax() asm ("rep; nop" ::: "memory") #elif defined(__s390x__) #define cpu_relax() barrier() #else #define cpu_relax() assert(0) #endif extern bool do_relax; static inline void busy_wait(void) { if (do_relax) cpu_relax(); else /* prevent compiler from removing busy loops */ barrier(); } /* * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized * with other __ATOMIC_SEQ_CST calls. */ #define smp_mb() __sync_synchronize() /* * This abuses the atomic builtins for thread fences, and * adds a compiler barrier. */ #define smp_release() do { \ barrier(); \ __atomic_thread_fence(__ATOMIC_RELEASE); \ } while (0) #define smp_acquire() do { \ __atomic_thread_fence(__ATOMIC_ACQUIRE); \ barrier(); \ } while (0) #endif e='q' value=''/>
diff options
context:
space:
mode:
author David S. Miller <davem@davemloft.net> 2017-02-07 16:29:30 -0500
committer David S. Miller <davem@davemloft.net> 2017-02-07 16:29:30 -0500
commit 3efa70d78f218e4c9276b0bac0545e5184c1c47b (patch)
tree f4abe2f05e173023d2a262afd4aebb1e89fe6985 /include/net/netns/mib.h
parent 76e0e70e6452b971a69cc9794ff4a6715c11f7f2 (diff)
parent 926af6273fc683cd98cd0ce7bf0d04a02eed6742 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The conflict was an interaction between a bug fix in the netvsc driver in 'net' and an optimization of the RX path in 'net-next'. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/netns/mib.h')