Merge tag 'v6.1.0' into merge/qemu-v6.1.0
v6.1.0 release
@@ -78,4 +78,17 @@ int accel_init_machine(AccelState *accel, MachineState *ms);
void accel_setup_post(MachineState *ms);
#endif /* !CONFIG_USER_ONLY */

/**
 * accel_cpu_instance_init:
 * @cpu: The CPU that needs to do accel-specific object initializations.
 */
void accel_cpu_instance_init(CPUState *cpu);

/**
 * accel_cpu_realizefn:
 * @cpu: The CPU that needs to call accel-specific cpu realization.
 * @errp: currently unused.
 */
bool accel_cpu_realizefn(CPUState *cpu, Error **errp);

#endif /* QEMU_ACCEL_H */
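
The two new hooks slot into common CPU creation code: accel_cpu_instance_init() during object initialization and accel_cpu_realizefn() during realize. A minimal sketch of the intended call pattern (the surrounding function names here are illustrative, not the exact call sites):

    static void cpu_common_initfn(Object *obj)
    {
        /* let the current accelerator do its per-CPU object setup */
        accel_cpu_instance_init(CPU(obj));
    }

    static void cpu_common_realizefn(DeviceState *dev, Error **errp)
    {
        /* accel-specific realization runs first and may fail */
        if (!accel_cpu_realizefn(CPU(dev), errp)) {
            return;
        }
        /* ... remainder of CPU realize ... */
    }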

@@ -8,7 +8,7 @@
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

@@ -60,8 +60,9 @@
    (unsigned short)1, \
    (expr)+0))))))

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */
#ifndef __ATOMIC_RELAXED
#error "Expecting C11 atomic ops"
#endif

/* Manual memory barriers
 *
@@ -239,212 +240,27 @@
#define qatomic_xor(ptr, n) \
    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#else /* __ATOMIC_RELAXED */

#ifdef __alpha__
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only). QEMU doesn't do accesses to write-combining
 * qemu memory or non-temporal load/stores from C code.
 */
#define smp_mb_release() barrier()
#define smp_mb_acquire() barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level. Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier. This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire() __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release() __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() barrier()
#endif

#ifndef signal_barrier
#define signal_barrier() barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p))
#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define qatomic_read(ptr) qatomic_read__nocheck(ptr)
#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i)

/**
 * qatomic_rcu_read - reads a RCU-protected pointer to a local variable
 * into a RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole critical
 * section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * qatomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg().
 */
#define qatomic_rcu_read(ptr) ({ \
    typeof(*ptr) _val = qatomic_read(ptr); \
    smp_read_barrier_depends(); \
    _val; \
})

/**
 * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match qatomic_rcu_read().
 */
#define qatomic_rcu_set(ptr, i) do { \
    smp_wmb(); \
    qatomic_set(ptr, i); \
} while (0)
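
A minimal sketch of how the two RCU macros pair up in practice (the struct, variable, and helper names are hypothetical):

    /* Writer: fully initialize the object, then publish the pointer;
     * qatomic_rcu_set() orders the initializing stores before the
     * store of the pointer itself. */
    struct Foo *new_foo = g_new0(struct Foo, 1);
    new_foo->val = 42;
    qatomic_rcu_set(&global_foo, new_foo);

    /* Reader, inside an RCU read-side critical section: the dependency
     * barrier makes the dereference safe. */
    struct Foo *f = qatomic_rcu_read(&global_foo);
    if (f) {
        use(f->val);
    }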

#define qatomic_load_acquire(ptr) ({ \
    typeof(*ptr) _val = qatomic_read(ptr); \
    smp_mb_acquire(); \
    _val; \
})

#define qatomic_store_release(ptr, i) do { \
    smp_mb_release(); \
    qatomic_set(ptr, i); \
} while (0)
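
These two form the usual message-passing pair; a short sketch under assumed variable names:

    /* Producer: every store before the release store is visible to a
     * reader that observes ready == true. */
    data = compute();
    qatomic_store_release(&ready, true);

    /* Consumer: the acquire load orders the subsequent reads. */
    if (qatomic_load_acquire(&ready)) {
        consume(data);
    }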

#ifndef qatomic_xchg
#if defined(__clang__)
#define qatomic_xchg(ptr, i) __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define qatomic_xchg__nocheck qatomic_xchg

/* Provide shorter names for GCC atomic builtins. */
#define qatomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
#define qatomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)

#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define qatomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
#define qatomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define qatomic_cmpxchg(ptr, old, new) \
    __sync_val_compare_and_swap(ptr, old, new)
#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void. */
#define qatomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
#define qatomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
#define qatomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define qatomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
#define qatomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define qatomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))
#define qatomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))

#endif /* __ATOMIC_RELAXED */

#ifndef smp_wmb
#define smp_wmb() smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb() smp_mb_acquire()
#endif

/* This is more efficient than a store plus a fence. */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
#endif
#endif

/* qatomic_mb_read/set semantics map Java volatile variables. They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/devel/atomics.txt for more discussion.
 * use. See docs/devel/atomics.rst for more discussion.
 */

#ifndef qatomic_mb_read
#define qatomic_mb_read(ptr) \
    qatomic_load_acquire(ptr)
#endif

#ifndef qatomic_mb_set
#define qatomic_mb_set(ptr, i) do { \
    qatomic_store_release(ptr, i); \
    smp_mb(); \
} while(0)
#if !defined(__SANITIZE_THREAD__) && \
    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
/* This is more efficient than a store plus a fence. */
# define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i))
#else
# define qatomic_mb_set(ptr, i) \
    ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif

#define qatomic_fetch_inc_nonzero(ptr) ({ \
@@ -455,28 +271,29 @@
    _oldn; \
})

/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
/*
 * Abstractions to access atomically (i.e. "once") i64/u64 variables.
 *
 * The i386 abi is odd in that by default members are only aligned to
 * 4 bytes, which means that 8-byte types can wind up mis-aligned.
 * Clang will then warn about this, and emit a call into libatomic.
 *
 * Use of these types in structures when they will be used with atomic
 * operations can avoid this.
 */
typedef int64_t aligned_int64_t __attribute__((aligned(8)));
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));

#ifdef CONFIG_ATOMIC64
static inline int64_t qatomic_read_i64(const int64_t *ptr)
{
    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
    return qatomic_read__nocheck(ptr);
}

static inline uint64_t qatomic_read_u64(const uint64_t *ptr)
{
    return qatomic_read__nocheck(ptr);
}

static inline void qatomic_set_i64(int64_t *ptr, int64_t val)
{
    qatomic_set__nocheck(ptr, val);
}

static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val)
{
    qatomic_set__nocheck(ptr, val);
}
/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
#define qatomic_read_i64(P) \
    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
#define qatomic_read_u64(P) \
    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
#define qatomic_set_i64(P, V) \
    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
#define qatomic_set_u64(P, V) \
    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))

static inline void qatomic64_init(void)
{

@@ -6,7 +6,7 @@
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

@@ -140,7 +140,8 @@ static inline int test_bit(long nr, const unsigned long *addr)
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, or size.
 * Returns the bit number of the last set bit,
 * or @size if there is no set bit in the bitmap.
 */
unsigned long find_last_bit(const unsigned long *addr,
                            unsigned long size);
@@ -150,6 +151,9 @@ unsigned long find_last_bit(const unsigned long *addr,
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next set bit,
 * or @size if there are no further set bits in the bitmap.
 */
unsigned long find_next_bit(const unsigned long *addr,
                            unsigned long size,
@@ -160,6 +164,9 @@ unsigned long find_next_bit(const unsigned long *addr,
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next cleared bit,
 * or @size if there are no further clear bits in the bitmap.
 */

unsigned long find_next_zero_bit(const unsigned long *addr,
@@ -171,7 +178,8 @@ unsigned long find_next_zero_bit(const unsigned long *addr,
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit.
 * Returns the bit number of the first set bit,
 * or @size if there is no set bit in the bitmap.
 */
static inline unsigned long find_first_bit(const unsigned long *addr,
                                           unsigned long size)
@@ -194,7 +202,8 @@ static inline unsigned long find_first_bit(const unsigned long *addr,
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first cleared bit.
 * Returns the bit number of the first cleared bit,
 * or @size if there is no clear bit in the bitmap.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
                                                unsigned long size)
@@ -282,6 +291,35 @@ static inline uint64_t ror64(uint64_t word, unsigned int shift)
    return (word >> shift) | (word << ((64 - shift) & 63));
}

/**
 * hswap32 - swap 16-bit halfwords within a 32-bit value
 * @h: value to swap
 */
static inline uint32_t hswap32(uint32_t h)
{
    return rol32(h, 16);
}

/**
 * hswap64 - swap 16-bit halfwords within a 64-bit value
 * @h: value to swap
 */
static inline uint64_t hswap64(uint64_t h)
{
    uint64_t m = 0x0000ffff0000ffffull;
    h = rol64(h, 32);
    return ((h & m) << 16) | ((h >> 16) & m);
}

/**
 * wswap64 - swap 32-bit words within a 64-bit value
 * @h: value to swap
 */
static inline uint64_t wswap64(uint64_t h)
{
    return rol64(h, 32);
}
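
A few worked values, which follow directly from the definitions above:

    /* hswap32(0xAAAABBBB)         == 0xBBBBAAAA
     * hswap64(0xAAAABBBBCCCCDDDD) == 0xDDDDCCCCBBBBAAAA
     * wswap64(0xAAAABBBBCCCCDDDD) == 0xCCCCDDDDAAAABBBB
     */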

/**
 * extract32:
 * @value: the value to extract the bit field from

@@ -1,8 +1,6 @@
#ifndef BSWAP_H
#define BSWAP_H

#include "fpu/softfloat-types.h"

#ifdef CONFIG_MACHINE_BSWAP_H
# include <sys/endian.h>
# include <machine/bswap.h>
@@ -12,7 +10,18 @@
# include <endian.h>
#elif defined(CONFIG_BYTESWAP_H)
# include <byteswap.h>
#define BSWAP_FROM_BYTESWAP
# else
#define BSWAP_FROM_FALLBACKS
#endif /* ! CONFIG_MACHINE_BSWAP_H */

#ifdef __cplusplus
extern "C" {
#endif

#include "fpu/softfloat-types.h"

#ifdef BSWAP_FROM_BYTESWAP
static inline uint16_t bswap16(uint16_t x)
{
    return bswap_16(x);
@@ -27,7 +36,9 @@ static inline uint64_t bswap64(uint64_t x)
{
    return bswap_64(x);
}
# else
#endif

#ifdef BSWAP_FROM_FALLBACKS
static inline uint16_t bswap16(uint16_t x)
{
    return (((x & 0x00ff) << 8) |
@@ -53,7 +64,10 @@ static inline uint64_t bswap64(uint64_t x)
            ((x & 0x00ff000000000000ULL) >> 40) |
            ((x & 0xff00000000000000ULL) >> 56));
}
#endif /* ! CONFIG_MACHINE_BSWAP_H */
#endif

#undef BSWAP_FROM_BYTESWAP
#undef BSWAP_FROM_FALLBACKS

static inline void bswap16s(uint16_t *s)
{
@@ -494,4 +508,8 @@ DO_STN_LDN_P(be)
#undef le_bswaps
#undef be_bswaps

#ifdef __cplusplus
}
#endif

#endif /* BSWAP_H */

@@ -26,15 +26,13 @@
#ifndef QEMU_CO_SHARED_RESOURCE_H
#define QEMU_CO_SHARED_RESOURCE_H


/* Accesses to co-shared-resource API are thread-safe */
typedef struct SharedResource SharedResource;

/*
 * Create SharedResource structure
 *
 * @total: total amount of some resource to be shared between clients
 *
 * Note: this API is not thread-safe.
 */
SharedResource *shres_create(uint64_t total);


@@ -72,22 +72,11 @@
    int:(x) ? -1 : 1; \
}

/* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is
 * supported; otherwise, it will be omitted from the compiler error
 * message (but as it remains present in the source code, it can still
 * be useful when debugging). */
#if defined(CONFIG_STATIC_ASSERT)
#ifdef __cplusplus
#define QEMU_BUILD_BUG_MSG(x, msg) static_assert(!(x), msg)
#else
#define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg)
#endif
#elif defined(__COUNTER__)
#define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \
    glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused))
#else
#define QEMU_BUILD_BUG_MSG(x, msg)
#endif

#define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x)

@@ -177,46 +166,6 @@
#define QEMU_ALWAYS_INLINE
#endif

/* Implement C11 _Generic via GCC builtins. Example:
 *
 *    QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
 *
 * The first argument is the discriminator. The last is the default value.
 * The middle ones are tuples in "(type, expansion)" format.
 */

/* First, find out the number of generic cases. */
#define QEMU_GENERIC(x, ...) \
    QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* There will be extra arguments, but they are not used. */
#define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) \
    QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)

/* Two more helper macros, this time to extract items from a parenthesized
 * list.
 */
#define QEMU_FIRST_(a, b) a
#define QEMU_SECOND_(a, b) b

/* ... and a final one for the common part of the "recursion". */
#define QEMU_GENERIC_IF(x, type_then, else_) \
    __builtin_choose_expr(__builtin_types_compatible_p(x, \
                                                       QEMU_FIRST_ type_then), \
                          QEMU_SECOND_ type_then, else_)

/* CPP poor man's "recursion". */
#define QEMU_GENERIC1(x, a0, ...) (a0)
#define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__))
#define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__))
#define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__))
#define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__))
#define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__))
#define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__))
#define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__))
#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
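
Tracing the header's own example through this machinery: with two (type, expansion) tuples plus the default, the argument-counting trick selects QEMU_GENERIC3, which unrolls into nested __builtin_choose_expr tests. Informally:

    /* QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin)
     *   -> QEMU_GENERIC3(typeof(x), (float, sinf), (long double, sinl), sin, ...)
     *   -> __builtin_choose_expr(typeof(x) compatible with float, sinf,
     *          __builtin_choose_expr(typeof(x) compatible with long double,
     *                                sinl, sin))
     */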

/**
 * qemu_build_not_reached()
 *

@@ -1,7 +1,9 @@
#ifndef QEMU_CONFIG_FILE_H
#define QEMU_CONFIG_FILE_H

typedef void QEMUConfigCB(const char *group, QDict *qdict, void *opaque, Error **errp);

void qemu_load_module_for_opts(const char *group);
QemuOptsList *qemu_find_opts(const char *group);
QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
QemuOpts *qemu_find_opts_singleton(const char *group);
@@ -14,7 +16,10 @@ void qemu_config_write(FILE *fp);
int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname,
                      Error **errp);

int qemu_read_config_file(const char *filename, Error **errp);
/* A default callback for qemu_read_config_file(). */
void qemu_config_do_parse(const char *group, QDict *qdict, void *opaque, Error **errp);

int qemu_read_config_file(const char *filename, QEMUConfigCB *f, Error **errp);

/* Parse QDict options as a replacement for a config file (allowing multiple
   enumerated (0..(n-1)) configuration "sections") */

@@ -210,13 +210,15 @@ void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock);
/**
 * Removes the next coroutine from the CoQueue, and wake it up.
 * Returns true if a coroutine was removed, false if the queue is empty.
 * OK to run from coroutine and non-coroutine context.
 */
bool coroutine_fn qemu_co_queue_next(CoQueue *queue);
bool qemu_co_queue_next(CoQueue *queue);

/**
 * Empties the CoQueue; all coroutines are woken up.
 * OK to run from coroutine and non-coroutine context.
 */
void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue);
void qemu_co_queue_restart_all(CoQueue *queue);

/**
 * Removes the next coroutine from the CoQueue, and wake it up. Unlike
@@ -291,20 +293,27 @@ void qemu_co_rwlock_wrlock(CoRwlock *lock);
 */
void qemu_co_rwlock_unlock(CoRwlock *lock);

typedef struct QemuCoSleepState QemuCoSleepState;
typedef struct QemuCoSleep {
    Coroutine *to_wake;
} QemuCoSleep;

/**
 * Yield the coroutine for a given duration. During this yield, @sleep_state
 * (if not NULL) is set to an opaque pointer, which may be used for
 * qemu_co_sleep_wake(). Be careful, the pointer is set back to zero when the
 * timer fires. Don't save the obtained value to other variables and don't call
 * qemu_co_sleep_wake from another aio context.
 * Yield the coroutine for a given duration. Initializes @w so that,
 * during this yield, it can be passed to qemu_co_sleep_wake() to
 * terminate the sleep.
 */
void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
                                            QemuCoSleepState **sleep_state);
void coroutine_fn qemu_co_sleep_ns_wakeable(QemuCoSleep *w,
                                            QEMUClockType type, int64_t ns);

/**
 * Yield the coroutine until the next call to qemu_co_sleep_wake.
 */
void coroutine_fn qemu_co_sleep(QemuCoSleep *w);

static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
    qemu_co_sleep_ns_wakeable(type, ns, NULL);
    QemuCoSleep w = { 0 };
    qemu_co_sleep_ns_wakeable(&w, type, ns);
}
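
A sketch of the new wakeable-sleep usage; the clock choice and duration are arbitrary here, and the QemuCoSleep must be visible to whoever wakes the sleeper:

    QemuCoSleep w = { 0 };

    /* In the sleeping coroutine: yields for up to one second. */
    qemu_co_sleep_ns_wakeable(&w, QEMU_CLOCK_REALTIME,
                              1 * NANOSECONDS_PER_SECOND);

    /* Elsewhere, to end the sleep early: */
    qemu_co_sleep_wake(&w);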

/**
@@ -313,7 +322,7 @@ static inline void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
 * qemu_co_sleep_ns() and should be checked to be non-NULL before calling
 * qemu_co_sleep_wake().
 */
void qemu_co_sleep_wake(QemuCoSleepState *sleep_state);
void qemu_co_sleep_wake(QemuCoSleep *w);

/**
 * Yield until a file descriptor becomes readable

@@ -26,6 +26,7 @@
#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/compiler.h"
#include "qemu/bswap.h"

#ifdef CONFIG_INT128
@@ -272,6 +273,9 @@ static inline int ctpop64(uint64_t val)
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
@@ -281,6 +285,7 @@ static inline uint8_t revbit8(uint8_t x)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}

/**
@@ -289,6 +294,9 @@ static inline uint8_t revbit8(uint8_t x)
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
@@ -300,6 +308,7 @@ static inline uint16_t revbit16(uint16_t x)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}

/**
@@ -308,6 +317,9 @@ static inline uint16_t revbit16(uint16_t x)
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
@@ -319,6 +331,7 @@ static inline uint32_t revbit32(uint32_t x)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}

/**
@@ -327,6 +340,9 @@ static inline uint32_t revbit32(uint32_t x)
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
@@ -338,6 +354,281 @@ static inline uint64_t revbit64(uint64_t x)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}

/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return ((*ret ^ x) & ~(x ^ y)) < 0;
#endif
}
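
Typical use keeps the arithmetic and the check in one call; a small sketch with assumed inputs a and b:

    int32_t sum;
    if (sadd32_overflow(a, b, &sum)) {
        /* mathematical a + b did not fit in int32_t;
         * sum holds the truncated value */
    } else {
        /* sum is exact */
    }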

/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return ((*ret ^ x) & ~(x ^ y)) < 0;
#endif
}

/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return *ret < x;
#endif
}

/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_add_overflow) || __GNUC__ >= 5
    return __builtin_add_overflow(x, y, ret);
#else
    *ret = x + y;
    return *ret < x;
#endif
}

/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return ((*ret ^ x) & (x ^ y)) < 0;
#endif
}

/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return ((*ret ^ x) & (x ^ y)) < 0;
#endif
}

/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return x < y;
#endif
}

/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_sub_overflow) || __GNUC__ >= 5
    return __builtin_sub_overflow(x, y, ret);
#else
    *ret = x - y;
    return x < y;
#endif
}

/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    int64_t z = (int64_t)x * y;
    *ret = z;
    return *ret != z;
#endif
}

/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t hi, lo;
    muls64(&lo, &hi, x, y);
    *ret = lo;
    return hi != ((int64_t)lo >> 63);
#endif
}

/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t z = (uint64_t)x * y;
    *ret = z;
    return z > UINT32_MAX;
#endif
}

/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
#if __has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5
    return __builtin_mul_overflow(x, y, ret);
#else
    uint64_t hi;
    mulu64(ret, &hi, x, y);
    return hi != 0;
#endif
}

/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}

/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x: Minuend
 * @y: Subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}
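
Chaining the carry helper yields multi-word arithmetic; a sketch adding two 128-bit values held as hi/lo pairs (variable names illustrative):

    bool carry = false;
    uint64_t r_lo = uadd64_carry(a_lo, b_lo, &carry);
    uint64_t r_hi = uadd64_carry(a_hi, b_hi, &carry);
    /* carry now holds the carry-out of the full 128-bit sum */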

/* Host type specific sizes of these routines. */

@@ -11,6 +11,11 @@ static inline Int128 int128_make64(uint64_t a)
    return a;
}

static inline Int128 int128_makes64(int64_t a)
{
    return a;
}

static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
    return (__uint128_t)hi << 64 | lo;
@@ -167,6 +172,11 @@ static inline Int128 int128_make64(uint64_t a)
    return (Int128) { a, 0 };
}

static inline Int128 int128_makes64(int64_t a)
{
    return (Int128) { a, a >> 63 };
}

static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
    return (Int128) { lo, hi };

@@ -254,7 +254,7 @@ struct JobDriver {
/**
 * If the callback is not NULL, it will be invoked in job_cancel_async
 */
void (*cancel)(Job *job);
void (*cancel)(Job *job, bool force);


/** Called when the job is freed */

@@ -24,79 +24,71 @@ struct QemuLockable {
    QemuLockUnlockFunc *unlock;
};

/* This function gives an error if an invalid, non-NULL pointer type is passed
 * to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
 * from the compiler, and give the errors already at link time.
 */
#if defined(__OPTIMIZE__) && !defined(__SANITIZE_ADDRESS__)
void unknown_lock_type(void *);
#else
static inline void unknown_lock_type(void *unused)
{
    abort();
}
#endif

static inline __attribute__((__always_inline__)) QemuLockable *
qemu_make_lockable(void *x, QemuLockable *lockable)
{
    /* We cannot test this in a macro, otherwise we get compiler
    /*
     * We cannot test this in a macro, otherwise we get compiler
     * warnings like "the address of 'm' will always evaluate as 'true'".
     */
    return x ? lockable : NULL;
}

/* Auxiliary macros to simplify QEMU_MAKE_LOCKABLE. */
#define QEMU_LOCK_FUNC(x) ((QemuLockUnlockFunc *) \
    QEMU_GENERIC(x, \
                 (QemuMutex *, qemu_mutex_lock), \
                 (QemuRecMutex *, qemu_rec_mutex_lock), \
                 (CoMutex *, qemu_co_mutex_lock), \
                 (QemuSpin *, qemu_spin_lock), \
                 unknown_lock_type))
static inline __attribute__((__always_inline__)) QemuLockable *
qemu_null_lockable(void *x)
{
    if (x != NULL) {
        qemu_build_not_reached();
    }
    return NULL;
}

#define QEMU_UNLOCK_FUNC(x) ((QemuLockUnlockFunc *) \
    QEMU_GENERIC(x, \
                 (QemuMutex *, qemu_mutex_unlock), \
                 (QemuRecMutex *, qemu_rec_mutex_unlock), \
                 (CoMutex *, qemu_co_mutex_unlock), \
                 (QemuSpin *, qemu_spin_unlock), \
                 unknown_lock_type))

/* In C, compound literals have the lifetime of an automatic variable.
/*
 * In C, compound literals have the lifetime of an automatic variable.
 * In C++ it would be different, but then C++ wouldn't need QemuLockable
 * either...
 */
#define QEMU_MAKE_LOCKABLE_(x) (&(QemuLockable) { \
        .object = (x), \
        .lock = QEMU_LOCK_FUNC(x), \
        .unlock = QEMU_UNLOCK_FUNC(x), \
#define QML_OBJ_(x, name) (&(QemuLockable) { \
        .object = (x), \
        .lock = (QemuLockUnlockFunc *) qemu_ ## name ## _lock, \
        .unlock = (QemuLockUnlockFunc *) qemu_ ## name ## _unlock \
    })

/* QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
/**
 * QEMU_MAKE_LOCKABLE - Make a polymorphic QemuLockable
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
 *     CoMutex, QemuSpin).
 *
 * Returns a QemuLockable object that can be passed around
 * to a function that can operate with locks of any kind, or
 * NULL if @x is %NULL.
 */
#define QEMU_MAKE_LOCKABLE(x) \
    QEMU_GENERIC(x, \
                 (QemuLockable *, (x)), \
                 qemu_make_lockable((x), QEMU_MAKE_LOCKABLE_(x)))

/* QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex, CoMutex, QemuSpin).
 * Note the special case for void *, so that we may pass "NULL".
 */
#define QEMU_MAKE_LOCKABLE(x) \
    _Generic((x), QemuLockable *: (x), \
             void *: qemu_null_lockable(x), \
             QemuMutex *: qemu_make_lockable(x, QML_OBJ_(x, mutex)), \
             QemuRecMutex *: qemu_make_lockable(x, QML_OBJ_(x, rec_mutex)), \
             CoMutex *: qemu_make_lockable(x, QML_OBJ_(x, co_mutex)), \
             QemuSpin *: qemu_make_lockable(x, QML_OBJ_(x, spin)))

/**
 * QEMU_MAKE_LOCKABLE_NONNULL - Make a polymorphic QemuLockable
 *
 * @x: a lock object (currently one of QemuMutex, QemuRecMutex,
 *     CoMutex, QemuSpin).
 *
 * Returns a QemuLockable object that can be passed around
 * to a function that can operate with locks of any kind.
 */
#define QEMU_MAKE_LOCKABLE_NONNULL(x) \
    QEMU_GENERIC(x, \
                 (QemuLockable *, (x)), \
                 QEMU_MAKE_LOCKABLE_(x))
#define QEMU_MAKE_LOCKABLE_NONNULL(x) \
    _Generic((x), QemuLockable *: (x), \
             QemuMutex *: QML_OBJ_(x, mutex), \
             QemuRecMutex *: QML_OBJ_(x, rec_mutex), \
             CoMutex *: QML_OBJ_(x, co_mutex), \
             QemuSpin *: QML_OBJ_(x, spin))
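
All supported lock types funnel through one polymorphic wrapper; an illustrative caller (do_with_lock is hypothetical):

    void do_with_lock(QemuLockable *lockable);

    QemuMutex mutex;
    CoMutex co_mutex;

    do_with_lock(QEMU_MAKE_LOCKABLE(&mutex));
    do_with_lock(QEMU_MAKE_LOCKABLE(&co_mutex));
    do_with_lock(QEMU_MAKE_LOCKABLE(NULL)); /* void *: yields NULL */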

static inline void qemu_lockable_lock(QemuLockable *x)
{

@@ -234,24 +234,6 @@ void event_notifier_set_handler(EventNotifier *e,

GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
#ifdef CONFIG_POSIX
/**
 * qemu_add_child_watch: Register a child process for reaping.
 *
 * Under POSIX systems, a parent process must read the exit status of
 * its child processes using waitpid, or the operating system will not
 * free some of the resources attached to that process.
 *
 * This function directs the QEMU main loop to observe a child process
 * and call waitpid as soon as it exits; the watch is then removed
 * automatically. It is useful whenever QEMU forks a child process
 * but will find out about its termination by other means such as a
 * "broken pipe".
 *
 * @pid: The pid that QEMU should observe.
 */
int qemu_add_child_watch(pid_t pid);
#endif

/**
 * qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
@@ -312,7 +294,9 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);

void qemu_fd_register(int fd);

QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
#define qemu_bh_new(cb, opaque) \
    qemu_bh_new_full((cb), (opaque), (stringify(cb)))
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name);
void qemu_bh_schedule_idle(QEMUBH *bh);
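
The wrapper macro derives a debugging name from the callback symbol itself, so for a hypothetical callback named my_cb the preprocessor produces:

    /* qemu_bh_new(my_cb, opaque) expands to: */
    qemu_bh_new_full((my_cb), (opaque), ("my_cb"));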

enum {

@@ -7,18 +7,22 @@ size_t qemu_fd_getpagesize(int fd);
size_t qemu_mempath_getpagesize(const char *mem_path);

/**
 * qemu_ram_mmap: mmap the specified file or device.
 * qemu_ram_mmap: mmap anonymous memory, the specified file or device.
 *
 * mmap() abstraction to map guest RAM, simplifying flag handling, taking
 * care of alignment requirements and installing guard pages.
 *
 * Parameters:
 * @fd: the file or the device to mmap
 * @size: the number of bytes to be mmaped
 * @align: if not zero, specify the alignment of the starting mapping address;
 *         otherwise, the alignment in use will be determined by QEMU.
 * @readonly: true for a read-only mapping, false for read/write.
 * @shared: map has RAM_SHARED flag.
 * @is_pmem: map has RAM_PMEM flag.
 * @qemu_map_flags: QEMU_MAP_* flags
 * @map_offset: map starts at offset of map_offset from the start of fd
 *
 * Internally, MAP_PRIVATE, MAP_ANONYMOUS and MAP_SHARED_VALIDATE are set
 * implicitly based on other parameters.
 *
 * Return:
 *   On success, return a pointer to the mapped area.
 *   On failure, return MAP_FAILED.
@@ -26,9 +30,7 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    bool readonly,
                    bool shared,
                    bool is_pmem,
                    uint32_t qemu_map_flags,
                    off_t map_offset);

void qemu_ram_munmap(int fd, void *ptr, size_t size);
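
With the flag-based signature, a call collapses the old three booleans into one bitmask; a hedged sketch (fd and size come from elsewhere, and the flag combination is only an example):

    void *ptr = qemu_ram_mmap(fd, size,
                              0,               /* let QEMU pick alignment */
                              QEMU_MAP_SHARED, /* instead of shared=true */
                              0);              /* map_offset */
    if (ptr == MAP_FAILED) {
        /* handle the error */
    }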

@@ -72,5 +72,84 @@ void module_call_init(module_init_type type);
bool module_load_one(const char *prefix, const char *lib_name, bool mayfail);
void module_load_qom_one(const char *type);
void module_load_qom_all(void);
void module_allow_arch(const char *arch);

/**
 * DOC: module info annotation macros
 *
 * `scripts/modinfo-collect.py` will collect module info,
 * using the preprocessor and -DQEMU_MODINFO.
 *
 * `scripts/modinfo-generate.py` will create a module meta-data database
 * from the collected information so qemu knows about module
 * dependencies and QOM objects implemented by modules.
 *
 * See `*.modinfo` and `modinfo.c` in the build directory to check the
 * script results.
 */
#ifdef QEMU_MODINFO
# define modinfo(kind, value) \
    MODINFO_START kind value MODINFO_END
#else
# define modinfo(kind, value)
#endif

/**
 * module_obj
 *
 * @name: QOM type.
 *
 * This module implements QOM type @name.
 */
#define module_obj(name) modinfo(obj, name)

/**
 * module_dep
 *
 * @name: module name
 *
 * This module depends on module @name.
 */
#define module_dep(name) modinfo(dep, name)

/**
 * module_arch
 *
 * @name: target architecture
 *
 * This module is for target architecture @name.
 *
 * Note that target-dependent modules are tagged automatically, so
 * this is only needed in case target-independent modules should be
 * restricted. Use case example: the ccw bus is implemented by s390x
 * only.
 */
#define module_arch(name) modinfo(arch, name)

/**
 * module_opts
 *
 * @name: QemuOpts name
 *
 * This module registers QemuOpts @name.
 */
#define module_opts(name) modinfo(opts, name)
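
A module source file tags itself with these annotations; a hypothetical example (all names invented for illustration):

    /* in some hw-foo module source file: */
    module_obj("foo-device");   /* QOM type implemented here */
    module_dep("hw-core-foo");  /* must be loaded first */
    module_arch("s390x");       /* only if an arch restriction is needed */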

/*
 * module info database
 *
 * scripts/modinfo-generate.c will build this using the data collected
 * by scripts/modinfo-collect.py
 */
typedef struct QemuModinfo QemuModinfo;
struct QemuModinfo {
    const char *name;
    const char *arch;
    const char **objs;
    const char **deps;
    const char **opts;
};
extern const QemuModinfo qemu_modinfo[];
void module_init_info(const QemuModinfo *info);

#endif

@@ -119,7 +119,6 @@ QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id,
                           int fail_if_exists, Error **errp);
void qemu_opts_reset(QemuOptsList *list);
void qemu_opts_loc_restore(QemuOpts *opts);
bool qemu_opts_set(QemuOptsList *list, const char *name, const char *value, Error **errp);
const char *qemu_opts_id(QemuOpts *opts);
void qemu_opts_set_id(QemuOpts *opts, char *id);
void qemu_opts_del(QemuOpts *opts);
@@ -130,8 +129,6 @@ QemuOpts *qemu_opts_parse_noisily(QemuOptsList *list, const char *params,
                                  bool permit_abbrev);
QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params,
                          bool permit_abbrev, Error **errp);
void qemu_opts_set_defaults(QemuOptsList *list, const char *params,
                            int permit_abbrev);
QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict,
                               Error **errp);
QDict *qemu_opts_to_qdict_filtered(QemuOpts *opts, QDict *qdict,
@@ -147,7 +144,10 @@ void qemu_opts_print_help(QemuOptsList *list, bool print_caption);
void qemu_opts_free(QemuOptsList *list);
QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list);

QDict *keyval_parse_into(QDict *qdict, const char *params, const char *implied_key,
                         bool *p_help, Error **errp);
QDict *keyval_parse(const char *params, const char *implied_key,
                    bool *help, Error **errp);
void keyval_merge(QDict *old, const QDict *new, Error **errp);

#endif

@@ -131,10 +131,6 @@ QEMU_EXTERN_C int daemon(int, int);
 */
#include "glib-compat.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _WIN32
#include "sysemu/os-win32.h"
#endif
@@ -143,6 +139,10 @@ extern "C" {
#include "sysemu/os-posix.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include "qemu/typedefs.h"

/*
@@ -195,6 +195,9 @@ extern "C" {
#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0
#endif
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef ENOMEDIUM
#define ENOMEDIUM ENODEV
#endif
@@ -253,7 +256,7 @@ extern "C" {
/* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with
 * the wrong type. Our replacement isn't usable in preprocessor
 * expressions, but it is sufficient for our needs. */
#if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX
#ifdef HAVE_BROKEN_SIZE_MAX
#undef SIZE_MAX
#define SIZE_MAX ((size_t)-1)
#endif
@@ -316,11 +319,16 @@ extern "C" {
    })
#endif

/* Round number down to multiple */
/*
 * Round number down to multiple. Safe when m is not a power of 2 (see
 * ROUND_DOWN for a faster version when a power of 2 is guaranteed).
 */
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple. Safe when m is not a power of 2 (see
 * ROUND_UP for a faster version when a power of 2 is guaranteed) */
/*
 * Round number up to multiple. Safe when m is not a power of 2 (see
 * ROUND_UP for a faster version when a power of 2 is guaranteed).
 */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))

/* Check if n is a multiple of m */
@@ -337,11 +345,22 @@ extern "C" {
/* Check if pointer p is n-bytes aligned */
#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))

/* Round number up to multiple. Requires that d be a power of 2 (see
/*
 * Round number down to multiple. Requires that d be a power of 2 (see
 * QEMU_ALIGN_UP for a safer but slower version on arbitrary
 * numbers); works even if d is a smaller type than n. */
 * numbers); works even if d is a smaller type than n.
 */
#ifndef ROUND_DOWN
#define ROUND_DOWN(n, d) ((n) & -(0 ? (n) : (d)))
#endif

/*
 * Round number up to multiple. Requires that d be a power of 2 (see
 * QEMU_ALIGN_UP for a safer but slower version on arbitrary
 * numbers); works even if d is a smaller type than n.
 */
#ifndef ROUND_UP
#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d)))
#define ROUND_UP(n, d) ROUND_DOWN((n) + (d) - 1, (d))
#endif
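
Worked values, which follow directly from the macro definitions (ROUND_* requires a power-of-2 divisor):

    /* QEMU_ALIGN_DOWN(17, 5) == 15      QEMU_ALIGN_UP(17, 5) == 20
     * ROUND_DOWN(17, 8)      == 16      ROUND_UP(17, 8)      == 24
     */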

#ifndef DIV_ROUND_UP
@@ -362,10 +381,50 @@ extern "C" {
int qemu_daemon(int nochdir, int noclose);
void *qemu_try_memalign(size_t alignment, size_t size);
void *qemu_memalign(size_t alignment, size_t size);
void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared);
void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared,
                          bool noreserve);
void qemu_vfree(void *ptr);
void qemu_anon_ram_free(void *ptr, size_t size);

/*
 * It's an analog of GLIB's g_autoptr_cleanup_generic_gfree(), used to define
 * g_autofree macro.
 */
static inline void qemu_cleanup_generic_vfree(void *p)
{
    void **pp = (void **)p;
    qemu_vfree(*pp);
}

/*
 * Analog of g_autofree, but qemu_vfree is called on cleanup instead of g_free.
 */
#define QEMU_AUTO_VFREE __attribute__((cleanup(qemu_cleanup_generic_vfree)))
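
A sketch of the cleanup attribute in use (the function and the alignment value are illustrative):

    void example(size_t len)
    {
        QEMU_AUTO_VFREE uint8_t *buf = qemu_memalign(4096, len);
        /* ... use buf; qemu_vfree(buf) runs automatically on return ... */
    }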

/*
 * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example,
 * consumed by qemu_ram_mmap().
 */

/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */
#define QEMU_MAP_READONLY (1 << 0)

/* Use MAP_SHARED instead of MAP_PRIVATE. */
#define QEMU_MAP_SHARED (1 << 1)

/*
 * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without
 * QEMU_MAP_SHARED. If mapping fails, warn and fallback to !QEMU_MAP_SYNC.
 */
#define QEMU_MAP_SYNC (1 << 2)

/*
 * Use MAP_NORESERVE to skip reservation of swap space (or huge pages if
 * applicable). Bail out if not supported/effective.
 */
#define QEMU_MAP_NORESERVE (1 << 3)


#define QEMU_MADV_INVALID -1

#if defined(CONFIG_MADVISE)
@@ -410,7 +469,7 @@ void qemu_anon_ram_free(void *ptr, size_t size);
#ifdef MADV_REMOVE
#define QEMU_MADV_REMOVE MADV_REMOVE
#else
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
#endif

#elif defined(CONFIG_POSIX_MADVISE)
@@ -424,7 +483,7 @@ void qemu_anon_ram_free(void *ptr, size_t size);
#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED

#else /* no-op */

@@ -512,6 +571,7 @@ void sigaction_invoke(struct sigaction *action,
#endif

int qemu_madvise(void *addr, size_t len, int advice);
int qemu_mprotect_rw(void *addr, size_t size);
int qemu_mprotect_rwx(void *addr, size_t size);
int qemu_mprotect_none(void *addr, size_t size);


@@ -6,8 +6,8 @@
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef _PLUGIN_MEMORY_H_
#define _PLUGIN_MEMORY_H_
#ifndef PLUGIN_MEMORY_H
#define PLUGIN_MEMORY_H

struct qemu_plugin_hwaddr {
    bool is_io;
@@ -18,7 +18,7 @@ struct qemu_plugin_hwaddr {
        hwaddr offset;
    } io;
    struct {
        uint64_t hostaddr;
        void *hostaddr;
    } ram;
    } v;
};

@@ -79,7 +79,6 @@ enum plugin_dyn_cb_subtype {
struct qemu_plugin_dyn_cb {
    union qemu_plugin_cb_sig f;
    void *userp;
    unsigned tcg_flags;
    enum plugin_dyn_cb_subtype type;
    /* @rw applies to mem callbacks only (both regular and inline) */
    enum qemu_plugin_mem_rw rw;
@@ -191,6 +190,16 @@ void qemu_plugin_add_dyn_cb_arr(GArray *arr);

void qemu_plugin_disable_mem_helpers(CPUState *cpu);

/**
 * qemu_plugin_user_exit(): clean-up callbacks before calling exit callbacks
 *
 * This is a user-mode only helper that ensures we have fully cleared
 * callbacks from all threads before calling the exit callbacks. This
 * is so the plugins themselves don't have to jump through hoops to
 * guard against race conditions.
 */
void qemu_plugin_user_exit(void);

#else /* !CONFIG_PLUGIN */

static inline void qemu_plugin_add_opts(void)
@@ -251,6 +260,8 @@ void qemu_plugin_add_dyn_cb_arr(GArray *arr)
static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{ }

static inline void qemu_plugin_user_exit(void)
{ }
#endif /* !CONFIG_PLUGIN */

#endif /* QEMU_PLUGIN_H */

@@ -27,6 +27,8 @@
#ifndef QEMU_PROGRESS_METER_H
#define QEMU_PROGRESS_METER_H

#include "qemu/lockable.h"

typedef struct ProgressMeter {
    /**
     * Current progress. The unit is arbitrary as long as the ratio between
@@ -37,22 +39,24 @@ typedef struct ProgressMeter {

    /** Estimated current value at the completion of the process */
    uint64_t total;

    QemuMutex lock; /* protects concurrent access to above fields */
} ProgressMeter;

static inline void progress_work_done(ProgressMeter *pm, uint64_t done)
{
    pm->current += done;
}
void progress_init(ProgressMeter *pm);
void progress_destroy(ProgressMeter *pm);

static inline void progress_set_remaining(ProgressMeter *pm, uint64_t remaining)
{
    pm->total = pm->current + remaining;
}
/* Get a snapshot of internal current and total values */
void progress_get_snapshot(ProgressMeter *pm, uint64_t *current,
                           uint64_t *total);

static inline void progress_increase_remaining(ProgressMeter *pm,
                                               uint64_t delta)
{
    pm->total += delta;
}
/* Increases the amount of work done so far by @done */
void progress_work_done(ProgressMeter *pm, uint64_t done);

/* Sets how much work has to be done to complete to @remaining */
void progress_set_remaining(ProgressMeter *pm, uint64_t remaining);

/* Increases the total work to do by @delta */
void progress_increase_remaining(ProgressMeter *pm, uint64_t delta);
|
||||
|
||||
#endif /* QEMU_PROGRESS_METER_H */
|
||||
|
||||
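With the ProgressMeter fields now behind a mutex, the inline mutators above are replaced by out-of-line accessors, and readers get a consistent pair via progress_get_snapshot(). A minimal sketch of the intended call pattern, assuming the declarations above are backed by their implementation; the job loop and unit counts are illustrative:

    #include "qemu/osdep.h"
    #include "qemu/progress_meter.h"

    /* Hypothetical worker: a job of 100 arbitrary units of work. */
    static void run_job(ProgressMeter *pm)
    {
        progress_set_remaining(pm, 100);
        for (int i = 0; i < 100; i++) {
            /* ... perform one unit of work ... */
            progress_work_done(pm, 1);
        }
    }

    /* Any other thread can sample progress at any time; the lock
     * guarantees @current and @total are consistent with each other. */
    static void report(ProgressMeter *pm)
    {
        uint64_t current, total;

        progress_get_snapshot(pm, &current, &total);
        printf("progress: %" PRIu64 "/%" PRIu64 "\n", current, total);
    }
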
41
include/qemu/qemu-options.h
Normal file
@ -0,0 +1,41 @@
/*
 * qemu-options.h
 *
 * Defines needed for command line argument processing.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Jes Sorensen <Jes.Sorensen@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef QEMU_OPTIONS_H
#define QEMU_OPTIONS_H

enum {

#define DEF(option, opt_arg, opt_enum, opt_help, arch_mask) \
    opt_enum,
#define DEFHEADING(text)
#define ARCHHEADING(text, arch_mask)

#include "qemu-options.def"
};

#endif
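
The enum body above is the X-macro pattern: qemu-options.def is a flat list of DEF(...) entries, and each inclusion site defines DEF to extract just the field it needs — here, only the enum constant. A self-contained illustration of the technique, with a made-up option list inlined as a macro; the option names are invented, not QEMU's actual option table:

    #include <stdio.h>

    /* Stand-in for qemu-options.def: one DEF() entry per option. */
    #define OPTIONS_DEF \
        DEF("machine", 1, OPTION_MACHINE, "select machine", 0) \
        DEF("smp",     1, OPTION_SMP,     "set vCPU count", 0)

    /* First expansion: keep only the enum constant, as qemu-options.h does. */
    enum {
    #define DEF(option, has_arg, opt_enum, help, arch_mask) opt_enum,
        OPTIONS_DEF
    #undef DEF
        OPTION_COUNT
    };

    /* Second expansion of the same list: build a name table. */
    static const char *option_names[] = {
    #define DEF(option, has_arg, opt_enum, help, arch_mask) [opt_enum] = option,
        OPTIONS_DEF
    #undef DEF
    };

    int main(void)
    {
        for (int i = 0; i < OPTION_COUNT; i++) {
            printf("%d: %s\n", i, option_names[i]);
        }
        return 0;
    }
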
@ -525,6 +525,15 @@ qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id,

char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn);

/**
 * qemu_plugin_insn_symbol() - best effort symbol lookup
 * @insn: instruction reference
 *
 * Return a static string referring to the symbol. This depends on
 * the binary QEMU is running having provided a symbol table.
 */
const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn);

/**
 * qemu_plugin_vcpu_for_each() - iterate over the existing vCPUs
 * @id: plugin ID
@ -540,6 +549,19 @@ void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb);

/**
 * qemu_plugin_register_atexit_cb() - register exit callback
 * @id: plugin ID
 * @cb: callback
 * @userdata: user data for callback
 *
 * The @cb function is called once execution has finished. Plugins
 * should be able to free all their resources at this point much like
 * after a reset/uninstall callback is called.
 *
 * In user-mode it is possible a few un-instrumented instructions from
 * child threads may run before the host kernel reaps the threads.
 */
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb, void *userdata);


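Together with qemu_plugin_user_exit() earlier, this callback gives a plugin one well-defined teardown point after all vCPU callbacks are quiesced. A minimal sketch of a plugin using it to release its state; the counter array is illustrative:

    #include <glib.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static guint64 *counts;  /* hypothetical per-plugin state */

    /* Runs once execution has finished; a safe place to free resources. */
    static void plugin_exit(qemu_plugin_id_t id, void *userdata)
    {
        g_free(counts);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_plugin_info_t *info,
                                               int argc, char **argv)
    {
        counts = g_new0(guint64, 64);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }
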
@ -14,9 +14,11 @@
#ifndef QEMU_RATELIMIT_H
#define QEMU_RATELIMIT_H

#include "qemu/lockable.h"
#include "qemu/timer.h"

typedef struct {
    QemuMutex lock;
    int64_t slice_start_time;
    int64_t slice_end_time;
    uint64_t slice_quota;
@ -40,7 +42,12 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    double delay_slices;

    assert(limit->slice_quota && limit->slice_ns);
    QEMU_LOCK_GUARD(&limit->lock);
    if (!limit->slice_quota) {
        /* Throttling disabled. */
        return 0;
    }
    assert(limit->slice_ns);

    if (limit->slice_end_time < now) {
        /* Previous, possibly extended, time slice finished; reset the
@ -65,11 +72,26 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
    return limit->slice_end_time - now;
}

static inline void ratelimit_init(RateLimit *limit)
{
    qemu_mutex_init(&limit->lock);
}

static inline void ratelimit_destroy(RateLimit *limit)
{
    qemu_mutex_destroy(&limit->lock);
}

static inline void ratelimit_set_speed(RateLimit *limit, uint64_t speed,
                                       uint64_t slice_ns)
{
    QEMU_LOCK_GUARD(&limit->lock);
    limit->slice_ns = slice_ns;
    limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
    if (speed == 0) {
        limit->slice_quota = 0;
    } else {
        limit->slice_quota = MAX(((double)speed * slice_ns) / 1000000000ULL, 1);
    }
}

#endif

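A sketch of the intended call pattern now that the limiter carries its own lock. The 64 MiB/s figure, 100 ms slice and chunking are illustrative, and the sleep is left to the caller (in QEMU, typically a coroutine sleep):

    #include "qemu/osdep.h"
    #include "qemu/ratelimit.h"

    /* Hypothetical copy loop throttled to ~64 MiB/s over 100 ms slices. */
    static void throttled_copy(uint64_t total_bytes)
    {
        RateLimit limit;

        ratelimit_init(&limit);
        ratelimit_set_speed(&limit, 64 * 1024 * 1024, 100 * 1000 * 1000);

        while (total_bytes > 0) {
            uint64_t chunk = MIN(total_bytes, 1024 * 1024);
            int64_t delay_ns = ratelimit_calculate_delay(&limit, chunk);

            if (delay_ns > 0) {
                /* ... sleep for delay_ns before doing more work ... */
            }
            /* ... copy @chunk bytes ... */
            total_bytes -= chunk;
        }

        ratelimit_destroy(&limit);
    }
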
@ -6,8 +6,8 @@
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef _SELFMAP_H_
#define _SELFMAP_H_
#ifndef SELFMAP_H
#define SELFMAP_H

typedef struct {
    unsigned long start;

@ -111,4 +111,15 @@ SocketAddress *socket_remote_address(int fd, Error **errp);
 */
SocketAddress *socket_address_flatten(SocketAddressLegacy *addr);

/**
 * socket_address_parse_named_fd:
 *
 * Modify @addr, replacing a named fd by its corresponding number.
 * Needed for callers that plan to pass @addr to a context where the
 * current monitor is not available.
 *
 * Returns 0 on success.
 */
int socket_address_parse_named_fd(SocketAddress *addr, Error **errp);

#endif /* QEMU_SOCKETS_H */

@ -21,7 +21,7 @@

typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
    uint64_t value;
    aligned_uint64_t value;
#else
    uint32_t low, high;
    uint32_t lock;

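The aligned_uint64_t switch matters on 32-bit hosts, where the ABI may align a plain uint64_t to only 4 bytes inside a struct; 64-bit atomics on such a field are then slow or invalid. The QEMU typedef boils down to an alignment attribute, and a standalone sketch of the effect follows (the struct layout is contrived to show the problem; the typedef name here is ours, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    /* Roughly what aligned_uint64_t provides: a uint64_t whose alignment
     * always equals its size, so 64-bit atomics on it are always valid. */
    typedef uint64_t aligned_u64 __attribute__((aligned(8)));

    struct stats {
        uint32_t count;    /* on a 32-bit ABI a plain uint64_t next could */
        aligned_u64 value; /* sit on a 4-byte boundary; this one cannot   */
    };

    int main(void)
    {
        struct stats s = {0};

        printf("alignment: %zu\n", _Alignof(aligned_u64));
        __atomic_store_n(&s.value, 42, __ATOMIC_RELAXED);
        printf("value: %llu\n",
               (unsigned long long)__atomic_load_n(&s.value, __ATOMIC_RELAXED));
        return 0;
    }
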
@ -4,12 +4,6 @@
#include <pthread.h>
#include <semaphore.h>

typedef QemuMutex QemuRecMutex;
#define qemu_rec_mutex_destroy qemu_mutex_destroy
#define qemu_rec_mutex_lock_impl qemu_mutex_lock_impl
#define qemu_rec_mutex_trylock_impl qemu_mutex_trylock_impl
#define qemu_rec_mutex_unlock qemu_mutex_unlock

struct QemuMutex {
    pthread_mutex_t lock;
#ifdef CONFIG_DEBUG_MUTEX
@ -19,6 +13,14 @@ struct QemuMutex {
    bool initialized;
};

/*
 * QemuRecMutex cannot be a typedef of QemuMutex lest we have two
 * compatible cases in _Generic. See qemu/lockable.h.
 */
typedef struct QemuRecMutex {
    QemuMutex m;
} QemuRecMutex;

struct QemuCond {
    pthread_cond_t cond;
    bool initialized;

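The comment above deserves unpacking: _Generic selects by the type of an expression, and two association labels naming compatible types make the selection ill-formed. So if QemuRecMutex were `typedef QemuMutex QemuRecMutex`, the dispatch in qemu/lockable.h could not list both. A standalone sketch of the failure and the one-member-struct fix; the names are illustrative, not QEMU's:

    #include <stdio.h>

    typedef struct Mutex { int m; } Mutex;

    #if 0
    /* Does not compile: RecMutex is the *same* type as Mutex, so the
     * two _Generic cases below would be duplicates. */
    typedef Mutex RecMutex;
    #else
    /* Wrapping in a one-member struct creates a distinct type. */
    typedef struct RecMutex { Mutex m; } RecMutex;
    #endif

    static void lock_plain(Mutex *m)        { puts("plain lock"); }
    static void lock_recursive(RecMutex *m) { puts("recursive lock"); }

    #define LOCK(x) _Generic((x),          \
            Mutex *: lock_plain,           \
            RecMutex *: lock_recursive)(x)

    int main(void)
    {
        Mutex m = {0};
        RecMutex r = {{0}};

        LOCK(&m);  /* dispatches to lock_plain */
        LOCK(&r);  /* dispatches to lock_recursive */
        return 0;
    }
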
@ -18,12 +18,6 @@ struct QemuRecMutex {
    bool initialized;
};

void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file,
                                int line);
void qemu_rec_mutex_unlock(QemuRecMutex *mutex);

struct QemuCond {
    CONDITION_VARIABLE var;
    bool initialized;

@ -28,6 +28,12 @@ int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
@ -104,6 +110,9 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
@ -129,8 +138,10 @@ static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
    return qemu_rec_mutex_trylock(mutex);
}

/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
void qemu_rec_mutex_init(QemuRecMutex *mutex);
static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

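Definitions like `static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)` above lean on a standard C subtlety: a function-like macro only expands when its name is directly followed by `(`, so parenthesizing the name in a definition suppresses the macro and lets a real function coexist with the macro of the same name (useful, for example, for taking its address). A standalone sketch of the idiom:

    #include <stdio.h>

    static void log_unlock_impl(const char *file, int line)
    {
        printf("unlock at %s:%d\n", file, line);
    }

    /* The macro version captures the call site... */
    #define log_unlock() log_unlock_impl(__FILE__, __LINE__)

    /* ...while the parenthesized name defines a real function of the same
     * name (the macro is not expanded here). */
    static inline void (log_unlock)(void)
    {
        log_unlock();  /* this call *does* expand the macro */
    }

    int main(void)
    {
        log_unlock();                   /* macro: reports this line */
        void (*fn)(void) = log_unlock;  /* no '(' follows: the function */
        fn();
        return 0;
    }
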
63
include/qemu/transactions.h
Normal file
@ -0,0 +1,63 @@
/*
 * Simple transactions API
 *
 * Copyright (c) 2021 Virtuozzo International GmbH.
 *
 * Author:
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * = Generic transaction API =
 *
 * The intended usage is the following: you create "prepare" functions, which
 * represent the actions. They will usually take a Transaction* argument and
 * call tran_add() to register finalization callbacks. For the finalization
 * callbacks, prepare corresponding TransactionActionDrv structures.
 *
 * Then, when you need to make a transaction, create an empty Transaction with
 * tran_new(), call your "prepare" functions on it, and finally call
 * tran_abort() or tran_commit() to finalize the transaction by running the
 * corresponding finalization actions in reverse order.
 */

#ifndef QEMU_TRANSACTIONS_H
#define QEMU_TRANSACTIONS_H

#include <gmodule.h>

typedef struct TransactionActionDrv {
    void (*abort)(void *opaque);
    void (*commit)(void *opaque);
    void (*clean)(void *opaque);
} TransactionActionDrv;

typedef struct Transaction Transaction;

Transaction *tran_new(void);
void tran_add(Transaction *tran, TransactionActionDrv *drv, void *opaque);
void tran_abort(Transaction *tran);
void tran_commit(Transaction *tran);

static inline void tran_finalize(Transaction *tran, int ret)
{
    if (ret < 0) {
        tran_abort(tran);
    } else {
        tran_commit(tran);
    }
}

#endif /* QEMU_TRANSACTIONS_H */
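
A minimal sketch of the usage pattern the header comment describes, assuming the tran_*() functions above are linked in from their implementation; the buffer "resources" are illustrative:

    #include "qemu/osdep.h"
    #include "qemu/transactions.h"

    /* One transactional action: allocate a buffer; undo by freeing it. */
    static void buf_abort(void *opaque)
    {
        g_free(opaque);
    }

    static TransactionActionDrv buf_drv = {
        .abort = buf_abort,
    };

    /* "Prepare" function: do the work, then register how to undo it. */
    static void *prepare_buffer(Transaction *tran, size_t size)
    {
        void *buf = g_malloc0(size);

        tran_add(tran, &buf_drv, buf);
        return buf;
    }

    static int setup_both(void)
    {
        Transaction *tran = tran_new();
        void *a = prepare_buffer(tran, 4096);
        void *b = prepare_buffer(tran, 8192);
        int ret = (a && b) ? 0 : -1; /* stand-in for real failure checks */

        /*
         * On failure the abort callbacks run in reverse order (freeing b,
         * then a); on success the (absent) commit callbacks would run.
         */
        tran_finalize(tran, ret);
        return ret;
    }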