mirror of
				https://github.com/qemu/qemu.git
				synced 2025-10-30 19:15:42 +00:00 
			
		
		
		
	 418026ca43
			
		
	
	
		418026ca43
		
	
	
	
	
		
			
			This is a library to manage the host vfio interface, which could be used to implement userspace device driver code in QEMU such as NVMe or net controllers. Signed-off-by: Fam Zheng <famz@redhat.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Message-Id: <20180116060901.17413-3-famz@redhat.com> Signed-off-by: Fam Zheng <famz@redhat.com>
		
			
				
	
	
		
			74 lines
		
	
	
		
			4.3 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
			
		
		
	
	
			74 lines
		
	
	
		
			4.3 KiB
		
	
	
	
		
			Plaintext
		
	
	
	
	
	
# See docs/devel/tracing.txt for syntax documentation.

# util/aio-posix.c
run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64

# util/async.c
aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"

# util/thread-pool.c
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"

# util/buffer.c
buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"
buffer_move_empty(const char *buf, size_t len, const char *from) "%s: %zd bytes from %s"
buffer_move(const char *buf, size_t len, const char *from) "%s: %zd bytes from %s"
buffer_free(const char *buf, size_t len) "%s: capacity %zd"

# util/qemu-coroutine.c
qemu_aio_coroutine_enter(void *ctx, void *from, void *to, void *opaque) "ctx %p from %p to %p opaque %p"
qemu_coroutine_yield(void *from, void *to) "from %p to %p"
qemu_coroutine_terminate(void *co) "self %p"

# util/qemu-coroutine-lock.c
qemu_co_queue_run_restart(void *co) "co %p"
qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_unlock_return(void *mutex, void *self) "mutex %p self %p"

# util/oslib-win32.c
# util/oslib-posix.c
qemu_memalign(size_t alignment, size_t size, void *ptr) "alignment %zu size %zu ptr %p"
qemu_anon_ram_alloc(size_t size, void *ptr) "size %zu ptr %p"
qemu_vfree(void *ptr) "ptr %p"
qemu_anon_ram_free(void *ptr, size_t size) "ptr %p size %zu"

# util/hbitmap.c
hbitmap_iter_skip_words(const void *hb, void *hbi, uint64_t pos, unsigned long cur) "hb %p hbi %p pos %"PRId64" cur 0x%lx"
hbitmap_reset(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64
hbitmap_set(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64

# util/lockcnt.c
lockcnt_fast_path_attempt(const void *lockcnt, int expected, int new) "lockcnt %p fast path %d->%d"
lockcnt_fast_path_success(const void *lockcnt, int expected, int new) "lockcnt %p fast path %d->%d succeeded"
lockcnt_unlock_attempt(const void *lockcnt, int expected, int new) "lockcnt %p unlock %d->%d"
lockcnt_unlock_success(const void *lockcnt, int expected, int new) "lockcnt %p unlock %d->%d succeeded"
lockcnt_futex_wait_prepare(const void *lockcnt, int expected, int new) "lockcnt %p preparing slow path %d->%d"
lockcnt_futex_wait(const void *lockcnt, int val) "lockcnt %p waiting on %d"
lockcnt_futex_wait_resume(const void *lockcnt, int new) "lockcnt %p after wait: %d"
lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter"

# util/qemu-thread.c
qemu_mutex_lock(void *mutex, const char *file, const int line) "waiting on mutex %p (%s:%d)"
qemu_mutex_locked(void *mutex, const char *file, const int line) "taken mutex %p (%s:%d)"
qemu_mutex_unlock(void *mutex, const char *file, const int line) "released mutex %p (%s:%d)"

# util/vfio-helpers.c
qemu_vfio_dma_reset_temporary(void *s) "s %p"
qemu_vfio_ram_block_added(void *s, void *p, size_t size) "s %p host %p size 0x%zx"
qemu_vfio_ram_block_removed(void *s, void *p, size_t size) "s %p host %p size 0x%zx"
qemu_vfio_find_mapping(void *s, void *p) "s %p host %p"
qemu_vfio_new_mapping(void *s, void *host, size_t size, int index, uint64_t iova) "s %p host %p size %zu index %d iova 0x%"PRIx64
qemu_vfio_do_mapping(void *s, void *host, size_t size, uint64_t iova) "s %p host %p size %zu iova 0x%"PRIx64
qemu_vfio_dma_map(void *s, void *host, size_t size, bool temporary, uint64_t *iova) "s %p host %p size %zu temporary %d iova %p"
qemu_vfio_dma_map_invalid(void *s, void *mapping_host, size_t mapping_size, void *host, size_t size) "s %p mapping %p %zu requested %p %zu"
qemu_vfio_dma_unmap(void *s, void *host) "s %p host %p"