diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index afd012e..2249608 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 85.94, + "coverage_score": 84.55, "exclude_path": "xtask", "crate_features": "" } diff --git a/vhost-device-gpu/CHANGELOG.md b/vhost-device-gpu/CHANGELOG.md index de9ed8e..22a7531 100644 --- a/vhost-device-gpu/CHANGELOG.md +++ b/vhost-device-gpu/CHANGELOG.md @@ -5,6 +5,8 @@ ### Changed +- [[#852]] (https://github.com/rust-vmm/vhost-device/pull/890) vhost-device-gpu: Refactor vhost-device-gpu + ### Fixed ### Deprecated diff --git a/vhost-device-gpu/Cargo.toml b/vhost-device-gpu/Cargo.toml index d6d2236..00ebf49 100644 --- a/vhost-device-gpu/Cargo.toml +++ b/vhost-device-gpu/Cargo.toml @@ -26,7 +26,7 @@ libc = "0.2" log = "0.4" [target.'cfg(not(target_env = "musl"))'.dependencies] -rutabaga_gfx = { version = "0.1.75", features = ["virgl_renderer"] } +rutabaga_gfx = "0.1.75" thiserror = "2.0.17" virglrenderer = {version = "0.1.2", optional = true } vhost = { version = "0.14.0", features = ["vhost-user-backend"] } diff --git a/vhost-device-gpu/README.md b/vhost-device-gpu/README.md index 4b32717..a049b30 100644 --- a/vhost-device-gpu/README.md +++ b/vhost-device-gpu/README.md @@ -87,39 +87,34 @@ Because blob resources are not yet supported, some capsets are limited: - gfxstream-vulkan and gfxstream-gles support are exposed, but can practically only be used for display output, there is no hardware acceleration yet. ## Features -The device leverages the [rutabaga_gfx](https://crates.io/crates/rutabaga_gfx) -crate to provide rendering with virglrenderer and gfxstream. - This crate supports two GPU backends: gfxstream (default) and virglrenderer. -Both require the system-provided virglrenderer and minigbm libraries due to the dependence on rutabaga_gfx. 
+ +The **virglrenderer** backend uses the [virglrenderer-rs](https://crates.io/crates/virglrenderer-rs) +crate, which provides Rust bindings to the native virglrenderer library. It translates +OpenGL API and Vulkan calls to an intermediate representation and allows for OpenGL +acceleration on the host. + +The **gfxstream** backend leverages the [rutabaga_gfx](https://crates.io/crates/rutabaga_gfx) +crate. With gfxstream rendering mode, GLES and Vulkan calls are forwarded to the host +with minimal modification. Install the development packages for your distro, then build with: ```session -CROSVM_USE_SYSTEM_VIRGLRENDERER=1 \ -CROSVM_USE_SYSTEM_MINIGBM=1 \ -cargo build +$ cargo build ``` gfxstream support is compiled by default, it can be disabled by not building with the `backend-gfxstream` feature flag, for example: ```session -CROSVM_USE_SYSTEM_VIRGLRENDERER=1 \ -CROSVM_USE_SYSTEM_MINIGBM=1 \ -cargo build --no-default-features +$ cargo build --no-default-features ``` -With Virglrenderer, Rutabaga translates OpenGL API and Vulkan calls to an -intermediate representation and allows for OpenGL acceleration on the host. - -With the gfxstream rendering mode, GLES and Vulkan calls are forwarded to the -host with minimal modification. 
- ## Examples First start the daemon on the host machine using either of the 2 gpu modes: -1) `virglrenderer` +1) `virglrenderer` (if the crate has been compiled with the feature `backend-virgl`) 2) `gfxstream` (if the crate has been compiled with the feature `backend-gfxstream`) ```shell diff --git a/vhost-device-gpu/src/backend/common.rs b/vhost-device-gpu/src/backend/common.rs new file mode 100644 index 0000000..147ad96 --- /dev/null +++ b/vhost-device-gpu/src/backend/common.rs @@ -0,0 +1,466 @@ +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use std::sync::{Arc, Mutex}; + +use log::{debug, error}; +use vhost::vhost_user::{ + gpu_message::{VhostUserGpuCursorPos, VhostUserGpuCursorUpdate, VhostUserGpuEdidRequest}, + GpuBackend, +}; +use vm_memory::VolatileSlice; + +use crate::{ + gpu_types::{FenceDescriptor, FenceState, Transfer3DDesc, VirtioGpuRing}, + protocol::{ + GpuResponse, + GpuResponse::{ErrUnspec, OkDisplayInfo, OkEdid, OkNoData}, + VirtioGpuResult, VIRTIO_GPU_MAX_SCANOUTS, + }, + renderer::Renderer, +}; + +#[derive(Debug, Clone)] +pub struct VirtioGpuScanout { + pub resource_id: u32, +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct AssociatedScanouts(u32); + +impl AssociatedScanouts { + #[allow(clippy::missing_const_for_fn)] + pub fn enable(&mut self, scanout_id: u32) { + self.0 |= 1 << scanout_id; + } + + #[allow(clippy::missing_const_for_fn)] + pub fn disable(&mut self, scanout_id: u32) { + self.0 &= !(1 << scanout_id); + } + + pub const fn has_any_enabled(self) -> bool { + self.0 != 0 + } + + pub fn iter_enabled(self) -> impl Iterator { + (0..VIRTIO_GPU_MAX_SCANOUTS).filter(move |i| ((self.0 >> i) & 1) == 1) + } +} + +pub const VHOST_USER_GPU_MAX_CURSOR_DATA_SIZE: usize = 16384; // 4*4*1024 +pub const READ_RESOURCE_BYTES_PER_PIXEL: usize = 4; + +#[derive(Copy, Clone, Debug, Default)] +pub struct CursorConfig { + pub width: u32, + pub height: u32, +} + +impl CursorConfig { + pub const fn 
expected_buffer_len(self) -> usize { + self.width as usize * self.height as usize * READ_RESOURCE_BYTES_PER_PIXEL + } +} + +pub fn common_display_info(gpu_backend: &GpuBackend) -> VirtioGpuResult { + let backend_display_info = gpu_backend.get_display_info().map_err(|e| { + error!("Failed to get display info: {e:?}"); + ErrUnspec + })?; + let display_info = backend_display_info + .pmodes + .iter() + .map(|display| (display.r.width, display.r.height, display.enabled == 1)) + .collect::>(); + debug!("Displays: {display_info:?}"); + Ok(OkDisplayInfo(display_info)) +} + +pub fn common_get_edid( + gpu_backend: &GpuBackend, + edid_req: VhostUserGpuEdidRequest, +) -> VirtioGpuResult { + debug!("edid request: {edid_req:?}"); + let edid = gpu_backend.get_edid(&edid_req).map_err(|e| { + error!("Failed to get edid from frontend: {e}"); + ErrUnspec + })?; + Ok(OkEdid { + blob: Box::from(&edid.edid[..edid.size as usize]), + }) +} + +pub fn common_process_fence( + fence_state: &Arc>, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, +) -> bool { + // In case the fence is signaled immediately after creation, don't add a return + // FenceDescriptor. + let mut fence_state = fence_state.lock().unwrap(); + if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) { + fence_state.descs.push(FenceDescriptor { + ring, + fence_id, + desc_index, + len, + }); + + false + } else { + true + } +} + +pub fn common_move_cursor( + gpu_backend: &GpuBackend, + resource_id: u32, + cursor: VhostUserGpuCursorPos, +) -> VirtioGpuResult { + if resource_id == 0 { + gpu_backend.cursor_pos_hide(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {e}"); + ErrUnspec + })?; + } else { + gpu_backend.cursor_pos(&cursor).map_err(|e| { + error!("Failed to set cursor pos from frontend: {e}"); + ErrUnspec + })?; + } + + Ok(GpuResponse::OkNoData) +} + +/// Reads cursor resource data into a buffer using transfer_read. 
+/// Returns a boxed slice containing the cursor pixel data. +pub fn common_read_cursor_resource( + renderer: &mut dyn Renderer, + resource_id: u32, + config: CursorConfig, +) -> Result, GpuResponse> { + let mut data = vec![0u8; config.expected_buffer_len()].into_boxed_slice(); + + let transfer = Transfer3DDesc { + x: 0, + y: 0, + z: 0, + w: config.width, + h: config.height, + d: 1, + level: 0, + stride: config.width * READ_RESOURCE_BYTES_PER_PIXEL as u32, + layer_stride: 0, + offset: 0, + }; + + // Create VolatileSlice from the buffer + // SAFETY: The buffer is valid for the entire duration of the transfer_read call + let volatile_slice = unsafe { VolatileSlice::new(data.as_mut_ptr(), data.len()) }; + + // ctx_id 0 is used for direct resource operations + renderer + .transfer_read(0, resource_id, transfer, Some(volatile_slice)) + .map_err(|e| { + error!("Failed to read cursor resource: {e:?}"); + ErrUnspec + })?; + + Ok(data) +} + +pub fn common_update_cursor( + gpu_backend: &GpuBackend, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + data: &[u8], + config: CursorConfig, +) -> VirtioGpuResult { + let expected_len = config.expected_buffer_len(); + + if data.len() != expected_len { + error!( + "Mismatched cursor data size: expected {}, got {}", + expected_len, + data.len() + ); + return Err(ErrUnspec); + } + + let data_ref: &[u8] = data; + let cursor_update = VhostUserGpuCursorUpdate { + pos: cursor_pos, + hot_x, + hot_y, + }; + let mut padded_data = [0u8; VHOST_USER_GPU_MAX_CURSOR_DATA_SIZE]; + padded_data[..data_ref.len()].copy_from_slice(data_ref); + + gpu_backend + .cursor_update(&cursor_update, &padded_data) + .map_err(|e| { + error!("Failed to update cursor: {e}"); + ErrUnspec + })?; + + Ok(OkNoData) +} + +pub fn common_set_scanout_disable(scanouts: &mut [Option], scanout_idx: usize) { + scanouts[scanout_idx] = None; + debug!("Disabling scanout scanout_id={scanout_idx}"); +} + +#[cfg(test)] +mod tests { + use std::{ + 
os::unix::net::UnixStream, + sync::{Arc, Mutex}, + }; + + use assert_matches::assert_matches; + + use super::*; + use crate::{ + gpu_types::VirtioGpuRing, + protocol::{GpuResponse::ErrUnspec, VIRTIO_GPU_MAX_SCANOUTS}, + }; + + const CURSOR_POS: VhostUserGpuCursorPos = VhostUserGpuCursorPos { + scanout_id: 0, + x: 0, + y: 0, + }; + const CURSOR_CONFIG: CursorConfig = CursorConfig { + width: 4, + height: 4, + }; + const BYTES_PER_PIXEL: usize = 4; + const EXPECTED_LEN: usize = + (CURSOR_CONFIG.width as usize) * (CURSOR_CONFIG.height as usize) * BYTES_PER_PIXEL; + + fn dummy_gpu_backend() -> GpuBackend { + let (_, backend) = UnixStream::pair().unwrap(); + GpuBackend::from_stream(backend) + } + + // AssociatedScanouts + // Test that enabling, disabling, iterating, and checking any enabled works as + // expected. + #[test] + fn associated_scanouts_enable_disable_iter_and_any() { + let mut assoc = AssociatedScanouts::default(); + + // No scanouts initially + assert!(!assoc.has_any_enabled()); + assert_eq!(assoc.iter_enabled().count(), 0); + + // Enable a couple + assoc.enable(0); + assoc.enable(3); + assert!(assoc.has_any_enabled()); + assert_eq!(assoc.iter_enabled().collect::>(), vec![0u32, 3u32]); + + // Disable one + assoc.disable(3); + assert!(assoc.has_any_enabled()); + assert_eq!(assoc.iter_enabled().collect::>(), vec![0u32]); + + // Disable last + assoc.disable(0); + assert!(!assoc.has_any_enabled()); + assert_eq!(assoc.iter_enabled().count(), 0); + } + + // CursorConfig + // Test that expected_buffer_len computes the correct size. + #[test] + fn cursor_config_expected_len() { + let cfg = CursorConfig { + width: 64, + height: 64, + }; + assert_eq!( + cfg.expected_buffer_len(), + 64 * 64 * READ_RESOURCE_BYTES_PER_PIXEL + ); + } + + // Update cursor + // Test that updating the cursor with mismatched data size fails. 
+ #[test] + fn update_cursor_mismatched_data_size_fails() { + let gpu_backend = dummy_gpu_backend(); + + // Data has length 1 (expected is 64) + let bad_data = [0u8]; + + let result = common_update_cursor(&gpu_backend, CURSOR_POS, 0, 0, &bad_data, CURSOR_CONFIG); + + assert_matches!(result, Err(ErrUnspec), "Should fail due to mismatched size"); + } + + // Test that updating the cursor with correct data size but backend failure + // returns ErrUnspec. + #[test] + fn update_cursor_backend_failure() { + let gpu_backend = dummy_gpu_backend(); + + // Data has the correct length (64 bytes) + let correct_data = vec![0u8; EXPECTED_LEN]; + + let result = + common_update_cursor(&gpu_backend, CURSOR_POS, 0, 0, &correct_data, CURSOR_CONFIG); + + assert_matches!( + result, + Err(ErrUnspec), + "Should fail due to failure to update cursor" + ); + } + + // Fence handling + // Test that processing a fence pushes a descriptor when the fence is new. + #[test] + fn process_fence_pushes_descriptor_when_new() { + let fence_state = Arc::new(Mutex::new(FenceState::default())); + let ring = VirtioGpuRing::Global; + + // Clone because common_process_fence takes ownership of ring + let ret = common_process_fence(&fence_state, ring.clone(), 42, 7, 512); + assert!(!ret, "New fence should not complete immediately"); + + let st = fence_state.lock().unwrap(); + assert_eq!(st.descs.len(), 1); + assert_eq!(st.descs[0].ring, ring); + assert_eq!(st.descs[0].fence_id, 42); + assert_eq!(st.descs[0].desc_index, 7); + assert_eq!(st.descs[0].len, 512); + drop(st); + } + + // Test that processing a fence that is already completed returns true + // immediately. + #[test] + fn process_fence_immediately_completes_when_already_done() { + let ring = VirtioGpuRing::Global; + + // Seed state so that ring's 100 is already completed. 
+ let mut seeded = FenceState::default(); + seeded.completed_fences.insert(ring.clone(), 100); + let fence_state = Arc::new(Mutex::new(seeded)); + + let ret = common_process_fence(&fence_state, ring, 100, 1, 4); + assert!(ret, "already-completed fence should return true"); + + let st = fence_state.lock().unwrap(); + assert!(st.descs.is_empty()); + drop(st); + } + + // Test that disabling a scanout clears the corresponding slot. + #[test] + fn set_scanout_disable_clears_slot() { + const N: usize = VIRTIO_GPU_MAX_SCANOUTS as usize; + let mut scanouts: [Option; N] = Default::default(); + + scanouts[5] = Some(VirtioGpuScanout { resource_id: 123 }); + common_set_scanout_disable(&mut scanouts, 5); + assert!(scanouts[5].is_none()); + } + + // Test backend operations with dummy backend (all should fail with ErrUnspec) + #[test] + fn backend_operations_without_frontend() { + let gpu_backend = dummy_gpu_backend(); + + // Test display_info + assert_matches!(common_display_info(&gpu_backend), Err(ErrUnspec)); + + // Test get_edid + let edid_req = VhostUserGpuEdidRequest { scanout_id: 0 }; + assert_matches!(common_get_edid(&gpu_backend, edid_req), Err(ErrUnspec)); + } + + // Test common_move_cursor for both hide (resource_id=0) and show + // (resource_id!=0) paths + #[test] + fn move_cursor_operations() { + let gpu_backend = dummy_gpu_backend(); + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: 0, + x: 50, + y: 50, + }; + + // Test hide cursor (resource_id = 0 calls cursor_pos_hide) + assert_matches!( + common_move_cursor(&gpu_backend, 0, cursor_pos), + Err(ErrUnspec) + ); + + // Test show cursor (non-zero resource_id calls cursor_pos) + assert_matches!( + common_move_cursor(&gpu_backend, 42, cursor_pos), + Err(ErrUnspec) + ); + } + + // Test AssociatedScanouts::disable + #[test] + fn associated_scanouts_disable_functionality() { + let mut scanouts = AssociatedScanouts::default(); + scanouts.enable(0); + scanouts.enable(2); + assert!(scanouts.has_any_enabled()); + + 
scanouts.disable(0); + assert!(scanouts.has_any_enabled()); // Still has 2 + assert_eq!(scanouts.iter_enabled().collect::>(), vec![2u32]); + + scanouts.disable(2); + assert!(!scanouts.has_any_enabled()); + } + + // Test CursorConfig expected_buffer_len calculation + #[test] + fn cursor_config_buffer_calculations() { + // Test various sizes: (width, height, expected_len) + for (width, height) in [(16, 16), (64, 64), (128, 128)] { + let config = CursorConfig { width, height }; + let expected = width as usize * height as usize * READ_RESOURCE_BYTES_PER_PIXEL; + assert_eq!(config.expected_buffer_len(), expected); + } + } + + // Test VirtioGpuScanout structure (creation and clone) + #[test] + fn virtio_gpu_scanout_operations() { + let scanout = VirtioGpuScanout { resource_id: 456 }; + assert_eq!(scanout.resource_id, 456); + } + + // Test fence processing with context-specific ring + #[test] + fn process_fence_context_specific_ring() { + let ring = VirtioGpuRing::ContextSpecific { + ctx_id: 5, + ring_idx: 2, + }; + let fence_state = Arc::new(Mutex::new(FenceState::default())); + + let ret = common_process_fence(&fence_state, ring.clone(), 100, 10, 256); + assert!(!ret, "New fence should not complete immediately"); + + let st = fence_state.lock().unwrap(); + assert_eq!(st.descs.len(), 1); + assert_eq!(st.descs[0].ring, ring); + assert_eq!(st.descs[0].fence_id, 100); + drop(st); + } +} diff --git a/vhost-device-gpu/src/backend/gfxstream.rs b/vhost-device-gpu/src/backend/gfxstream.rs new file mode 100644 index 0000000..c1b41ec --- /dev/null +++ b/vhost-device-gpu/src/backend/gfxstream.rs @@ -0,0 +1,1163 @@ +// Gfxstream backend device +// Copyright 2019 The ChromiumOS Authors +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use std::{ + cell::RefCell, + collections::BTreeMap, + io::IoSliceMut, + os::{fd::FromRawFd, raw::c_void}, + sync::{Arc, Mutex}, +}; + +use log::{debug, error, warn}; +use rutabaga_gfx::{ + ResourceCreate3D, 
Rutabaga, RutabagaBuilder, RutabagaComponentType, RutabagaFence, + RutabagaFenceHandler, RutabagaHandle, RutabagaIntoRawDescriptor, RutabagaIovec, Transfer3D, +}; +use vhost::vhost_user::{ + gpu_message::{ + VhostUserGpuCursorPos, VhostUserGpuEdidRequest, VhostUserGpuScanout, VhostUserGpuUpdate, + }, + GpuBackend, +}; +use vhost_user_backend::{VringRwLock, VringT}; +use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice}; +use vmm_sys_util::eventfd::EventFd; + +use crate::{ + backend::{ + common, + common::{common_set_scanout_disable, AssociatedScanouts, CursorConfig, VirtioGpuScanout}, + }, + device::Error, + gpu_types::{FenceState, ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, + protocol::{ + virtio_gpu_rect, GpuResponse, + GpuResponse::{ + ErrInvalidParameter, ErrInvalidResourceId, ErrUnspec, OkCapset, OkCapsetInfo, OkNoData, + OkResourcePlaneInfo, + }, + GpuResponsePlaneInfo, VirtioGpuResult, VIRTIO_GPU_FLAG_INFO_RING_IDX, + VIRTIO_GPU_MAX_SCANOUTS, + }, + renderer::Renderer, + GpuConfig, +}; + +// Number of bytes per pixel for reading 2D resources (assuming RGBA8 format) +const READ_RESOURCE_BYTES_PER_PIXEL: u32 = 4; + +// A local resource struct for the Gfxstream backend +#[derive(Default, Clone)] +pub struct GfxstreamResource { + pub id: u32, + pub width: u32, + pub height: u32, + scanouts: common::AssociatedScanouts, + pub info_3d: Option, + pub handle: Option>, +} + +impl GfxstreamResource { + fn calculate_size(&self) -> Result { + let width = self.width as usize; + let height = self.height as usize; + let size = width + .checked_mul(height) + .ok_or("Multiplication of width and height overflowed")? 
+ .checked_mul(READ_RESOURCE_BYTES_PER_PIXEL as usize) + .ok_or("Multiplication of result and bytes_per_pixel overflowed")?; + + Ok(size) + } +} + +impl GfxstreamResource { + /// Creates a new `GfxstreamResource` with 2D/3D metadata + pub fn new(resource_id: u32, width: u32, height: u32) -> Self { + Self { + id: resource_id, + width, + height, + scanouts: AssociatedScanouts::default(), + info_3d: None, + handle: None, + } + } +} + +// Thread-local storage for the Rutabaga instance. +// This allows each worker thread to have its own, non-shared instance. +thread_local! { + static TLS_RUTABAGA: RefCell> = const { RefCell::new(None) }; +} + +pub struct GfxstreamAdapter { + gpu_backend: GpuBackend, + resources: BTreeMap, + fence_state: Arc>, + scanouts: [Option; VIRTIO_GPU_MAX_SCANOUTS as usize], +} + +impl GfxstreamAdapter { + pub fn new(queue_ctl: &VringRwLock, gpu_config: &GpuConfig, gpu_backend: GpuBackend) -> Self { + let fence_state = Arc::new(Mutex::new(FenceState::default())); + let fence = Self::create_fence_handler(queue_ctl.clone(), fence_state.clone()); + + // Lazily initialize Rutabaga for the thread + TLS_RUTABAGA.with(|slot| { + if slot.borrow().is_none() { + let (builder, _component) = Self::configure_rutabaga_builder(gpu_config, fence); + let rb = builder.build().expect("Failed to build Rutabaga"); + *slot.borrow_mut() = Some(rb); + } + }); + + Self { + gpu_backend, + fence_state, + resources: BTreeMap::new(), + scanouts: Default::default(), + } + } + + fn create_fence_handler( + queue_ctl: VringRwLock, + fence_state: Arc>, + ) -> RutabagaFenceHandler { + RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| { + debug!( + "XXX - fence called: id={}, ring_idx={}", + completed_fence.fence_id, completed_fence.ring_idx + ); + + let mut fence_state = fence_state.lock().unwrap(); + let mut i = 0; + + let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { + 
ctx_id: completed_fence.ctx_id, + ring_idx: completed_fence.ring_idx, + }, + }; + + while i < fence_state.descs.len() { + debug!("XXX - fence_id: {}", fence_state.descs[i].fence_id); + if fence_state.descs[i].ring == ring + && fence_state.descs[i].fence_id <= completed_fence.fence_id + { + let completed_desc = fence_state.descs.remove(i); + debug!( + "XXX - found fence: desc_index={}", + completed_desc.desc_index + ); + + queue_ctl + .add_used(completed_desc.desc_index, completed_desc.len) + .unwrap(); + + queue_ctl + .signal_used_queue() + .map_err(Error::NotificationFailed) + .unwrap(); + debug!("Notification sent"); + } else { + i += 1; + } + } + + // Update the last completed fence for this context + fence_state + .completed_fences + .insert(ring, completed_fence.fence_id); + }) + } + + fn configure_rutabaga_builder( + gpu_config: &GpuConfig, + fence: RutabagaFenceHandler, + ) -> (RutabagaBuilder, RutabagaComponentType) { + let component = RutabagaComponentType::Gfxstream; + + let builder = RutabagaBuilder::new(gpu_config.capsets().bits(), fence) + .set_use_egl(gpu_config.flags().use_egl) + .set_use_gles(gpu_config.flags().use_gles) + .set_use_surfaceless(gpu_config.flags().use_surfaceless) + // Since vhost-user-gpu is out-of-process this is the only type of blob resource that + // could work, so this is always enabled + .set_use_external_blob(true); + + (builder, component) + } + + fn sglist_to_rutabaga_iovecs( + vecs: &[(GuestAddress, usize)], + mem: &GuestMemoryMmap, + ) -> std::result::Result, ()> { + if vecs + .iter() + .any(|&(addr, len)| mem.get_slice(addr, len).is_err()) + { + return Err(()); + } + + let mut rutabaga_iovecs: Vec = Vec::new(); + for &(addr, len) in vecs { + let slice = mem.get_slice(addr, len).unwrap(); + let iov = RutabagaIovec { + base: slice.ptr_guard_mut().as_ptr().cast::(), + len, + }; + rutabaga_iovecs.push(iov); + } + Ok(rutabaga_iovecs) + } + + fn with_rutabaga T>(f: F) -> T { + TLS_RUTABAGA.with(|slot| { + let mut opt = 
slot.borrow_mut(); + let rb = opt.as_mut().expect("Rutabaga not initialized"); + f(rb) + }) + } + + fn read_2d_resource(resource: &GfxstreamResource, output: &mut [u8]) -> Result<(), String> { + let minimal_buffer_size = resource.calculate_size()?; + assert!(output.len() >= minimal_buffer_size); + + let transfer = Transfer3D { + x: 0, + y: 0, + z: 0, + w: resource.width, + h: resource.height, + d: 1, + level: 0, + stride: resource.width * READ_RESOURCE_BYTES_PER_PIXEL, + layer_stride: 0, + offset: 0, + }; + Self::with_rutabaga(|rutabaga| { + rutabaga.transfer_read(0, resource.id, transfer, Some(IoSliceMut::new(output))) + }) + .map_err(|e| format!("{e}"))?; + + Ok(()) + } + + fn result_from_query(resource_id: u32) -> GpuResponse { + let Ok(query) = Self::with_rutabaga(|rutabaga| rutabaga.resource3d_info(resource_id)) + else { + return OkNoData; + }; + let mut plane_info = Vec::with_capacity(4); + for plane_index in 0..4 { + plane_info.push(GpuResponsePlaneInfo { + stride: query.strides[plane_index], + offset: query.offsets[plane_index], + }); + } + let format_modifier = query.modifier; + OkResourcePlaneInfo { + format_modifier, + plane_info, + } + } +} + +impl Renderer for GfxstreamAdapter { + fn resource_create_3d(&mut self, resource_id: u32, args: ResourceCreate3d) -> VirtioGpuResult { + let rutabaga_args: ResourceCreate3D = args.into(); + Self::with_rutabaga(|rutabaga| rutabaga.resource_create_3d(resource_id, rutabaga_args))?; + + let resource = GfxstreamResource { + id: resource_id, + width: rutabaga_args.width, + height: rutabaga_args.height, + scanouts: AssociatedScanouts::default(), + info_3d: None, + handle: None, + }; + debug_assert!( + !self.resources.contains_key(&resource_id), + "Resource ID {resource_id} already exists in the resources map." + ); + + // Rely on rutabaga to check for duplicate resource ids. 
+ self.resources.insert(resource_id, resource); + Ok(Self::result_from_query(resource_id)) + } + + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { + let resource = self.resources.remove(&resource_id); + match resource { + None => return Err(ErrInvalidResourceId), + // The spec doesn't say anything about this situation and this doesn't actually seem + // to happen in practise but let's be careful and refuse to disable the resource. + // This keeps the internal state of the gpu device and the fronted consistent. + Some(resource) if resource.scanouts.has_any_enabled() => { + warn!( + "The driver requested unref_resource, but resource {resource_id} has \ + associated scanouts, refusing to delete the resource." + ); + return Err(ErrUnspec); + } + _ => (), + } + Self::with_rutabaga(|rutabaga| rutabaga.unref_resource(resource_id))?; + + Ok(OkNoData) + } + + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + ) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| { + rutabaga.transfer_write(ctx_id, resource_id, transfer.into(), None) + })?; + Ok(OkNoData) + } + + fn transfer_write_2d( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + ) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| { + rutabaga.transfer_write(ctx_id, resource_id, transfer.into(), None) + })?; + Ok(OkNoData) + } + + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + buf: Option, + ) -> VirtioGpuResult { + let buf = buf.map(|vs| { + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, + ) + }); + + Self::with_rutabaga(|rutabaga| { + rutabaga.transfer_read(ctx_id, resource_id, transfer.into(), buf) + })?; + Ok(OkNoData) + } + + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult { + let rutabaga_iovecs = + 
Self::sglist_to_rutabaga_iovecs(&vecs[..], mem).map_err(|()| GpuResponse::ErrUnspec)?; + Self::with_rutabaga(|rutabaga| rutabaga.attach_backing(resource_id, rutabaga_iovecs))?; + Ok(OkNoData) + } + + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.detach_backing(resource_id))?; + Ok(OkNoData) + } + + fn update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult { + let config = CursorConfig { + width: 64, + height: 64, + }; + + let cursor_resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + if cursor_resource.width != config.width || cursor_resource.height != config.height { + error!("Cursor resource has invalid dimensions"); + return Err(ErrInvalidParameter); + } + + let data = common::common_read_cursor_resource(self, resource_id, config)?; + + common::common_update_cursor(&self.gpu_backend, cursor_pos, hot_x, hot_y, &data, config) + } + + fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult { + common::common_move_cursor(&self.gpu_backend, resource_id, cursor) + } + + fn resource_assign_uuid(&self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_assign_uuid"); + Err(ErrUnspec) + } + + fn get_capset_info(&self, index: u32) -> VirtioGpuResult { + debug!("get_capset_info index {index}"); + let (capset_id, version, size) = + Self::with_rutabaga(|rutabaga| rutabaga.get_capset_info(index))?; + Ok(OkCapsetInfo { + capset_id, + version, + size, + }) + } + + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult { + let capset = Self::with_rutabaga(|rutabaga| rutabaga.get_capset(capset_id, version))?; + Ok(OkCapset(capset)) + } + + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| { + 
rutabaga.create_context(ctx_id, context_init, context_name) + })?; + Ok(OkNoData) + } + + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.destroy_context(ctx_id))?; + Ok(OkNoData) + } + + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.context_attach_resource(ctx_id, resource_id))?; + Ok(OkNoData) + } + + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.context_detach_resource(ctx_id, resource_id))?; + Ok(OkNoData) + } + + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.submit_command(ctx_id, commands, fence_ids))?; + Ok(OkNoData) + } + + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult { + Self::with_rutabaga(|rutabaga| rutabaga.create_fence(rutabaga_fence))?; + Ok(OkNoData) + } + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool { + common::common_process_fence(&self.fence_state, ring, fence_id, desc_index, len) + } + + fn get_event_poll_fd(&self) -> Option { + Self::with_rutabaga(|rutabaga| { + rutabaga.poll_descriptor().map(|fd| { + // SAFETY: Safe, the fd should be valid, because Rutabaga guarantees it. + // into_raw_descriptor() returns a RawFd and makes sure SafeDescriptor::drop + // doesn't run. 
+ unsafe { EventFd::from_raw_fd(fd.into_raw_descriptor()) } + }) + }) + } + + fn event_poll(&self) { + Self::with_rutabaga(|rutabaga| { + rutabaga.event_poll(); + }); + } + + fn force_ctx_0(&self) { + Self::with_rutabaga(|rutabaga| { + rutabaga.force_ctx_0(); + }); + } + + fn display_info(&self) -> VirtioGpuResult { + common::common_display_info(&self.gpu_backend) + } + + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult { + common::common_get_edid(&self.gpu_backend, edid_req) + } + + fn set_scanout( + &mut self, + scanout_id: u32, + resource_id: u32, + rect: virtio_gpu_rect, + ) -> VirtioGpuResult { + let scanout_idx = scanout_id as usize; + if resource_id == 0 { + common_set_scanout_disable(&mut self.scanouts, scanout_idx); + + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: 0, + height: 0, + }) + .map_err(|e| { + error!("Failed to disable scanout: {e:?}"); + ErrUnspec + })?; + + return Ok(OkNoData); + } + + // If there was a different resource previously associated with this scanout, + // disable the scanout on that old resource + if let Some(old_scanout) = &self.scanouts[scanout_idx] { + let old_resource_id = old_scanout.resource_id; + if old_resource_id != resource_id { + if let Some(old_resource) = self.resources.get_mut(&old_resource_id) { + old_resource.scanouts.disable(scanout_id); + } + } + } + + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + debug!( + "Enabling legacy scanout scanout_id={scanout_id}, resource_id={resource_id}: {rect:?}" + ); + + self.gpu_backend + .set_scanout(&VhostUserGpuScanout { + scanout_id, + width: rect.width.into(), + height: rect.height.into(), + }) + .map_err(|e| { + error!("Failed to legacy set_scanout: {e:?}"); + ErrUnspec + })?; + + resource.scanouts.enable(scanout_id); + self.scanouts[scanout_idx] = Some(VirtioGpuScanout { resource_id }); + + // Send initial framebuffer update to QEMU + // This ensures the display is 
properly initialized + let resource_size = resource.calculate_size().map_err(|e| { + error!("Invalid resource size for scanout: {e:?}"); + ErrUnspec + })?; + + let mut data = vec![0; resource_size]; + + if let Err(e) = Self::read_2d_resource(resource, &mut data) { + error!("Failed to read resource {resource_id} for initial scanout {scanout_id}: {e}"); + } else { + // Send the initial framebuffer data to QEMU + self.gpu_backend + .update_scanout( + &VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.width, + height: resource.height, + }, + &data, + ) + .map_err(|e| { + error!("Failed to send initial framebuffer update: {e:?}"); + ErrUnspec + })?; + } + + Ok(OkNoData) + } + + fn flush_resource(&mut self, resource_id: u32, _rect: virtio_gpu_rect) -> VirtioGpuResult { + if resource_id == 0 { + return Ok(OkNoData); + } + + let resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)? + .clone(); + + for scanout_id in resource.scanouts.iter_enabled() { + // Gfxstream expects image memory transfer (read + send) + let resource_size = resource.calculate_size().map_err(|e| { + error!("Invalid resource size for flushing: {e:?}"); + ErrUnspec + })?; + + let mut data = vec![0; resource_size]; + + // Gfxstream doesn't support transfer_read for portion of the resource. So we + // always read the whole resource, even if the guest specified to + // flush only a portion of it. + // + // The function stream_renderer_transfer_read_iov seems to ignore the stride and + // transfer_box parameters and expects the provided buffer to fit the whole + // resource. 
+ if let Err(e) = Self::read_2d_resource(&resource, &mut data) { + error!("Failed to read resource {resource_id} for scanout {scanout_id}: {e}"); + continue; + } + + self.gpu_backend + .update_scanout( + &VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.width, + height: resource.height, + }, + &data, + ) + .map_err(|e| { + error!("Failed to update_scanout: {e:?}"); + ErrUnspec + })?; + } + Ok(OkNoData) + } + + fn resource_create_blob( + &mut self, + _ctx_id: u32, + _resource_id: u32, + _blob_id: u64, + _size: u64, + _blob_mem: u32, + _blob_flags: u32, + ) -> VirtioGpuResult { + error!("Not implemented: resource_create_blob"); + Err(ErrUnspec) + } + + fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult { + error!("Not implemented: resource_map_blob"); + Err(ErrUnspec) + } + + fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_unmap_blob"); + Err(ErrUnspec) + } +} + +#[cfg(test)] +mod gfx_fence_tests { + use std::{ + os::unix::net::UnixStream, + sync::{Arc, Mutex}, + }; + + use assert_matches::assert_matches; + use rusty_fork::rusty_fork_test; + use rutabaga_gfx::RutabagaFence; + use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; + + use super::*; + use crate::{ + gpu_types::{FenceDescriptor, FenceState, VirtioGpuRing}, + protocol::{ + VIRTIO_GPU_BIND_RENDER_TARGET, VIRTIO_GPU_FLAG_INFO_RING_IDX, + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, VIRTIO_GPU_TEXTURE_2D, + }, + testutils::{ + create_vring, test_capset_operations, test_fence_operations, test_move_cursor, + TestingDescChainArgs, + }, + GpuCapset, GpuFlags, GpuMode, + }; + + const CREATE_RESOURCE_2D_720P: ResourceCreate3d = ResourceCreate3d { + target: VIRTIO_GPU_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: VIRTIO_GPU_BIND_RENDER_TARGET, + width: 1280, + height: 720, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + + const CREATE_RESOURCE_CURSOR: 
ResourceCreate3d = ResourceCreate3d { + target: VIRTIO_GPU_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: VIRTIO_GPU_BIND_RENDER_TARGET, + width: 64, + height: 64, + depth: 1, + array_size: 1, + last_level: 0, + nr_samples: 0, + flags: 0, + }; + + fn dummy_gpu_backend() -> GpuBackend { + let (_, backend) = UnixStream::pair().unwrap(); + GpuBackend::from_stream(backend) + } + + /// Attempts to create a GPU adapter for testing. + /// Returns None if gfxstream initialization fails (e.g., in CI without GPU + /// drivers). + fn new_gpu() -> Option { + let config = GpuConfig::new( + GpuMode::Gfxstream, + Some(GpuCapset::GFXSTREAM_VULKAN | GpuCapset::GFXSTREAM_GLES), + GpuFlags::default(), + ) + .ok()?; + + let mem = vm_memory::GuestMemoryAtomic::new( + vm_memory::GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x20_000)]).unwrap(), + ); + let chains: [TestingDescChainArgs; 0] = []; + let (vring, _outs, _call_evt) = create_vring( + &mem, + &chains, + GuestAddress(0x20_00), + GuestAddress(0x40_00), + 64, + ); + + let fence_state = Arc::new(Mutex::new(FenceState::default())); + + let fence = GfxstreamAdapter::create_fence_handler(vring, fence_state.clone()); + + let builder = GfxstreamAdapter::configure_rutabaga_builder(&config, fence); + + // Try to build rutabaga - will fail in CI without GPU drivers + let rutabaga = match builder.0.build() { + Ok(r) => r, + Err(_) => { + // GPU not available (CI, no drivers, etc.) + return None; + } + }; + + // Install into TLS so Renderer methods can find it + TLS_RUTABAGA.with(|slot| { + *slot.borrow_mut() = Some(rutabaga); + }); + + Some(GfxstreamAdapter { + gpu_backend: dummy_gpu_backend(), + resources: BTreeMap::default(), + fence_state, + scanouts: Default::default(), + }) + } + + fn fence_desc(r: VirtioGpuRing, id: u64, idx: u16, len: u32) -> FenceDescriptor { + FenceDescriptor { + ring: r, + fence_id: id, + desc_index: idx, + len, + } + } + + rusty_fork_test! 
{ + #[test] + fn test_update_cursor_fails() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: 1, + x: 123, + y: 123, + }; + + // The resource doesn't exist + let result = gfxstream_gpu.update_cursor(1, cursor_pos, 0, 0); + assert_matches!(result, Err(ErrInvalidResourceId)); + + // Create a resource + gfxstream_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P).unwrap(); + + // The resource exists, but the dimensions are wrong + let result = gfxstream_gpu.update_cursor(1, cursor_pos, 0, 0); + assert_matches!(result, Err(ErrInvalidParameter)); + + // Create a resource with correct cursor dimensions + let cursor_resource_id = 2; + gfxstream_gpu + .resource_create_3d( + cursor_resource_id, + CREATE_RESOURCE_CURSOR).unwrap(); + + // Attach backing for cursor resource to ensure transfer_read works + let cursor_backing = GuestMemoryMmap::from_ranges(&[(GuestAddress(0xC0000), 0x10000)]).unwrap(); + gfxstream_gpu.attach_backing(cursor_resource_id, &cursor_backing, vec![(GuestAddress(0xC0000), 16384usize)]).unwrap(); + + // The resource exists, the dimensions are correct, and backing is attached + // This exercises common_read_cursor_resource and then fails at cursor_update (no frontend) + let result = gfxstream_gpu.update_cursor(cursor_resource_id, cursor_pos, 5, 5); + assert_matches!(result, Err(ErrUnspec)); + } + + #[test] + fn test_move_cursor_fails() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + test_move_cursor(&mut gfxstream_gpu); + } + + #[test] + fn test_process_fence() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + test_fence_operations(&mut gfxstream_gpu); + } + + #[test] + fn test_gpu_commands() { + let Some(mut gfxstream_gpu) = new_gpu() else { + 
eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + gfxstream_gpu.event_poll(); + gfxstream_gpu.get_event_poll_fd(); + gfxstream_gpu.force_ctx_0(); + gfxstream_gpu.display_info().unwrap_err(); + let edid_req = VhostUserGpuEdidRequest { + scanout_id: 0, + }; + gfxstream_gpu.get_edid(edid_req).unwrap_err(); + gfxstream_gpu.create_context(1, 0, None).unwrap(); + gfxstream_gpu.context_attach_resource(1, 1).unwrap_err(); + gfxstream_gpu.context_detach_resource(1, 1).unwrap_err(); + gfxstream_gpu.destroy_context(1).unwrap(); + } + + #[test] + fn test_transfer_read_and_write() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + let transfer_data: Transfer3DDesc = Transfer3DDesc::new_2d( + 0, + 0, + 64, + 64, + 0, + ); + gfxstream_gpu.transfer_read(1, 1, transfer_data, None).unwrap_err(); + gfxstream_gpu.transfer_write(1, 1, transfer_data).unwrap_err(); + gfxstream_gpu.transfer_write_2d(1, 1, transfer_data).unwrap_err(); + } + #[test] + fn test_create_and_unref_resources() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + + let rect = virtio_gpu_rect { x: 0.into(), y: 0.into(), width: 32.into(), height: 32.into() }; + // No resources exists, cannot unref anything: + assert!(gfxstream_gpu.resources.is_empty()); + let result = gfxstream_gpu.unref_resource(0); + assert_matches!(result, Err(_)); + + // Create a resource + let result = gfxstream_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P); + assert_matches!(result, Ok(_)); + assert_eq!(gfxstream_gpu.resources.len(), 1); + + // Backing memory for the resource: one 4-byte pixel at 0xA0000. + // (Keep this GuestMemoryMmap alive while attached.) + let gm_back = GuestMemoryMmap::from_ranges(&[(GuestAddress(0xA0000), 0x1000)]).unwrap(); + + // Write some bytes into the backing memory so transfer_write has data to pull. 
+ let pattern = [0x11, 0x22, 0x33, 0x44]; + gm_back.write(&pattern, GuestAddress(0xA0000)).unwrap(); + + // Attach that single iovec (addr,len) to the resource. + let sg = vec![(GuestAddress(0xA0000), 4usize)]; + gfxstream_gpu.attach_backing(1, &gm_back, sg).expect("attach_backing"); + // Detach the backing memory from the resource + gfxstream_gpu.detach_backing(1).expect("detach_backing"); + gfxstream_gpu.set_scanout(1, 1, rect).unwrap_err(); + gfxstream_gpu.set_scanout(1, 0, rect).unwrap_err(); + + gfxstream_gpu.flush_resource(1, rect).expect("flush_resource"); + // Unref the created resource + let result = gfxstream_gpu.unref_resource(1); + assert_matches!(result, Ok(_)); + assert!(gfxstream_gpu.resources.is_empty()); + } + + #[test] + fn test_flush_resource_with_scanout() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + + // Create a resource with specific dimensions + gfxstream_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P).unwrap(); + + // Manually enable a scanout on the resource to exercise flush_resource -> read_2d_resource path + // This bypasses set_scanout which would fail without a frontend + if let Some(resource) = gfxstream_gpu.resources.get_mut(&1) { + resource.scanouts.enable(0); // Enable scanout 0 + } + + let rect = virtio_gpu_rect { + x: 0.into(), + y: 0.into(), + width: 32.into(), + height: 32.into(), + }; + + // This should exercise the read_2d_resource path through flush_resource + // It will fail because there's no frontend, but that's after read_2d_resource is called + let _result = gfxstream_gpu.flush_resource(1, rect); + // Note: This may succeed or fail depending on whether update_scanout to backend fails + } + + #[test] + fn test_gpu_capset() { + let Some(gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + test_capset_operations(&gfxstream_gpu, 0); + } + + #[test] + fn 
test_gpu_submit_command_fails() { + let Some(mut gfxstream_gpu) = new_gpu() else { + eprintln!("Skipping test: GPU not available (no drivers in CI)"); + return; + }; + let mut cmd_buf = [0; 10]; + let fence_ids: Vec = Vec::with_capacity(0); + gfxstream_gpu + .submit_command(1, &mut cmd_buf[..], &fence_ids) + .unwrap_err(); + } + } + + #[test] + fn fence_handler_global_and_context_paths() { + // One guest memory arena is fine for both sub-cases. + let mem = vm_memory::GuestMemoryAtomic::new( + vm_memory::GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x20_000)]).unwrap(), + ); + let chains: [TestingDescChainArgs; 0] = []; + + // ------------------------------- + // A) Global ring: match → remove + notify + // ------------------------------- + let (vring_a, _outs_a, call_evt_a) = create_vring( + &mem, + &chains, + GuestAddress(0x2000), + GuestAddress(0x4000), + 64, + ); + + let fence_state_a = Arc::new(Mutex::new(FenceState { + descs: vec![ + fence_desc(VirtioGpuRing::Global, 5, 3, 64), + fence_desc(VirtioGpuRing::Global, 9, 4, 64), + ], + completed_fences: BTreeMap::default(), + })); + + let handler_a = GfxstreamAdapter::create_fence_handler(vring_a, fence_state_a.clone()); + + // Drain any stale signal (ignore WouldBlock) + let _ = call_evt_a.read(); + + handler_a.call(RutabagaFence { + fence_id: 7, + ctx_id: 0, + ring_idx: 0, // Global + flags: 0, + }); + + { + let st_a = fence_state_a.lock().unwrap(); + assert_eq!(st_a.descs.len(), 1); + assert_eq!(st_a.descs[0].fence_id, 9); + assert_eq!( + st_a.completed_fences.get(&VirtioGpuRing::Global), + Some(&7u64) + ); + drop(st_a); + } + assert_eq!( + call_evt_a.read().unwrap(), + 1, + "queue should be signaled once" + ); + + // ------------------------------- + // B) Context-specific ring: match → remove + notify + // ------------------------------- + let (vring_b, _outs_b, call_evt_b) = create_vring( + &mem, + &chains, + GuestAddress(0x6000), + GuestAddress(0x8000), + 32, + ); + + let fence_state_b = 
Arc::new(Mutex::new(FenceState { + // Only global, no context-specific fences + descs: vec![fence_desc(VirtioGpuRing::Global, 7, 1, 1)], + completed_fences: BTreeMap::default(), + })); + + let handler_b = GfxstreamAdapter::create_fence_handler(vring_b, fence_state_b.clone()); + + handler_b.call(RutabagaFence { + fence_id: 6, + ctx_id: 42, + ring_idx: 3, + flags: VIRTIO_GPU_FLAG_INFO_RING_IDX, // use context ring + }); + + { + let st_b = fence_state_b.lock().unwrap(); + assert_eq!(st_b.descs.len(), 1, "no descriptor should be removed"); + let key = VirtioGpuRing::ContextSpecific { + ctx_id: 42, + ring_idx: 3, + }; + assert_eq!(st_b.completed_fences.get(&key), Some(&6u64)); + drop(st_b); + } + assert!( + call_evt_b.read().is_err(), + "no signal expected when no match" + ); + } + + // GfxstreamResource::calculate_size + // Tests for normal and overflow cases. + #[test] + fn calculate_size_ok() { + let r = GfxstreamResource { + id: 1, + width: 64, + height: 64, + ..Default::default() + }; + // 64 * 64 * 4 BPP = 16384 + assert_eq!( + r.calculate_size().unwrap(), + 64 * 64 * (READ_RESOURCE_BYTES_PER_PIXEL as usize) + ); + } + + #[test] + fn calculate_size_overflow_width_height() { + // Width * Height overflows u32 + let r = GfxstreamResource { + id: 1, + width: u32::MAX, + height: u32::MAX, + ..Default::default() + }; + r.calculate_size().unwrap_err(); + } + + #[test] + fn calculate_size_overflow_bpp_multiply() { + // Large width * height that fits in usize but overflows when * BPP + let big = (usize::MAX / (READ_RESOURCE_BYTES_PER_PIXEL as usize)).saturating_add(1); + let r = GfxstreamResource { + id: 1, + width: big as u32, + height: 1, + ..Default::default() + }; + // On 64-bit this should error; if it happens to fit on 32-bit, the guard still + // holds elsewhere. 
+ let _ = r.calculate_size().err(); + } + + // sglist_to_rutabaga_iovecs tests + + #[test] + fn sglist_to_rutabaga_iovecs_ok() { + // Two mapped regions + let gm = GuestMemoryMmap::from_ranges(&[ + (GuestAddress(0x1000), 0x2000), // [0x1000..0x3000) + (GuestAddress(0x9000), 0x1000), // [0x9000..0xA000) + ]) + .expect("GuestMemoryMmap"); + + // Three valid segments, all inside mapped memory + let sg = [ + (GuestAddress(0x1000), 16usize), + (GuestAddress(0x1010), 32usize), + (GuestAddress(0x9000), 8usize), + ]; + + let iovs = GfxstreamAdapter::sglist_to_rutabaga_iovecs(&sg[..], &gm).expect("iovecs"); + + assert_eq!(iovs.len(), 3); + assert_eq!(iovs[0].len, 16); + assert!(!iovs[0].base.is_null()); + assert_eq!(iovs[1].len, 32); + assert!(!iovs[1].base.is_null()); + assert_eq!(iovs[2].len, 8); + assert!(!iovs[2].base.is_null()); + } + + #[test] + fn sglist_to_rutabaga_iovecs_err_on_any_bad_segment() { + let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x2000), 0x1000)]).unwrap(); + // This segment starts outside mapped memory, it should Err(()) + let sg = [(GuestAddress(0x4000), 16usize)]; + + assert!(GfxstreamAdapter::sglist_to_rutabaga_iovecs(&sg[..], &gm).is_err()); + } +} diff --git a/vhost-device-gpu/src/backend/mod.rs b/vhost-device-gpu/src/backend/mod.rs new file mode 100644 index 0000000..c3fe127 --- /dev/null +++ b/vhost-device-gpu/src/backend/mod.rs @@ -0,0 +1,9 @@ +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +mod common; +#[cfg(feature = "backend-gfxstream")] +pub mod gfxstream; +#[cfg(feature = "backend-virgl")] +pub mod virgl; diff --git a/vhost-device-gpu/src/backend/virgl.rs b/vhost-device-gpu/src/backend/virgl.rs new file mode 100644 index 0000000..b1b6279 --- /dev/null +++ b/vhost-device-gpu/src/backend/virgl.rs @@ -0,0 +1,928 @@ +// Virglrenderer backend device +// Copyright 2019 The ChromiumOS Authors +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use 
std::{ + collections::BTreeMap, + io::IoSliceMut, + os::fd::{AsFd, FromRawFd, IntoRawFd, RawFd}, + sync::{Arc, Mutex}, +}; + +use libc::c_void; +use log::{debug, error, trace, warn}; +use rutabaga_gfx::RutabagaFence; +use vhost::vhost_user::{ + gpu_message::{ + VhostUserGpuCursorPos, VhostUserGpuDMABUFScanout, VhostUserGpuDMABUFScanout2, + VhostUserGpuEdidRequest, VhostUserGpuUpdate, + }, + GpuBackend, +}; +use vhost_user_backend::{VringRwLock, VringT}; +use virglrenderer::{ + FenceHandler, Iovec, VirglContext, VirglRenderer, VirglRendererFlags, VirglResource, + VIRGL_HANDLE_TYPE_MEM_DMABUF, +}; +use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice}; +use vmm_sys_util::eventfd::EventFd; + +use crate::{ + backend::{ + common, + common::{common_set_scanout_disable, AssociatedScanouts, CursorConfig, VirtioGpuScanout}, + }, + gpu_types::{FenceState, ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, + protocol::{ + virtio_gpu_rect, GpuResponse, + GpuResponse::{ + ErrInvalidContextId, ErrInvalidParameter, ErrInvalidResourceId, ErrInvalidScanoutId, + ErrUnspec, OkCapset, OkCapsetInfo, OkNoData, + }, + VirtioGpuResult, VIRTIO_GPU_MAX_SCANOUTS, + }, + renderer::Renderer, + GpuConfig, +}; + +const CAPSET_ID_VIRGL: u32 = 1; +const CAPSET_ID_VIRGL2: u32 = 2; +const CAPSET_ID_VENUS: u32 = 4; + +#[derive(Clone)] +pub struct GpuResource { + pub virgl_resource: VirglResource, + // Stores information about which scanouts are associated with the given + // resource. Resource could be used for multiple scanouts. 
+ pub scanouts: AssociatedScanouts, + pub backing_iovecs: Arc>>>, +} + +fn sglist_to_iovecs( + vecs: &[(GuestAddress, usize)], + mem: &GuestMemoryMmap, +) -> Result, ()> { + if vecs + .iter() + .any(|&(addr, len)| mem.get_slice(addr, len).is_err()) + { + return Err(()); + } + + let mut virgl_iovecs: Vec = Vec::new(); + for &(addr, len) in vecs { + let slice = mem.get_slice(addr, len).unwrap(); + virgl_iovecs.push(Iovec { + base: slice.ptr_guard_mut().as_ptr().cast::(), + len, + }); + } + Ok(virgl_iovecs) +} + +impl From for GpuResponse { + fn from(_: virglrenderer::VirglError) -> Self { + ErrUnspec + } +} +pub struct VirglFenceHandler { + queue_ctl: VringRwLock, + fence_state: Arc>, +} + +impl VirglFenceHandler { + pub const fn new(queue_ctl: VringRwLock, fence_state: Arc>) -> Self { + Self { + queue_ctl, + fence_state, + } + } +} + +impl FenceHandler for VirglFenceHandler { + fn call(&self, fence_id: u64, ctx_id: u32, ring_idx: u8) { + let mut fence_state = self.fence_state.lock().unwrap(); + let mut i = 0; + + let ring = match ring_idx { + 0 => VirtioGpuRing::Global, + _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx }, + }; + + while i < fence_state.descs.len() { + if fence_state.descs[i].ring == ring && fence_state.descs[i].fence_id <= fence_id { + let completed_desc = fence_state.descs.remove(i); + + self.queue_ctl + .add_used(completed_desc.desc_index, completed_desc.len) + .unwrap(); + + self.queue_ctl + .signal_used_queue() + .map_err(|e| log::error!("Failed to signal queue: {e:?}")) + .unwrap(); + } else { + i += 1; + } + } + + fence_state.completed_fences.insert(ring, fence_id); + } +} + +pub struct VirglRendererAdapter { + renderer: VirglRenderer, + gpu_backend: GpuBackend, + fence_state: Arc>, + resources: BTreeMap, + contexts: BTreeMap, + scanouts: [Option; VIRTIO_GPU_MAX_SCANOUTS as usize], +} + +impl VirglRendererAdapter { + pub fn new(queue_ctl: &VringRwLock, config: &GpuConfig, gpu_backend: GpuBackend) -> Self { + let virglrenderer_flags = 
VirglRendererFlags::new() + .use_virgl(true) + .use_venus(true) + .use_egl(config.flags().use_egl) + .use_gles(config.flags().use_gles) + .use_glx(config.flags().use_glx) + .use_surfaceless(config.flags().use_surfaceless) + .use_external_blob(true) + .use_async_fence_cb(true) + .use_thread_sync(true); + let fence_state = Arc::new(Mutex::new(FenceState::default())); + let fence_handler = Box::new(VirglFenceHandler::new( + queue_ctl.clone(), + fence_state.clone(), + )); + + let renderer = VirglRenderer::init(virglrenderer_flags, fence_handler, None) + .expect("Failed to initialize virglrenderer"); + Self { + renderer, + gpu_backend, + fence_state, + resources: BTreeMap::new(), + contexts: BTreeMap::new(), + scanouts: Default::default(), + } + } +} + +impl Renderer for VirglRendererAdapter { + fn resource_create_3d(&mut self, resource_id: u32, args: ResourceCreate3d) -> VirtioGpuResult { + let virgl_args: virglrenderer::ResourceCreate3D = args.into(); + + let virgl_resource = self + .renderer + .create_3d(resource_id, virgl_args) + .map_err(|_| ErrUnspec)?; + let local_resource = GpuResource { + virgl_resource, + scanouts: AssociatedScanouts::default(), + backing_iovecs: Arc::new(Mutex::new(None)), + }; + self.resources.insert(resource_id, local_resource); + Ok(OkNoData) + } + + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { + let resource = self.resources.remove(&resource_id); + match resource { + None => return Err(ErrInvalidResourceId), + // The spec doesn't say anything about this situation and this doesn't actually seem + // to happen in practice but let's be careful and refuse to disable the resource. + // This keeps the internal state of the gpu device and the frontend consistent. + Some(resource) if resource.scanouts.has_any_enabled() => { + warn!( + "The driver requested unref_resource, but resource {resource_id} has \ + associated scanouts, refusing to delete the resource."
+ ); + return Err(ErrUnspec); + } + _ => (), + } + self.renderer.unref_resource(resource_id); + Ok(OkNoData) + } + + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + ) -> VirtioGpuResult { + trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); + + self.renderer + .transfer_write(resource_id, ctx_id, transfer.into(), None)?; + Ok(OkNoData) + } + fn transfer_write_2d( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + ) -> VirtioGpuResult { + trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); + self.renderer + .transfer_write(resource_id, ctx_id, transfer.into(), None)?; + Ok(OkNoData) + } + + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + transfer: Transfer3DDesc, + buf: Option, + ) -> VirtioGpuResult { + let buf = buf.map(|vs| { + IoSliceMut::new( + // SAFETY: trivially safe + unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, + ) + }); + + self.renderer + .transfer_read(resource_id, ctx_id, transfer.into(), buf)?; + Ok(OkNoData) + } + + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult { + let mut iovs: Vec = sglist_to_iovecs(&vecs, mem).map_err(|()| ErrUnspec)?; + + // Tell virgl to use our iovec array (pointer must stay valid afterwards) + self.renderer.attach_backing(resource_id, &mut iovs)?; + + // Keep the Vec alive so the buffer’s pointer stays valid + let res = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + res.backing_iovecs.lock().unwrap().replace(iovs); + + Ok(OkNoData) + } + + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { + self.renderer.detach_backing(resource_id); + if let Some(r) = self.resources.get_mut(&resource_id) { + r.backing_iovecs.lock().unwrap().take(); // drop our boxed iovecs + } + Ok(OkNoData) + } + + fn 
update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult { + let config = CursorConfig { + width: 64, + height: 64, + }; + + let cursor_resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + if cursor_resource.virgl_resource.width != config.width + || cursor_resource.virgl_resource.height != config.height + { + error!("Cursor resource has invalid dimensions"); + return Err(ErrInvalidParameter); + } + + let data = common::common_read_cursor_resource(self, resource_id, config)?; + + common::common_update_cursor(&self.gpu_backend, cursor_pos, hot_x, hot_y, &data, config) + } + + fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult { + common::common_move_cursor(&self.gpu_backend, resource_id, cursor) + } + + fn resource_assign_uuid(&self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_assign_uuid"); + Err(ErrUnspec) + } + + fn get_capset_info(&self, index: u32) -> VirtioGpuResult { + debug!("the capset index is {index}"); + let capset_id = match index { + 0 => CAPSET_ID_VIRGL, + 1 => CAPSET_ID_VIRGL2, + 3 => CAPSET_ID_VENUS, + _ => return Err(ErrInvalidParameter), + }; + let (version, size) = self.renderer.get_capset_info(index); + Ok(OkCapsetInfo { + capset_id, + version, + size, + }) + } + + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult { + let capset = self.renderer.get_capset(capset_id, version); + Ok(OkCapset(capset)) + } + + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult { + if self.contexts.contains_key(&ctx_id) { + return Err(ErrUnspec); + } + + // Create the VirglContext using virglrenderer + let ctx = virglrenderer::VirglContext::create_context(ctx_id, context_init, context_name) + .map_err(|_| ErrInvalidContextId)?; + + // Insert the newly created context into our local BTreeMap. 
+ self.contexts.insert(ctx_id, ctx); + Ok(OkNoData) + } + + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult { + self.contexts.remove(&ctx_id).ok_or(ErrInvalidContextId)?; + Ok(OkNoData) + } + + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + let ctx = self.contexts.get_mut(&ctx_id).ok_or(ErrInvalidContextId)?; + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + ctx.attach(&mut resource.virgl_resource); + Ok(OkNoData) + } + + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { + let ctx = self.contexts.get_mut(&ctx_id).ok_or(ErrInvalidContextId)?; + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + ctx.detach(&resource.virgl_resource); + Ok(OkNoData) + } + + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult { + let ctx = self.contexts.get_mut(&ctx_id).ok_or(ErrInvalidContextId)?; + + ctx.submit_cmd(commands, fence_ids) + .map(|()| OkNoData) + .map_err(|_| ErrUnspec) + } + + fn create_fence(&mut self, fence: RutabagaFence) -> VirtioGpuResult { + // Convert the fence ID to u32 + let fence_id_u32 = u32::try_from(fence.fence_id).map_err(|_| GpuResponse::ErrUnspec)?; + + self.renderer + .create_fence(fence_id_u32, fence.ctx_id) + .map_err(|_| ErrUnspec)?; + Ok(OkNoData) + } + + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool { + common::common_process_fence(&self.fence_state, ring, fence_id, desc_index, len) + } + + fn get_event_poll_fd(&self) -> Option { + // SAFETY: The fd is guaranteed to be a valid owned descriptor. 
+ self.renderer + .poll_descriptor() + .map(|fd| unsafe { EventFd::from_raw_fd(fd.into_raw_fd()) }) + } + + fn event_poll(&self) { + self.renderer.event_poll(); + } + + fn force_ctx_0(&self) { + self.renderer.force_ctx_0(); + } + + fn display_info(&self) -> VirtioGpuResult { + common::common_display_info(&self.gpu_backend) + } + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult { + common::common_get_edid(&self.gpu_backend, edid_req) + } + fn set_scanout( + &mut self, + scanout_id: u32, + resource_id: u32, + rect: virtio_gpu_rect, + ) -> VirtioGpuResult { + let scanout_idx = scanout_id as usize; + // Basic Validation of scanout_id + if scanout_idx >= VIRTIO_GPU_MAX_SCANOUTS as usize { + return Err(ErrInvalidScanoutId); + } + + // Handle existing scanout to disable it if necessary (like QEMU) + let current_scanout_resource_id = + self.scanouts[scanout_idx].as_ref().map(|s| s.resource_id); + if let Some(old_resource_id) = current_scanout_resource_id { + if old_resource_id != resource_id { + // Only disable if resource_id changes + if let Some(old_resource) = self.resources.get_mut(&old_resource_id) { + old_resource.scanouts.disable(scanout_id); + } + } + } + + // Handle Resource ID 0 (Disable Scanout) + if resource_id == 0 { + common_set_scanout_disable(&mut self.scanouts, scanout_idx); + + // Send VHOST_USER_GPU_DMABUF_SCANOUT message with FD = -1 + self.gpu_backend + .set_dmabuf_scanout( + &VhostUserGpuDMABUFScanout { + scanout_id, + x: 0, + y: 0, + width: 0, + height: 0, + fd_width: 0, + fd_height: 0, + fd_stride: 0, + fd_flags: 0, + fd_drm_fourcc: 0, + }, + None::<&RawFd>, // Send None for the FD, which translates to -1 in the backend + ) + .map_err(|e| { + error!("Failed to send DMABUF scanout disable message: {e:?}"); + ErrUnspec + })?; + return Ok(OkNoData); + } + + // Handling non-zero resource_id (Enable/Update Scanout) + let resource = self + .resources + .get_mut(&resource_id) + .ok_or(ErrInvalidResourceId)?; + + // Extract the 
DMABUF information (handle and info_3d) + let handle = resource.virgl_resource.handle.as_ref().ok_or_else(|| { + error!("resource {resource_id} has no handle"); + ErrUnspec + })?; + + if handle.handle_type != VIRGL_HANDLE_TYPE_MEM_DMABUF { + error!( + "resource {} handle is not a DMABUF (got type = {})", + resource_id, handle.handle_type + ); + return Err(ErrUnspec); + } + + // Borrow the 3D info directly; no DmabufTextureInfo wrapper. + let info_3d = resource.virgl_resource.info_3d.as_ref().ok_or_else(|| { + error!("resource {resource_id} has handle but no info_3d"); + ErrUnspec + })?; + + // Clone the fd we’ll pass to the backend. + let fd = handle.os_handle.try_clone().map_err(|e| { + error!("Failed to clone DMABUF FD for resource {resource_id}: {e:?}"); + ErrUnspec + })?; + + debug!( + "Using stored DMABUF texture info for resource {}: width={}, height={}, strides={}, fourcc={}, modifier={}", + resource_id, info_3d.width, info_3d.height, info_3d.strides[0], info_3d.drm_fourcc, info_3d.modifier + ); + + // Construct VhostUserGpuDMABUFScanout Message + let dmabuf_scanout_payload = VhostUserGpuDMABUFScanout { + scanout_id, + x: rect.x.into(), + y: rect.y.into(), + width: rect.width.into(), + height: rect.height.into(), + fd_width: info_3d.width, + fd_height: info_3d.height, + fd_stride: info_3d.strides[0], + fd_flags: 0, + fd_drm_fourcc: info_3d.drm_fourcc, + }; + + // Determine which message type to send based on modifier support + let frontend_supports_dmabuf2 = info_3d.modifier != 0; + + if frontend_supports_dmabuf2 { + let dmabuf_scanout2_msg = VhostUserGpuDMABUFScanout2 { + dmabuf_scanout: dmabuf_scanout_payload, + modifier: info_3d.modifier, + }; + self.gpu_backend + .set_dmabuf_scanout2(&dmabuf_scanout2_msg, Some(&fd.as_fd())) + .map_err(|e| { + error!( + "Failed to send VHOST_USER_GPU_DMABUF_SCANOUT2 for resource {resource_id}: {e:?}" + ); + ErrUnspec + })?; + } else { + self.gpu_backend + .set_dmabuf_scanout(&dmabuf_scanout_payload, Some(&fd.as_fd())) + 
.map_err(|e| { + error!( + "Failed to send VHOST_USER_GPU_DMABUF_SCANOUT for resource {resource_id}: {e:?}" + ); + ErrUnspec + })?; + } + + debug!( + "Sent DMABUF scanout for resource {} using fd {:?}", + resource_id, + fd.as_fd() + ); + + // Update internal state to associate resource with scanout + resource.scanouts.enable(scanout_id); + self.scanouts[scanout_idx] = Some(VirtioGpuScanout { resource_id }); + + Ok(OkNoData) + } + + fn flush_resource(&mut self, resource_id: u32, _rect: virtio_gpu_rect) -> VirtioGpuResult { + if resource_id == 0 { + return Ok(OkNoData); + } + + let resource = self + .resources + .get(&resource_id) + .ok_or(ErrInvalidResourceId)? + .clone(); + + for scanout_id in resource.scanouts.iter_enabled() { + // For VirglRenderer, use update_dmabuf_scanout (no image copy) + self.gpu_backend + .update_dmabuf_scanout(&VhostUserGpuUpdate { + scanout_id, + x: 0, + y: 0, + width: resource.virgl_resource.width, + height: resource.virgl_resource.height, + }) + .map_err(|e| { + error!("Failed to update_dmabuf_scanout: {e:?}"); + ErrUnspec + })?; + } + Ok(OkNoData) + } + + fn resource_create_blob( + &mut self, + _ctx_id: u32, + _resource_id: u32, + _blob_id: u64, + _size: u64, + _blob_mem: u32, + _blob_flags: u32, + ) -> VirtioGpuResult { + error!("Not implemented: resource_create_blob"); + Err(ErrUnspec) + } + + fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult { + error!("Not implemented: resource_map_blob"); + Err(ErrUnspec) + } + + fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult { + error!("Not implemented: resource_unmap_blob"); + Err(ErrUnspec) + } +} + +#[cfg(test)] +mod virgl_cov_tests { + use std::{ + os::unix::net::UnixStream, + sync::{Arc, Mutex}, + }; + + use assert_matches::assert_matches; + use rusty_fork::rusty_fork_test; + use rutabaga_gfx::{RUTABAGA_PIPE_BIND_RENDER_TARGET, RUTABAGA_PIPE_TEXTURE_2D}; + use vm_memory::{Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap}; + + 
use super::*; + use crate::{ + gpu_types::{FenceDescriptor, FenceState, ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, + protocol::{virtio_gpu_rect, GpuResponse, VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM}, + renderer::Renderer, + testutils::{ + create_vring, test_capset_operations, test_fence_operations, test_move_cursor, + TestingDescChainArgs, + }, + GpuCapset, GpuConfig, GpuFlags, GpuMode, + }; + + fn fence_desc(r: VirtioGpuRing, id: u64, idx: u16, len: u32) -> FenceDescriptor { + FenceDescriptor { + ring: r, + fence_id: id, + desc_index: idx, + len, + } + } + + fn dummy_gpu_backend() -> GpuBackend { + let (_, backend) = UnixStream::pair().unwrap(); + GpuBackend::from_stream(backend) + } + + #[test] + fn sglist_to_iovecs_err_on_invalid_slice() { + // Single region: 0x1000..0x2000 (4 KiB) + let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x1000)]).unwrap(); + + // Segment starts outside of mapped memory -> expect Err(()). + let bad = vec![(GuestAddress(0x3000), 16usize)]; + assert!(sglist_to_iovecs(&bad, &mem).is_err()); + } + + rusty_fork::rusty_fork_test! 
{ + #[test] + fn virgl_end_to_end_once() { + // Fence handler coverage (no virgl init needed) + let mem_a = GuestMemoryAtomic::new( + GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x20_000)]).unwrap() + ); + let (vr_a, _outs_a, call_a) = + create_vring(&mem_a, &[] as &[TestingDescChainArgs], GuestAddress(0x3000), GuestAddress(0x5000), 64); + + let fs_a = Arc::new(Mutex::new(FenceState { + descs: vec![ + fence_desc(VirtioGpuRing::Global, 5, 3, 64), + fence_desc(VirtioGpuRing::Global, 9, 4, 64), + ], + completed_fences: BTreeMap::default(), + })); + + let handler_a = VirglFenceHandler { + queue_ctl: vr_a, + fence_state: fs_a.clone(), + }; + + let _ = call_a.read(); // drain stale + handler_a.call(/*fence_id*/ 7, /*ctx_id*/ 0, /*ring_idx*/ 0); + + { + let st = fs_a.lock().unwrap(); + assert_eq!(st.descs.len(), 1); + assert_eq!(st.descs[0].fence_id, 9); + assert_eq!(st.completed_fences.get(&VirtioGpuRing::Global), Some(&7u64)); + drop(st); + } + assert_eq!(call_a.read().unwrap(), 1); + + // Context ring path: no match → completed_fences updated, no notify + let mem_b = GuestMemoryAtomic::new( + GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x20_000)]).unwrap() + ); + let (vr_b, _outs_b, call_b) = + create_vring(&mem_b, &[] as &[TestingDescChainArgs], GuestAddress(0x6000), GuestAddress(0x8000), 32); + + let ring_b = VirtioGpuRing::ContextSpecific { ctx_id: 42, ring_idx: 3 }; + let fs_b = Arc::new(Mutex::new(FenceState { + descs: vec![fence_desc(VirtioGpuRing::Global, 7, 1, 1)], + completed_fences: BTreeMap::default(), + })); + + let handler_b = VirglFenceHandler { + queue_ctl: vr_b, + fence_state: fs_b.clone(), + }; + handler_b.call(/*fence_id*/ 6, /*ctx_id*/ 42, /*ring_idx*/ 3); + + { + let st = fs_b.lock().unwrap(); + assert_eq!(st.descs.len(), 1); + assert_eq!(st.completed_fences.get(&ring_b), Some(&6u64)); + drop(st); + } + assert!(call_b.read().is_err(), "no signal when no match"); + + // Initialize virgl ONCE in this forked process; exercise adapter paths + 
let cfg = GpuConfig::new( + GpuMode::VirglRenderer, + Some(GpuCapset::VIRGL | GpuCapset::VIRGL2), + GpuFlags::default(), + ).expect("GpuConfig"); + + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x20_000)]).unwrap() + ); + let (vring, _outs, _call_evt) = + create_vring(&mem, &[] as &[TestingDescChainArgs], GuestAddress(0x2000), GuestAddress(0x4000), 64); + + let backend = dummy_gpu_backend(); + let mut gpu = VirglRendererAdapter::new(&vring, &cfg, backend); + + gpu.event_poll(); + let edid_req = VhostUserGpuEdidRequest { + scanout_id: 0, + }; + gpu.get_edid(edid_req).unwrap_err(); + assert!(gpu.unref_resource(99_999).is_err(), "unref on missing must error"); + + // Resource creation + attach backing + let res_id = 1; + let req = ResourceCreate3d { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: 1, height: 1, depth: 1, + array_size: 1, last_level: 0, nr_samples: 0, flags: 0, + }; + gpu.resource_create_3d(res_id, req).unwrap(); + + let gm_back = GuestMemoryMmap::from_ranges(&[(GuestAddress(0xA0000), 0x1000)]).unwrap(); + let pattern = [0xAA, 0xBB, 0xCC, 0xDD]; + gm_back.write(&pattern, GuestAddress(0xA0000)).unwrap(); + + gpu.attach_backing(res_id, &gm_back, vec![(GuestAddress(0xA0000), 4usize)]).unwrap(); + + // move_cursor: expected to Err with invalid resource id + test_move_cursor(&mut gpu); + + // update_cursor: expected to Err with invalid resource id + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: 0, + x: 10, + y: 10, + }; + gpu.update_cursor(9_999, cursor_pos, 0, 0).unwrap_err(); + + // update_cursor: create cursor resource and test reading path + let cursor_res_id = 2; + let cursor_req = ResourceCreate3d { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: 64, height: 64, depth: 1, + array_size: 1, last_level: 0, nr_samples: 0, flags: 0, + }; + 
gpu.resource_create_3d(cursor_res_id, cursor_req).unwrap(); + + // Attach backing for cursor resource + let cursor_backing = GuestMemoryMmap::from_ranges(&[(GuestAddress(0xB0000), 0x10000)]).unwrap(); + gpu.attach_backing(cursor_res_id, &cursor_backing, vec![(GuestAddress(0xB0000), 16384usize)]).unwrap(); + + // This should exercise common_read_cursor_resource and then fail at cursor_update (no frontend) + let result = gpu.update_cursor(cursor_res_id, cursor_pos, 5, 5); + assert_matches!(result, Err(GpuResponse::ErrUnspec), "Should fail at cursor_update to frontend"); + + // submit_command: expected to Err with dummy buffer + let mut cmd = [0u8; 8]; + let fence_id: Vec = vec![]; + gpu.submit_command(1, &mut cmd[..], &fence_id).unwrap_err(); + + let t = Transfer3DDesc::new_2d(0, 0, 1, 1, 0); + gpu.transfer_write(0, res_id, t).unwrap(); + gpu.transfer_read(0, res_id, t, None).unwrap(); + + // create_fence + process_fence + test_fence_operations(&mut gpu); + + gpu.detach_backing(res_id).unwrap(); + + // create_context / destroy_context and use ctx in transfers + let ctx_id = 1; + assert_matches!(gpu.create_context(ctx_id, 0, None), Ok(_)); + gpu.context_attach_resource(1, 1).unwrap(); + gpu.context_detach_resource(1, 1).unwrap(); + + let _ = gpu.destroy_context(ctx_id); + // use invalid ctx_id, should fail after destroy + let _ = gpu.transfer_write(ctx_id, res_id, t).unwrap_err(); + let _ = gpu.transfer_read(0, res_id, t, None).unwrap_err(); + + // scanout + flush paths + let dirty = virtio_gpu_rect { x: 0.into(), y: 0.into(), width: 32.into(), height: 32.into() }; + gpu.flush_resource(9_999, dirty).unwrap_err(); + + let res2 = 404u32; + let req2 = ResourceCreate3d { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: 64, height: 64, depth: 1, + array_size: 1, last_level: 0, nr_samples: 0, flags: 0, + }; + gpu.resource_create_3d(res2, req2).unwrap(); + + 
assert_matches!(gpu.flush_resource(res2, dirty), Ok(GpuResponse::OkNoData)); + + gpu.set_scanout(1, 1, dirty).unwrap_err(); + gpu.set_scanout(1, 0, dirty).unwrap_err(); + + // resource_id = 0 disables scanout + assert_matches!(gpu.flush_resource(0, dirty), Ok(GpuResponse::OkNoData)); + + // Test capset queries + for index in [0, 1, 3] { + test_capset_operations(&gpu, index); + } + + // Test blob resource functions (all should return ErrUnspec - not implemented) + assert_matches!( + gpu.resource_create_blob(1, 100, 0, 4096, 0, 0), + Err(GpuResponse::ErrUnspec) + ); + assert_matches!( + gpu.resource_map_blob(100, 0), + Err(GpuResponse::ErrUnspec) + ); + assert_matches!( + gpu.resource_unmap_blob(100), + Err(GpuResponse::ErrUnspec) + ); + + // Test resource_assign_uuid (not implemented) + assert_matches!( + gpu.resource_assign_uuid(1), + Err(GpuResponse::ErrUnspec) + ); + + // Test display_info (should fail without frontend) + assert_matches!( + gpu.display_info(), + Err(GpuResponse::ErrUnspec) + ); + + // Test force_ctx_0 + gpu.force_ctx_0(); + + // Test get_event_poll_fd + let _poll_fd = gpu.get_event_poll_fd(); + + // Test transfer_write_2d + let t2d = Transfer3DDesc::new_2d(0, 0, 1, 1, 0); + gpu.transfer_write_2d(0, res_id, t2d).unwrap_err(); + + // Test unref with resource that has scanouts (should fail) + let res3 = 500u32; + let req3 = ResourceCreate3d { + target: RUTABAGA_PIPE_TEXTURE_2D, + format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, + bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + width: 32, height: 32, depth: 1, + array_size: 1, last_level: 0, nr_samples: 0, flags: 0, + }; + gpu.resource_create_3d(res3, req3).unwrap(); + + // Manually enable scanout on the resource to test unref protection + if let Some(resource) = gpu.resources.get_mut(&res3) { + resource.scanouts.enable(0); + } + + // Now unref should fail because resource has active scanouts + assert_matches!( + gpu.unref_resource(res3), + Err(GpuResponse::ErrUnspec) + ); + } + } +} diff --git 
a/vhost-device-gpu/src/device.rs b/vhost-device-gpu/src/device.rs index a77fe2c..2064aac 100644 --- a/vhost-device-gpu/src/device.rs +++ b/vhost-device-gpu/src/device.rs @@ -4,6 +4,41 @@ // // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause +/// Helper macro to manage the thread-local lazy initialization of GPU adapters. +/// +/// This macro ensures that a GPU backend adapter (e.g., Gfxstream or Virgl) is +/// instantiated only once per thread when the first event arrives (`lazy +/// initialization`). +macro_rules! handle_adapter { + ($adapter_type:ty, $tls_name:ident, $new_adapter:expr, $self:expr, $device_event:expr, $vrings:expr) => {{ + thread_local! { + static $tls_name: RefCell> = const { RefCell::new(None) }; + } + + let mut event_poll_fd = None; + + $tls_name.with_borrow_mut(|maybe_renderer| { + let renderer = match maybe_renderer { + Some(renderer) => renderer, + None => { + // Pass $vrings to the call + let (control_vring, gpu_backend) = $self.extract_backend_and_vring($vrings)?; + + let renderer = $new_adapter(control_vring, gpu_backend); + + event_poll_fd = renderer.get_event_poll_fd(); + maybe_renderer.insert(renderer) + } + }; + + // Pass $device_event, renderer, and $vrings to the call + $self.handle_event($device_event, renderer, $vrings) + })?; + + Ok(event_poll_fd) + }}; +} + use std::{ cell::RefCell, io::{self, Result as IoResult}, @@ -12,10 +47,7 @@ use std::{ }; use log::{debug, error, info, trace, warn}; -use rutabaga_gfx::{ - ResourceCreate3D, RutabagaFence, Transfer3D, RUTABAGA_PIPE_BIND_RENDER_TARGET, - RUTABAGA_PIPE_TEXTURE_2D, -}; +use rutabaga_gfx::RutabagaFence; use thiserror::Error as ThisError; use vhost::vhost_user::{ gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest}, @@ -40,17 +72,23 @@ use vmm_sys_util::{ eventfd::{EventFd, EFD_NONBLOCK}, }; +#[cfg(feature = "backend-gfxstream")] +use crate::backend::gfxstream::GfxstreamAdapter; +#[cfg(feature = "backend-virgl")] +use crate::backend::virgl::VirglRendererAdapter; use 
crate::{ + gpu_types::{ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, protocol::{ virtio_gpu_ctrl_hdr, virtio_gpu_ctx_create, virtio_gpu_get_edid, virtio_gpu_resource_create_2d, virtio_gpu_resource_create_3d, virtio_gpu_transfer_host_3d, virtio_gpu_transfer_to_host_2d, virtio_gpu_update_cursor, GpuCommand, GpuCommandDecodeError, GpuResponse::ErrUnspec, GpuResponseEncodeError, VirtioGpuConfig, VirtioGpuResult, CONTROL_QUEUE, CURSOR_QUEUE, NUM_QUEUES, POLL_EVENT, QUEUE_SIZE, - VIRTIO_GPU_FLAG_FENCE, VIRTIO_GPU_FLAG_INFO_RING_IDX, VIRTIO_GPU_MAX_SCANOUTS, + VIRTIO_GPU_BIND_RENDER_TARGET, VIRTIO_GPU_FLAG_FENCE, VIRTIO_GPU_FLAG_INFO_RING_IDX, + VIRTIO_GPU_MAX_SCANOUTS, VIRTIO_GPU_TEXTURE_2D, }, - virtio_gpu::{RutabagaVirtioGpu, VirtioGpu, VirtioGpuRing}, - GpuConfig, + renderer::Renderer, + GpuConfig, GpuMode, }; type Result = std::result::Result; @@ -87,6 +125,8 @@ pub enum Error { EpollHandler(String), #[error("Failed register epoll listener: {0}")] RegisterEpollListener(io::Error), + #[error("Failed to create backend")] + BackendCreationFailed, } impl From for io::Error { @@ -119,7 +159,6 @@ impl VhostUserGpuBackend { gpu_config.capsets(), gpu_config.flags() ); - let inner = VhostUserGpuBackendInner { virtio_cfg: VirtioGpuConfig { events_read: 0.into(), @@ -158,62 +197,63 @@ impl VhostUserGpuBackend { impl VhostUserGpuBackendInner { fn process_gpu_command( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, mem: &GuestMemoryMmap, hdr: virtio_gpu_ctrl_hdr, cmd: GpuCommand, ) -> VirtioGpuResult { - virtio_gpu.force_ctx_0(); + renderer.force_ctx_0(); debug!("process_gpu_command: {cmd:?}"); match cmd { - GpuCommand::GetDisplayInfo => virtio_gpu.display_info(), - GpuCommand::GetEdid(req) => Self::handle_get_edid(virtio_gpu, req), - GpuCommand::ResourceCreate2d(req) => Self::handle_resource_create_2d(virtio_gpu, req), - GpuCommand::ResourceUnref(req) => virtio_gpu.unref_resource(req.resource_id.into()), + GpuCommand::GetDisplayInfo => 
renderer.display_info(), + GpuCommand::GetEdid(req) => Self::handle_get_edid(renderer, req), + GpuCommand::ResourceCreate2d(req) => Self::handle_resource_create_2d(renderer, req), + GpuCommand::ResourceUnref(req) => renderer.unref_resource(req.resource_id.into()), GpuCommand::SetScanout(req) => { - virtio_gpu.set_scanout(req.scanout_id.into(), req.resource_id.into(), req.r.into()) + renderer.set_scanout(req.scanout_id.into(), req.resource_id.into(), req.r) } GpuCommand::ResourceFlush(req) => { - virtio_gpu.flush_resource(req.resource_id.into(), req.r.into()) + renderer.flush_resource(req.resource_id.into(), req.r) } - GpuCommand::TransferToHost2d(req) => Self::handle_transfer_to_host_2d(virtio_gpu, req), + GpuCommand::TransferToHost2d(req) => Self::handle_transfer_to_host_2d(renderer, req), GpuCommand::ResourceAttachBacking(req, iovecs) => { - virtio_gpu.attach_backing(req.resource_id.into(), mem, iovecs) + renderer.attach_backing(req.resource_id.into(), mem, iovecs) } GpuCommand::ResourceDetachBacking(req) => { - virtio_gpu.detach_backing(req.resource_id.into()) + renderer.detach_backing(req.resource_id.into()) } - GpuCommand::UpdateCursor(req) => Self::handle_update_cursor(virtio_gpu, req), - GpuCommand::MoveCursor(req) => Self::handle_move_cursor(virtio_gpu, req), + GpuCommand::UpdateCursor(req) => Self::handle_update_cursor(renderer, req), + GpuCommand::MoveCursor(req) => Self::handle_move_cursor(renderer, req), GpuCommand::ResourceAssignUuid(_) => { panic!("virtio_gpu: GpuCommand::ResourceAssignUuid unimplemented") } - GpuCommand::GetCapsetInfo(req) => virtio_gpu.get_capset_info(req.capset_index.into()), + GpuCommand::GetCapsetInfo(req) => renderer.get_capset_info(req.capset_index.into()), GpuCommand::GetCapset(req) => { - virtio_gpu.get_capset(req.capset_id.into(), req.capset_version.into()) + renderer.get_capset(req.capset_id.into(), req.capset_version.into()) } - GpuCommand::CtxCreate(req) => Self::handle_ctx_create(virtio_gpu, hdr, req), - 
GpuCommand::CtxDestroy(_) => virtio_gpu.destroy_context(hdr.ctx_id.into()), + GpuCommand::CtxCreate(req) => Self::handle_ctx_create(renderer, hdr, req), + GpuCommand::CtxDestroy(_) => renderer.destroy_context(hdr.ctx_id.into()), GpuCommand::CtxAttachResource(req) => { - virtio_gpu.context_attach_resource(hdr.ctx_id.into(), req.resource_id.into()) + renderer.context_attach_resource(hdr.ctx_id.into(), req.resource_id.into()) } GpuCommand::CtxDetachResource(req) => { - virtio_gpu.context_detach_resource(hdr.ctx_id.into(), req.resource_id.into()) + renderer.context_detach_resource(hdr.ctx_id.into(), req.resource_id.into()) } - GpuCommand::ResourceCreate3d(req) => Self::handle_resource_create_3d(virtio_gpu, req), + GpuCommand::ResourceCreate3d(req) => Self::handle_resource_create_3d(renderer, req), GpuCommand::TransferToHost3d(req) => { - Self::handle_transfer_to_host_3d(virtio_gpu, hdr.ctx_id.into(), req) + Self::handle_transfer_to_host_3d(renderer, hdr.ctx_id.into(), req) } GpuCommand::TransferFromHost3d(req) => { - Self::handle_transfer_from_host_3d(virtio_gpu, hdr.ctx_id.into(), req) + Self::handle_transfer_from_host_3d(renderer, hdr.ctx_id.into(), req) } GpuCommand::CmdSubmit3d { fence_ids, mut cmd_data, - } => virtio_gpu.submit_command(hdr.ctx_id.into(), &mut cmd_data, &fence_ids), + } => renderer.submit_command(hdr.ctx_id.into(), &mut cmd_data, &fence_ids), GpuCommand::ResourceCreateBlob(_) => { panic!("virtio_gpu: GpuCommand::ResourceCreateBlob unimplemented") } + GpuCommand::SetScanoutBlob(_) => { panic!("virtio_gpu: GpuCommand::SetScanoutBlob unimplemented") } @@ -226,21 +266,21 @@ impl VhostUserGpuBackendInner { } } - fn handle_get_edid(virtio_gpu: &impl VirtioGpu, req: virtio_gpu_get_edid) -> VirtioGpuResult { + fn handle_get_edid(renderer: &dyn Renderer, req: virtio_gpu_get_edid) -> VirtioGpuResult { let edid_req = VhostUserGpuEdidRequest { scanout_id: req.scanout.into(), }; - virtio_gpu.get_edid(edid_req) + renderer.get_edid(edid_req) } fn 
handle_resource_create_2d( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, req: virtio_gpu_resource_create_2d, ) -> VirtioGpuResult { - let resource_create_3d = ResourceCreate3D { - target: RUTABAGA_PIPE_TEXTURE_2D, + let resource_create_3d = ResourceCreate3d { + target: VIRTIO_GPU_TEXTURE_2D, format: req.format.into(), - bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, + bind: VIRTIO_GPU_BIND_RENDER_TARGET, width: req.width.into(), height: req.height.into(), depth: 1, @@ -249,25 +289,25 @@ impl VhostUserGpuBackendInner { nr_samples: 0, flags: 0, }; - virtio_gpu.resource_create_3d(req.resource_id.into(), resource_create_3d) + renderer.resource_create_3d(req.resource_id.into(), resource_create_3d) } fn handle_transfer_to_host_2d( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, req: virtio_gpu_transfer_to_host_2d, ) -> VirtioGpuResult { - let transfer = Transfer3D::new_2d( + let transfer = Transfer3DDesc::new_2d( req.r.x.into(), req.r.y.into(), req.r.width.into(), req.r.height.into(), req.offset.into(), ); - virtio_gpu.transfer_write(0, req.resource_id.into(), transfer) + renderer.transfer_write_2d(0, req.resource_id.into(), transfer) } fn handle_update_cursor( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, req: virtio_gpu_update_cursor, ) -> VirtioGpuResult { let cursor_pos = VhostUserGpuCursorPos { @@ -275,7 +315,7 @@ impl VhostUserGpuBackendInner { x: req.pos.x.into(), y: req.pos.y.into(), }; - virtio_gpu.update_cursor( + renderer.update_cursor( req.resource_id.into(), cursor_pos, req.hot_x.into(), @@ -284,7 +324,7 @@ impl VhostUserGpuBackendInner { } fn handle_move_cursor( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, req: virtio_gpu_update_cursor, ) -> VirtioGpuResult { let cursor = VhostUserGpuCursorPos { @@ -292,16 +332,16 @@ impl VhostUserGpuBackendInner { x: req.pos.x.into(), y: req.pos.y.into(), }; - virtio_gpu.move_cursor(req.resource_id.into(), cursor) + 
renderer.move_cursor(req.resource_id.into(), cursor) } fn handle_ctx_create( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, hdr: virtio_gpu_ctrl_hdr, req: virtio_gpu_ctx_create, ) -> VirtioGpuResult { let context_name: Option = Some(req.get_debug_name()); - virtio_gpu.create_context( + renderer.create_context( hdr.ctx_id.into(), req.context_init.into(), context_name.as_deref(), @@ -309,10 +349,10 @@ impl VhostUserGpuBackendInner { } fn handle_resource_create_3d( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, req: virtio_gpu_resource_create_3d, ) -> VirtioGpuResult { - let resource_create_3d = ResourceCreate3D { + let resource_create_3d = ResourceCreate3d { target: req.target.into(), format: req.format.into(), bind: req.bind.into(), @@ -324,15 +364,15 @@ impl VhostUserGpuBackendInner { nr_samples: req.nr_samples.into(), flags: req.flags.into(), }; - virtio_gpu.resource_create_3d(req.resource_id.into(), resource_create_3d) + renderer.resource_create_3d(req.resource_id.into(), resource_create_3d) } fn handle_transfer_to_host_3d( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, ctx_id: u32, req: virtio_gpu_transfer_host_3d, ) -> VirtioGpuResult { - let transfer = Transfer3D { + let transfer = Transfer3DDesc { x: req.box_.x.into(), y: req.box_.y.into(), z: req.box_.z.into(), @@ -344,15 +384,15 @@ impl VhostUserGpuBackendInner { layer_stride: req.layer_stride.into(), offset: req.offset.into(), }; - virtio_gpu.transfer_write(ctx_id, req.resource_id.into(), transfer) + renderer.transfer_write(ctx_id, req.resource_id.into(), transfer) } fn handle_transfer_from_host_3d( - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, ctx_id: u32, req: virtio_gpu_transfer_host_3d, ) -> VirtioGpuResult { - let transfer = Transfer3D { + let transfer = Transfer3DDesc { x: req.box_.x.into(), y: req.box_.y.into(), z: req.box_.z.into(), @@ -364,12 +404,12 @@ impl VhostUserGpuBackendInner { layer_stride: 
req.layer_stride.into(), offset: req.offset.into(), }; - virtio_gpu.transfer_read(ctx_id, req.resource_id.into(), transfer, None) + renderer.transfer_read(ctx_id, req.resource_id.into(), transfer, None) } fn process_queue_chain( &self, - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, vring: &VringRwLock, head_index: u16, reader: &mut Reader, @@ -382,8 +422,7 @@ impl VhostUserGpuBackendInner { let ctrl_hdr = match GpuCommand::decode(reader) { Ok((ctrl_hdr, gpu_cmd)) => { let cmd_name = gpu_cmd.command_name(); - let response_result = - Self::process_gpu_command(virtio_gpu, &mem, ctrl_hdr, gpu_cmd); + let response_result = Self::process_gpu_command(renderer, &mem, ctrl_hdr, gpu_cmd); // Unwrap the response from inside Result and log information response = match response_result { Ok(response) => response, @@ -425,7 +464,7 @@ impl VhostUserGpuBackendInner { ctx_id, ring_idx, }; - if let Err(fence_response) = virtio_gpu.create_fence(fence) { + if let Err(fence_response) = renderer.create_fence(fence) { warn!( "Failed to create fence: fence_id: {fence_id} fence_response: \ {fence_response}" @@ -447,7 +486,7 @@ impl VhostUserGpuBackendInner { _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx }, }; debug!("Trying to process_fence for the command"); - virtio_gpu.process_fence(ring, fence_id, head_index, response_len) + renderer.process_fence(ring, fence_id, head_index, response_len) } else { true }; @@ -456,14 +495,14 @@ impl VhostUserGpuBackendInner { vring .add_used(head_index, response_len) .map_err(Error::QueueAddUsed)?; - trace!("add_used {} bytes", response_len); + trace!("add_used {response_len} bytes"); *signal_used_queue = true; } Ok(()) } /// Process the requests in the vring and dispatch replies - fn process_queue(&self, virtio_gpu: &mut impl VirtioGpu, vring: &VringRwLock) -> Result<()> { + fn process_queue(&self, renderer: &mut dyn Renderer, vring: &VringRwLock) -> Result<()> { let mem = self.mem.as_ref().unwrap().memory().into_inner(); let 
desc_chains: Vec<_> = vring .get_mut() @@ -482,7 +521,7 @@ impl VhostUserGpuBackendInner { let mut writer = desc_chain.writer(&mem).map_err(Error::CreateWriter)?; self.process_queue_chain( - virtio_gpu, + renderer, vring, head_index, &mut reader, @@ -505,7 +544,7 @@ impl VhostUserGpuBackendInner { fn handle_event( &self, device_event: u16, - virtio_gpu: &mut impl VirtioGpu, + renderer: &mut dyn Renderer, vrings: &[VringRwLock], ) -> IoResult<()> { match device_event { @@ -520,22 +559,22 @@ impl VhostUserGpuBackendInner { // requests on the queue. loop { vring.disable_notification().unwrap(); - self.process_queue(virtio_gpu, vring)?; + self.process_queue(renderer, vring)?; if !vring.enable_notification().unwrap() { break; } } } else { // Without EVENT_IDX, a single call is enough. - self.process_queue(virtio_gpu, vring)?; + self.process_queue(renderer, vring)?; } } POLL_EVENT => { trace!("Handling POLL_EVENT"); - virtio_gpu.event_poll(); + renderer.event_poll(); } _ => { - warn!("unhandled device_event: {}", device_event); + warn!("unhandled device_event: {device_event}"); return Err(Error::HandleEventUnknown.into()); } } @@ -543,6 +582,18 @@ impl VhostUserGpuBackendInner { Ok(()) } + fn extract_backend_and_vring<'a>( + &mut self, + vrings: &'a [VringRwLock], + ) -> IoResult<(&'a VringRwLock, GpuBackend)> { + let control_vring = &vrings[CONTROL_QUEUE as usize]; + let backend = self + .gpu_backend + .take() + .ok_or_else(|| io::Error::other("set_gpu_socket() not called, GpuBackend missing"))?; + Ok((control_vring, backend)) + } + fn lazy_init_and_handle_event( &mut self, device_event: u16, @@ -550,45 +601,36 @@ impl VhostUserGpuBackendInner { vrings: &[VringRwLock], _thread_id: usize, ) -> IoResult> { - // We use thread_local here because it is the easiest way to handle VirtioGpu - // being !Send - thread_local! 
{ - static VIRTIO_GPU_REF: RefCell> = const { RefCell::new(None) }; - } - debug!("Handle event called"); if evset != EventSet::IN { return Err(Error::HandleEventNotEpollIn.into()); - }; + } - let mut event_poll_fd = None; - VIRTIO_GPU_REF.with_borrow_mut(|maybe_virtio_gpu| { - let virtio_gpu = match maybe_virtio_gpu { - Some(virtio_gpu) => virtio_gpu, - None => { - let gpu_backend = self.gpu_backend.take().ok_or_else(|| { - io::Error::other("set_gpu_socket() not called, GpuBackend missing") - })?; + match self.gpu_config.gpu_mode() { + #[cfg(feature = "backend-gfxstream")] + GpuMode::Gfxstream => handle_adapter!( + GfxstreamAdapter, + TLS_GFXSTREAM, + |control_vring, gpu_backend| { + GfxstreamAdapter::new(control_vring, &self.gpu_config, gpu_backend) + }, + self, + device_event, + vrings + ), - // We currently pass the CONTROL_QUEUE vring to RutabagaVirtioGpu, because we - // only expect to process fences for that queue. - let control_vring = &vrings[CONTROL_QUEUE as usize]; - - // VirtioGpu::new can be called once per process (otherwise it panics), - // so if somehow another thread accidentally wants to create another gpu here, - // it will panic anyway - let virtio_gpu = - RutabagaVirtioGpu::new(control_vring, &self.gpu_config, gpu_backend); - event_poll_fd = virtio_gpu.get_event_poll_fd(); - - maybe_virtio_gpu.insert(virtio_gpu) - } - }; - - self.handle_event(device_event, virtio_gpu, vrings) - })?; - - Ok(event_poll_fd) + #[cfg(feature = "backend-virgl")] + GpuMode::VirglRenderer => handle_adapter!( + VirglRendererAdapter, + TLS_VIRGL, + |control_vring, gpu_backend| { + VirglRendererAdapter::new(control_vring, &self.gpu_config, gpu_backend) + }, + self, + device_event, + vrings + ), + } } fn get_config(&self, offset: u32, size: u32) -> Vec { @@ -640,7 +682,7 @@ impl VhostUserBackend for VhostUserGpuBackend { fn set_event_idx(&self, enabled: bool) { self.inner.lock().unwrap().event_idx_enabled = enabled; - debug!("Event idx set to: {}", enabled); + debug!("Event 
idx set to: {enabled}"); } fn update_memory(&self, mem: GuestMemoryAtomic) -> IoResult<()> { @@ -677,11 +719,11 @@ impl VhostUserBackend for VhostUserGpuBackend { )?; if let Some(poll_event_fd) = poll_event_fd { - let epoll_handler = match self.epoll_handler.lock() { + let Some(epoll_handler) = (match self.epoll_handler.lock() { Ok(h) => h, Err(poisoned) => poisoned.into_inner(), - }; - let Some(epoll_handler) = epoll_handler.upgrade() else { + }) + .upgrade() else { return Err( Error::EpollHandler("Failed to upgrade epoll handler".to_string()).into(), ); @@ -705,39 +747,33 @@ impl VhostUserBackend for VhostUserGpuBackend { #[cfg(test)] mod tests { use std::{ - fs::File, io::{ErrorKind, Read}, - iter::zip, mem, - os::{fd::FromRawFd, unix::net::UnixStream}, + os::unix::net::UnixStream, sync::Arc, thread, time::Duration, }; use assert_matches::assert_matches; - use mockall::predicate; + use mockall::{mock, predicate}; use rusty_fork::rusty_fork_test; use vhost::vhost_user::gpu_message::{VhostUserGpuDMABUFScanout, VhostUserGpuUpdate}; use vhost_user_backend::{VhostUserDaemon, VringRwLock, VringT}; - use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; - use virtio_queue::{ - desc::{split::Descriptor as SplitDescriptor, RawDescriptor}, - mock::MockSplitQueue, - Queue, QueueT, - }; use vm_memory::{ - ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap, + ByteValued, Bytes, GuestAddress, GuestMemoryAtomic, GuestMemoryMmap, VolatileSlice, }; use super::*; use crate::{ + gpu_types::{ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, protocol::{ - virtio_gpu_ctx_create, virtio_gpu_ctx_destroy, virtio_gpu_ctx_resource, - virtio_gpu_get_capset_info, virtio_gpu_mem_entry, virtio_gpu_rect, - virtio_gpu_resource_attach_backing, virtio_gpu_resource_detach_backing, - virtio_gpu_resource_flush, virtio_gpu_resource_unref, virtio_gpu_set_scanout, - GpuResponse::{OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData}, + virtio_gpu_ctrl_hdr, 
virtio_gpu_ctx_create, virtio_gpu_ctx_destroy, + virtio_gpu_ctx_resource, virtio_gpu_get_capset, virtio_gpu_get_capset_info, + virtio_gpu_mem_entry, virtio_gpu_rect, virtio_gpu_resource_attach_backing, + virtio_gpu_resource_detach_backing, virtio_gpu_resource_flush, + virtio_gpu_resource_unref, virtio_gpu_set_scanout, + GpuResponse::{OkCapset, OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData}, VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_CREATE, VIRTIO_GPU_CMD_CTX_DESTROY, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, @@ -747,10 +783,87 @@ mod tests { VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, VIRTIO_GPU_RESP_ERR_UNSPEC, VIRTIO_GPU_RESP_OK_NODATA, }, - virtio_gpu::MockVirtioGpu, + renderer::Renderer, + testutils::{create_vring, TestingDescChainArgs}, GpuCapset, GpuFlags, GpuMode, }; + // Create a mock for the Renderer trait + mock! { + pub MockRenderer {} + + impl Renderer for MockRenderer { + fn display_info(&self) -> VirtioGpuResult; + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult; + fn set_scanout(&mut self, scanout_id: u32, resource_id: u32, rect: virtio_gpu_rect) -> VirtioGpuResult; + fn flush_resource(&mut self, resource_id: u32, rect: virtio_gpu_rect) -> VirtioGpuResult; + fn resource_create_blob( + &mut self, + ctx_id: u32, + resource_id: u32, + blob_id: u64, + size: u64, + blob_mem: u32, + blob_flags: u32, + ) -> VirtioGpuResult; + fn resource_map_blob(&mut self, resource_id: u32, offset: u64) -> VirtioGpuResult; + fn resource_unmap_blob(&mut self, resource_id: u32) -> VirtioGpuResult; + fn resource_create_3d(&mut self, resource_id: u32, req: ResourceCreate3d) -> VirtioGpuResult; + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult; + fn transfer_write(&mut self, ctx_id: u32, resource_id: u32, req: Transfer3DDesc) -> VirtioGpuResult; + fn transfer_write_2d(&mut self, ctx_id: u32, resource_id: u32, req: Transfer3DDesc) -> VirtioGpuResult; + fn 
transfer_read<'a>( + &mut self, + ctx_id: u32, + resource_id: u32, + req: Transfer3DDesc, + buf: Option>, + ) -> VirtioGpuResult; + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult; + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult; + fn update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult; + fn move_cursor( + &mut self, + resource_id: u32, + cursor: VhostUserGpuCursorPos, + ) -> VirtioGpuResult; + fn get_capset_info(&self, index: u32) -> VirtioGpuResult; + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult; + fn create_context<'a>( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&'a str>, + ) -> VirtioGpuResult; + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult; + fn force_ctx_0(&self); + fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult; + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult; + fn process_fence(&mut self, ring: VirtioGpuRing, fence_id: u64, desc_index: u16, len: u32) -> bool; + fn get_event_poll_fd(&self) -> Option; + fn event_poll(&self); + } + } + const MEM_SIZE: usize = 2 * 1024 * 1024; // 2MiB const CURSOR_QUEUE_ADDR: GuestAddress = GuestAddress(0x0); @@ -776,12 +889,6 @@ mod tests { (backend, mem) } - /// Arguments to create a descriptor chain for testing - struct TestingDescChainArgs<'a> { - readable_desc_bufs: &'a [&'a [u8]], - writable_desc_lengths: &'a [u32], - } - fn gpu_backend_pair() -> (UnixStream, GpuBackend) { let (frontend, backend) = 
UnixStream::pair().unwrap(); let backend = GpuBackend::from_stream(backend); @@ -789,26 +896,21 @@ mod tests { (frontend, backend) } - fn event_fd_into_file(event_fd: EventFd) -> File { - // SAFETY: We ensure that the `event_fd` is properly handled such that its file - // descriptor is not closed after `File` takes ownership of it. - unsafe { - let event_fd_raw = event_fd.as_raw_fd(); - mem::forget(event_fd); - File::from_raw_fd(event_fd_raw) - } - } - #[test] fn test_process_gpu_command() { let (_, mem) = init(); let hdr = virtio_gpu_ctrl_hdr::default(); - let test_cmd = |cmd: GpuCommand, setup: fn(&mut MockVirtioGpu)| { - let mut mock_gpu = MockVirtioGpu::new(); - mock_gpu.expect_force_ctx_0().return_once(|| ()); - setup(&mut mock_gpu); - VhostUserGpuBackendInner::process_gpu_command(&mut mock_gpu, &mem.memory(), hdr, cmd) + let test_cmd = |cmd: GpuCommand, setup: fn(&mut MockMockRenderer)| { + let mut mock_renderer = MockMockRenderer::new(); + mock_renderer.expect_force_ctx_0().return_const(()); + setup(&mut mock_renderer); + VhostUserGpuBackendInner::process_gpu_command( + &mut mock_renderer, + &mem.memory(), + hdr, + cmd, + ) }; let cmd = GpuCommand::GetDisplayInfo; @@ -855,7 +957,7 @@ mod tests { let cmd = GpuCommand::TransferToHost2d(virtio_gpu_transfer_to_host_2d::default()); let result = test_cmd(cmd, |g| { - g.expect_transfer_write() + g.expect_transfer_write_2d() .return_once(|_, _, _| Ok(OkNoData)); }); assert_matches!(result, Ok(OkNoData)); @@ -876,6 +978,19 @@ mod tests { }); assert_matches!(result, Ok(OkNoData)); + let cmd = GpuCommand::UpdateCursor(virtio_gpu_update_cursor::default()); + let result = test_cmd(cmd, |g| { + g.expect_update_cursor() + .return_once(|_, _, _, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + + let cmd = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); + let result = test_cmd(cmd, |g| { + g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); + }); + assert_matches!(result, Ok(OkNoData)); + 
let cmd = GpuCommand::GetCapsetInfo(virtio_gpu_get_capset_info::default()); let result = test_cmd(cmd, |g| { g.expect_get_capset_info().return_once(|_| { @@ -895,6 +1010,14 @@ mod tests { }) ); + let cmd = GpuCommand::GetCapset(virtio_gpu_get_capset::default()); + let result = test_cmd(cmd, |g| { + // Fixed E0559: Correctly constructing the OkCapset tuple variant with a Vec + g.expect_get_capset() + .return_once(|_, _| Ok(OkCapset(vec![0; 1]))); + }); + assert_matches!(result, Ok(OkCapset { .. })); + let cmd = GpuCommand::CtxCreate(virtio_gpu_ctx_create::default()); let result = test_cmd(cmd, |g| { g.expect_create_context() @@ -952,119 +1075,6 @@ mod tests { .return_once(|_, _, _| Ok(OkNoData)); }); assert_matches!(result, Ok(OkNoData)); - - let cmd = GpuCommand::UpdateCursor(virtio_gpu_update_cursor::default()); - let result = test_cmd(cmd, |g| { - g.expect_update_cursor() - .return_once(|_, _, _, _| Ok(OkNoData)); - }); - assert_matches!(result, Ok(OkNoData)); - - let cmd = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); - let result = test_cmd(cmd, |g| { - g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); - }); - assert_matches!(result, Ok(OkNoData)); - - let cmd = GpuCommand::MoveCursor(virtio_gpu_update_cursor::default()); - let result = test_cmd(cmd, |g| { - g.expect_move_cursor().return_once(|_, _| Ok(OkNoData)); - }); - assert_matches!(result, Ok(OkNoData)); - } - - fn make_descriptors_into_a_chain(start_idx: u16, descriptors: &mut [SplitDescriptor]) { - let last_idx = start_idx + descriptors.len() as u16 - 1; - for (idx, desc) in zip(start_idx.., descriptors.iter_mut()) { - if idx == last_idx { - desc.set_flags(desc.flags() & !VRING_DESC_F_NEXT as u16); - } else { - desc.set_flags(desc.flags() | VRING_DESC_F_NEXT as u16); - desc.set_next(idx + 1); - }; - } - } - - // Creates a vring from the specified descriptor chains - // For each created device-writable descriptor chain a Vec<(GuestAddress, - // usize)> is returned representing the 
descriptors of that chain. - fn create_vring( - mem: &GuestMemoryAtomic, - chains: &[TestingDescChainArgs], - queue_addr_start: GuestAddress, - data_addr_start: GuestAddress, - queue_size: u16, - ) -> (VringRwLock, Vec>, EventFd) { - let mem_handle = mem.memory(); - mem.memory() - .check_address(queue_addr_start) - .expect("Invalid start address"); - - let mut output_bufs = Vec::new(); - let vq = MockSplitQueue::create(&*mem_handle, queue_addr_start, queue_size); - // Address of the buffer associated with the descriptor - let mut next_addr = data_addr_start.0; - let mut chain_index_start = 0; - let mut descriptors = Vec::new(); - - for chain in chains { - for buf in chain.readable_desc_bufs { - mem.memory() - .check_address(GuestAddress(next_addr)) - .expect("Readable descriptor's buffer address is not valid!"); - let desc = SplitDescriptor::new( - next_addr, - buf.len() - .try_into() - .expect("Buffer too large to fit into descriptor"), - 0, - 0, - ); - mem_handle.write(buf, desc.addr()).unwrap(); - descriptors.push(desc); - next_addr += buf.len() as u64; - } - let mut writable_descriptor_addresses = Vec::new(); - for desc_len in chain.writable_desc_lengths.iter().copied() { - mem.memory() - .check_address(GuestAddress(next_addr)) - .expect("Writable descriptor's buffer address is not valid!"); - let desc = SplitDescriptor::new(next_addr, desc_len, VRING_DESC_F_WRITE as u16, 0); - writable_descriptor_addresses.push(desc.addr()); - descriptors.push(desc); - next_addr += u64::from(desc_len); - } - output_bufs.push(writable_descriptor_addresses); - make_descriptors_into_a_chain( - chain_index_start as u16, - &mut descriptors[chain_index_start..], - ); - chain_index_start = descriptors.len(); - } - - assert!(descriptors.len() < queue_size as usize); - if !descriptors.is_empty() { - let descs_raw = descriptors - .into_iter() - .map(RawDescriptor::from) - .collect::>(); - vq.build_multiple_desc_chains(&descs_raw) - .expect("Failed to build descriptor chain"); - } - - 
let queue: Queue = vq.create_queue().unwrap(); - let vring = VringRwLock::new(mem.clone(), queue_size).unwrap(); - let signal_used_queue_evt = EventFd::new(EFD_NONBLOCK).unwrap(); - let signal_used_queue_evt_clone = signal_used_queue_evt.try_clone().unwrap(); - vring - .set_queue_info(queue.desc_table(), queue.avail_ring(), queue.used_ring()) - .unwrap(); - vring.set_call(Some(event_fd_into_file(signal_used_queue_evt_clone))); - - vring.set_enabled(true); - vring.set_queue_ready(true); - - (vring, output_bufs, signal_used_queue_evt) } fn create_control_vring( @@ -1097,7 +1107,6 @@ mod tests { fn test_handle_event_executes_gpu_commands() { let (backend, mem) = init(); backend.update_memory(mem.clone()).unwrap(); - let backend_inner = backend.inner.lock().unwrap(); let hdr = virtio_gpu_ctrl_hdr { type_: VIRTIO_GPU_CMD_RESOURCE_CREATE_2D.into(), @@ -1127,29 +1136,29 @@ mod tests { let mem = mem.memory().into_inner(); - let mut mock_gpu = MockVirtioGpu::new(); + let mut mock_renderer = MockMockRenderer::new(); let seq = &mut mockall::Sequence::new(); - mock_gpu + mock_renderer .expect_force_ctx_0() .return_const(()) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_resource_create_3d() .with(predicate::eq(1), predicate::always()) .returning(|_, _| Ok(OkNoData)) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_force_ctx_0() .return_const(()) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_resource_create_3d() .with(predicate::eq(1), predicate::always()) .returning(|_, _| Err(ErrUnspec)) @@ -1161,8 +1170,11 @@ mod tests { ErrorKind::WouldBlock ); - backend_inner - .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + backend + .inner + .lock() + .unwrap() + .handle_event(0, &mut mock_renderer, &[control_vring, cursor_vring]) .unwrap(); let expected_hdr1 = virtio_gpu_ctrl_hdr { @@ -1196,7 +1208,6 @@ mod tests { let (backend, mem) = init(); backend.update_memory(mem.clone()).unwrap(); - let backend_inner = 
backend.inner.lock().unwrap(); let hdr = virtio_gpu_ctrl_hdr { type_: VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D.into(), @@ -1218,29 +1229,29 @@ mod tests { create_control_vring(&mem, &[chain]); let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); - let mut mock_gpu = MockVirtioGpu::new(); + let mut mock_renderer = MockMockRenderer::new(); let seq = &mut mockall::Sequence::new(); - mock_gpu + mock_renderer .expect_force_ctx_0() .return_const(()) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_transfer_write() .returning(|_, _, _| Ok(OkNoData)) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_create_fence() .withf(|fence| fence.fence_id == FENCE_ID) .returning(|_| Ok(OkNoData)) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_process_fence() .with( predicate::eq(VirtioGpuRing::Global), @@ -1252,8 +1263,11 @@ mod tests { .once() .in_sequence(seq); - backend_inner - .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + backend + .inner + .lock() + .unwrap() + .handle_event(0, &mut mock_renderer, &[control_vring, cursor_vring]) .unwrap(); let expected_hdr = virtio_gpu_ctrl_hdr { @@ -1281,7 +1295,6 @@ mod tests { let (backend, mem) = init(); backend.update_memory(mem.clone()).unwrap(); - let backend_inner = backend.inner.lock().unwrap(); let hdr = virtio_gpu_ctrl_hdr { type_: VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D.into(), @@ -1303,29 +1316,29 @@ mod tests { create_control_vring(&mem, &[chain]); let (cursor_vring, _, _) = create_cursor_vring(&mem, &[]); - let mut mock_gpu = MockVirtioGpu::new(); + let mut mock_renderer = MockMockRenderer::new(); let seq = &mut mockall::Sequence::new(); - mock_gpu + mock_renderer .expect_force_ctx_0() .return_const(()) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_transfer_read() .returning(|_, _, _, _| Ok(OkNoData)) .once() .in_sequence(seq); - mock_gpu + mock_renderer .expect_create_fence() .withf(|fence| fence.fence_id == FENCE_ID) .returning(|_| Ok(OkNoData)) .once() 
.in_sequence(seq); - mock_gpu + mock_renderer .expect_process_fence() .with( predicate::eq(VirtioGpuRing::ContextSpecific { @@ -1340,8 +1353,11 @@ mod tests { .once() .in_sequence(seq); - backend_inner - .handle_event(0, &mut mock_gpu, &[control_vring, cursor_vring]) + backend + .inner + .lock() + .unwrap() + .handle_event(0, &mut mock_renderer, &[control_vring, cursor_vring]) .unwrap(); assert_eq!( @@ -1491,7 +1507,7 @@ mod tests { fd_height: IMAGE_HEIGHT, fd_stride: IMAGE_WIDTH * 4, fd_flags: 0, - fd_drm_fourcc: 875708993, // This is a placeholder; actual value depends on the backend. + fd_drm_fourcc: 875_708_993, // This is a placeholder; actual value depends on the backend. }; let (backend, mem) = init(); diff --git a/vhost-device-gpu/src/gpu_types.rs b/vhost-device-gpu/src/gpu_types.rs new file mode 100644 index 0000000..0caf067 --- /dev/null +++ b/vhost-device-gpu/src/gpu_types.rs @@ -0,0 +1,148 @@ +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +/// Generates an implementation of `From` for any compatible +/// target struct. +macro_rules! impl_transfer3d_from_desc { + ($target:path) => { + impl From for $target { + fn from(desc: Transfer3DDesc) -> Self { + Self { + x: desc.x, + y: desc.y, + z: desc.z, + w: desc.w, + h: desc.h, + d: desc.d, + level: desc.level, + stride: desc.stride, + layer_stride: desc.layer_stride, + offset: desc.offset, + } + } + } + }; +} + +macro_rules! 
impl_from_resource_create3d { + ($target:ty) => { + impl From for $target { + fn from(r: ResourceCreate3d) -> Self { + Self { + target: r.target, + format: r.format, + bind: r.bind, + width: r.width, + height: r.height, + depth: r.depth, + array_size: r.array_size, + last_level: r.last_level, + nr_samples: r.nr_samples, + flags: r.flags, + } + } + } + }; +} + +use std::{collections::BTreeMap, os::raw::c_void}; + +use rutabaga_gfx::Transfer3D; +use virglrenderer::Transfer3D as VirglTransfer3D; + +use crate::protocol::virtio_gpu_rect; + +#[derive(Debug, Clone, Copy)] +pub struct Transfer3DDesc { + pub x: u32, + pub y: u32, + pub z: u32, + pub w: u32, + pub h: u32, + pub d: u32, + pub level: u32, + pub stride: u32, + pub layer_stride: u32, + pub offset: u64, +} + +impl Transfer3DDesc { + /// Constructs a 2 dimensional XY box in 3 dimensional space with unit depth + /// and zero displacement on the Z axis. + pub const fn new_2d(x: u32, y: u32, w: u32, h: u32, offset: u64) -> Self { + Self { + x, + y, + z: 0, + w, + h, + d: 1, + level: 0, + stride: 0, + layer_stride: 0, + offset, + } + } +} +// Invoke the macro for both targets +// rutabaga_gfx::Transfer3D +impl_transfer3d_from_desc!(Transfer3D); +// virglrenderer::Transfer3D +impl_transfer3d_from_desc!(VirglTransfer3D); + +// These are neutral types that can be used by all backends +pub type Rect = virtio_gpu_rect; + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum VirtioGpuRing { + Global, + ContextSpecific { ctx_id: u32, ring_idx: u8 }, +} + +pub struct FenceDescriptor { + pub ring: VirtioGpuRing, + pub fence_id: u64, + pub desc_index: u16, + pub len: u32, +} + +#[derive(Default)] +pub struct FenceState { + pub descs: Vec, + pub completed_fences: BTreeMap, +} + +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct Iovec { + pub iov_base: *mut c_void, + pub iov_len: usize, +} + +// The neutral `ResourceCreate3d` struct that all adapters will convert from. 
+#[derive(Debug, Clone, Copy)] +pub struct ResourceCreate3d { + pub target: u32, + pub format: u32, + pub bind: u32, + pub width: u32, + pub height: u32, + pub depth: u32, + pub array_size: u32, + pub last_level: u32, + pub nr_samples: u32, + pub flags: u32, +} + +// Invoke the macro for both targets +impl_from_resource_create3d!(rutabaga_gfx::ResourceCreate3D); +impl_from_resource_create3d!(virglrenderer::ResourceCreate3D); + +#[derive(Debug, Clone, Copy)] +pub struct ResourceCreate2d { + pub resource_id: u32, + pub format: u32, + pub width: u32, + pub height: u32, +} diff --git a/vhost-device-gpu/src/lib.rs b/vhost-device-gpu/src/lib.rs index dc7a1b7..b710f68 100644 --- a/vhost-device-gpu/src/lib.rs +++ b/vhost-device-gpu/src/lib.rs @@ -11,7 +11,13 @@ pub mod device; pub mod protocol; -pub mod virtio_gpu; +// Module for backends +pub mod backend; +// Module for the common renderer trait +pub mod gpu_types; +pub mod renderer; +#[cfg(test)] +pub(crate) mod testutils; use std::{ fmt::{Display, Formatter}, @@ -23,6 +29,7 @@ use clap::ValueEnum; use log::info; #[cfg(feature = "backend-gfxstream")] use rutabaga_gfx::{RUTABAGA_CAPSET_GFXSTREAM_GLES, RUTABAGA_CAPSET_GFXSTREAM_VULKAN}; +#[cfg(feature = "backend-virgl")] use rutabaga_gfx::{RUTABAGA_CAPSET_VIRGL, RUTABAGA_CAPSET_VIRGL2}; use thiserror::Error as ThisError; use vhost_user_backend::VhostUserDaemon; @@ -33,6 +40,7 @@ use crate::device::VhostUserGpuBackend; #[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] pub enum GpuMode { #[value(name = "virglrenderer", alias("virgl-renderer"))] + #[cfg(feature = "backend-virgl")] VirglRenderer, #[cfg(feature = "backend-gfxstream")] Gfxstream, @@ -41,6 +49,7 @@ pub enum GpuMode { impl Display for GpuMode { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { + #[cfg(feature = "backend-virgl")] Self::VirglRenderer => write!(f, "virglrenderer"), #[cfg(feature = "backend-gfxstream")] Self::Gfxstream => write!(f, "gfxstream"), @@ -52,8 +61,11 @@ bitflags! 
{ /// A bitmask for representing supported gpu capability sets. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct GpuCapset: u64 { + #[cfg(feature = "backend-virgl")] const VIRGL = 1 << RUTABAGA_CAPSET_VIRGL as u64; + #[cfg(feature = "backend-virgl")] const VIRGL2 = 1 << RUTABAGA_CAPSET_VIRGL2 as u64; + #[cfg(feature = "backend-virgl")] const ALL_VIRGLRENDERER_CAPSETS = Self::VIRGL.bits() | Self::VIRGL2.bits(); #[cfg(feature = "backend-gfxstream")] @@ -75,7 +87,9 @@ impl Display for GpuCapset { first = false; match capset { + #[cfg(feature = "backend-virgl")] Self::VIRGL => write!(f, "virgl"), + #[cfg(feature = "backend-virgl")] Self::VIRGL2 => write!(f, "virgl2"), #[cfg(feature = "backend-gfxstream")] Self::GFXSTREAM_VULKAN => write!(f, "gfxstream-vulkan"), @@ -139,6 +153,7 @@ pub enum GpuConfigError { } impl GpuConfig { + #[cfg(feature = "backend-virgl")] pub const DEFAULT_VIRGLRENDER_CAPSET_MASK: GpuCapset = GpuCapset::ALL_VIRGLRENDERER_CAPSETS; #[cfg(feature = "backend-gfxstream")] @@ -146,6 +161,7 @@ impl GpuConfig { pub const fn get_default_capset_for_mode(gpu_mode: GpuMode) -> GpuCapset { match gpu_mode { + #[cfg(feature = "backend-virgl")] GpuMode::VirglRenderer => Self::DEFAULT_VIRGLRENDER_CAPSET_MASK, #[cfg(feature = "backend-gfxstream")] GpuMode::Gfxstream => Self::DEFAULT_GFXSTREAM_CAPSET_MASK, @@ -154,6 +170,7 @@ impl GpuConfig { fn validate_capset(gpu_mode: GpuMode, capset: GpuCapset) -> Result<(), GpuConfigError> { let supported_capset_mask = match gpu_mode { + #[cfg(feature = "backend-virgl")] GpuMode::VirglRenderer => GpuCapset::ALL_VIRGLRENDERER_CAPSETS, #[cfg(feature = "backend-gfxstream")] GpuMode::Gfxstream => GpuCapset::ALL_GFXSTREAM_CAPSETS, @@ -237,6 +254,7 @@ mod tests { use super::*; #[test] + #[cfg(feature = "backend-virgl")] fn test_gpu_config_create_default_virglrenderer() { let config = GpuConfig::new(GpuMode::VirglRenderer, None, GpuFlags::new_default()).unwrap(); assert_eq!(config.gpu_mode(), GpuMode::VirglRenderer); @@ -264,6 
+282,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-virgl")] fn test_gpu_config_valid_combination() { let config = GpuConfig::new( GpuMode::VirglRenderer, @@ -304,12 +323,14 @@ mod tests { #[test] fn test_default_num_capsets() { + #[cfg(feature = "backend-virgl")] assert_eq!(GpuConfig::DEFAULT_VIRGLRENDER_CAPSET_MASK.num_capsets(), 2); #[cfg(feature = "backend-gfxstream")] assert_eq!(GpuConfig::DEFAULT_GFXSTREAM_CAPSET_MASK.num_capsets(), 2); } #[test] + #[cfg(feature = "backend-virgl")] fn test_capset_display_multiple() { let capset = GpuCapset::VIRGL | GpuCapset::VIRGL2; let output = capset.to_string(); @@ -327,6 +348,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-virgl")] fn test_fail_listener() { // This will fail the listeners and thread will panic. let socket_name = Path::new("/proc/-1/nonexistent"); diff --git a/vhost-device-gpu/src/main.rs b/vhost-device-gpu/src/main.rs index 10356d8..55ef8dd 100644 --- a/vhost-device-gpu/src/main.rs +++ b/vhost-device-gpu/src/main.rs @@ -14,9 +14,11 @@ use vhost_device_gpu::{start_backend, GpuCapset, GpuConfig, GpuConfigError, GpuF #[repr(u64)] pub enum CapsetName { /// [virglrenderer] OpenGL implementation, superseded by Virgl2 + #[cfg(feature = "backend-virgl")] Virgl = GpuCapset::VIRGL.bits(), /// [virglrenderer] OpenGL implementation + #[cfg(feature = "backend-virgl")] Virgl2 = GpuCapset::VIRGL2.bits(), /// [gfxstream] Vulkan implementation (partial support only){n} diff --git a/vhost-device-gpu/src/protocol.rs b/vhost-device-gpu/src/protocol.rs index b586741..ffde38c 100644 --- a/vhost-device-gpu/src/protocol.rs +++ b/vhost-device-gpu/src/protocol.rs @@ -71,6 +71,14 @@ pub const CONTROL_QUEUE: u16 = 0; pub const CURSOR_QUEUE: u16 = 1; pub const POLL_EVENT: u16 = 3; +/// 3D resource creation parameters. Also used to create 2D resource. +/// +/// Constants based on Mesa's (internal) Gallium interface. 
Not in the +/// virtio-gpu spec, but should be since dumb resources can't work with +/// gfxstream/virglrenderer without this. +pub const VIRTIO_GPU_TEXTURE_2D: u32 = 2; +pub const VIRTIO_GPU_BIND_RENDER_TARGET: u32 = 2; + pub const VIRTIO_GPU_MAX_SCANOUTS: u32 = 16; /// `CHROMIUM(b/277982577)` success responses @@ -385,6 +393,25 @@ pub struct virtio_gpu_resource_create_3d { pub padding: Le32, } +impl From for virtio_gpu_resource_create_3d { + fn from(args: virtio_gpu_resource_create_2d) -> Self { + Self { + resource_id: args.resource_id, + target: VIRTIO_GPU_TEXTURE_2D.into(), + format: args.format, + bind: VIRTIO_GPU_BIND_RENDER_TARGET.into(), + width: args.width, + height: args.height, + depth: 1.into(), // default for 2D + array_size: 1.into(), // default for 2D + last_level: 0.into(), // default mipmap + nr_samples: 0.into(), // default sample count + flags: 0.into(), + padding: 0.into(), + } + } +} + // SAFETY: The layout of the structure is fixed and can be initialized by // reading its content from byte array. unsafe impl ByteValued for virtio_gpu_resource_create_3d {} diff --git a/vhost-device-gpu/src/renderer.rs b/vhost-device-gpu/src/renderer.rs new file mode 100644 index 0000000..49d3557 --- /dev/null +++ b/vhost-device-gpu/src/renderer.rs @@ -0,0 +1,102 @@ +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use rutabaga_gfx::RutabagaFence; +use vhost::vhost_user::gpu_message::{VhostUserGpuCursorPos, VhostUserGpuEdidRequest}; +use vm_memory::{GuestAddress, GuestMemoryMmap, VolatileSlice}; +use vmm_sys_util::eventfd::EventFd; + +use crate::{ + gpu_types::{ResourceCreate3d, Transfer3DDesc, VirtioGpuRing}, + protocol::{virtio_gpu_rect, VirtioGpuResult}, +}; + +/// Trait defining the interface for GPU renderers. 
+pub trait Renderer: Send + Sync { + fn resource_create_3d(&mut self, resource_id: u32, req: ResourceCreate3d) -> VirtioGpuResult; + fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult; + fn transfer_write( + &mut self, + ctx_id: u32, + resource_id: u32, + req: Transfer3DDesc, + ) -> VirtioGpuResult; + fn transfer_write_2d( + &mut self, + ctx_id: u32, + resource_id: u32, + req: Transfer3DDesc, + ) -> VirtioGpuResult; + fn transfer_read( + &mut self, + ctx_id: u32, + resource_id: u32, + req: Transfer3DDesc, + buf: Option, + ) -> VirtioGpuResult; + fn attach_backing( + &mut self, + resource_id: u32, + mem: &GuestMemoryMmap, + vecs: Vec<(GuestAddress, usize)>, + ) -> VirtioGpuResult; + fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult; + fn update_cursor( + &mut self, + resource_id: u32, + cursor_pos: VhostUserGpuCursorPos, + hot_x: u32, + hot_y: u32, + ) -> VirtioGpuResult; + fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult; + fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult; + fn get_capset_info(&self, index: u32) -> VirtioGpuResult; + fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult; + fn create_context( + &mut self, + ctx_id: u32, + context_init: u32, + context_name: Option<&str>, + ) -> VirtioGpuResult; + fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; + fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; + fn submit_command( + &mut self, + ctx_id: u32, + commands: &mut [u8], + fence_ids: &[u64], + ) -> VirtioGpuResult; + fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult; + fn process_fence( + &mut self, + ring: VirtioGpuRing, + fence_id: u64, + desc_index: u16, + len: u32, + ) -> bool; + fn get_event_poll_fd(&self) -> Option; + fn event_poll(&self); + fn force_ctx_0(&self); + fn 
display_info(&self) -> VirtioGpuResult; + fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult; + fn set_scanout( + &mut self, + scanout_id: u32, + resource_id: u32, + rect: virtio_gpu_rect, + ) -> VirtioGpuResult; + fn flush_resource(&mut self, resource_id: u32, rect: virtio_gpu_rect) -> VirtioGpuResult; + fn resource_create_blob( + &mut self, + ctx_id: u32, + resource_id: u32, + blob_id: u64, + size: u64, + blob_mem: u32, + blob_flags: u32, + ) -> VirtioGpuResult; + fn resource_map_blob(&mut self, resource_id: u32, offset: u64) -> VirtioGpuResult; + fn resource_unmap_blob(&mut self, resource_id: u32) -> VirtioGpuResult; +} diff --git a/vhost-device-gpu/src/testutils.rs b/vhost-device-gpu/src/testutils.rs new file mode 100644 index 0000000..c1f94ae --- /dev/null +++ b/vhost-device-gpu/src/testutils.rs @@ -0,0 +1,227 @@ +// Copyright 2025 Red Hat Inc +// +// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause + +use std::{ + fs::File, + iter::zip, + mem, + os::fd::{AsRawFd, FromRawFd}, +}; + +use assert_matches::assert_matches; +use libc::EFD_NONBLOCK; +use rutabaga_gfx::RutabagaFence; +use vhost::vhost_user::gpu_message::VhostUserGpuCursorPos; +use vhost_user_backend::{VringRwLock, VringT}; +use virtio_bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; +use virtio_queue::{ + desc::{split::Descriptor as SplitDescriptor, RawDescriptor}, + mock::MockSplitQueue, + Queue, QueueT, +}; +use vm_memory::{ + Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap, +}; +use vmm_sys_util::eventfd::EventFd; + +use crate::{ + gpu_types::VirtioGpuRing, + protocol::GpuResponse::{ErrUnspec, OkCapset, OkCapsetInfo, OkNoData}, + renderer::Renderer, +}; + +pub struct TestingDescChainArgs<'a> { + /// Each readable buffer becomes a descriptor (no WRITE flag) + pub readable_desc_bufs: &'a [&'a [u8]], + /// Each length becomes a writable descriptor (WRITE flag set) + pub writable_desc_lengths: &'a [u32], +} + +// Common 
function to test fence creation and processing logic. +// It takes a mutable reference to backend Gpu component and the fence object. +pub fn test_fence_operations(gpu_device: &mut T) { + let fence = RutabagaFence { + flags: 0, + fence_id: 0, + ctx_id: 1, + ring_idx: 0, + }; + // Test creating a fence with the `RutabagaFence` + // This assumes create_fence returns Result> or similar nested + // result + let result = gpu_device.create_fence(fence); + assert_matches!(result, Ok(OkNoData)); // Assuming OkNoData is defined + + // Test processing gpu fence: If the fence has already been signaled return true + // This test logic implies that 'create_fence' automatically signals the first + // fence (fence ID 0) or that the GfxstreamGpu is initialized with fence 0 + // already completed. + let ring = VirtioGpuRing::Global; + let result = gpu_device.process_fence(ring.clone(), 0, 0, 0); // Assuming ring, seq, flags, type + assert_matches!(result, true, "Fence ID 0 should be signaled"); + + // Test processing gpu fence: If the fence has not yet been signaled return + // false + let result = gpu_device.process_fence(ring, 1, 0, 0); + assert_matches!(result, false, "Fence ID 1 should not be signaled"); +} + +/// Common function to validate capset discovery & fetch on any Renderer. +/// - Queries capset info at `index` (default 0 via the wrapper below) +/// - Uses the returned (`capset_id`, version) to fetch the actual capset blob. +pub fn test_capset_operations(gpu: &T, index: u32) { + let info = gpu.get_capset_info(index); + // Expect Ok(OkCapsetInfo { .. }) + assert_matches!(info, Ok(OkCapsetInfo { .. })); + + // Pull out id/version and fetch the capset + let Ok(OkCapsetInfo { + capset_id, version, .. 
+ }) = info + else { + unreachable!("assert_matches above guarantees this arm"); + }; + + let caps = gpu.get_capset(capset_id, version); + // Expect Ok(OkCapset(_)) + assert_matches!(caps, Ok(OkCapset(_))); +} + +/// Test the cursor movement logic of any `GpuDevice` implementation. +/// - Resource ID 0 should hide the cursor (or fail if no resource is bound) +/// - Any other Resource ID should attempt to move the cursor (or fail if no +/// resource) +pub fn test_move_cursor(gpu_device: &mut T) { + let cursor_pos = VhostUserGpuCursorPos { + scanout_id: 1, + x: 123, + y: 123, + }; + + // Test case 1: Resource ID 0 (invalid/no resource) + let result = gpu_device.move_cursor(0, cursor_pos); + assert_matches!(result, Err(ErrUnspec)); + + // Test case 2: Resource ID 1 (resource might exist) + let result = gpu_device.move_cursor(1, cursor_pos); + assert_matches!(result, Err(ErrUnspec)); +} + +/// Create a vring with the specified descriptor chains, queue size, and memory +/// regions. Returns the created `VringRwLock`, a vector of output buffer +/// address vectors, and the `EventFd` used for call notifications. 
+pub fn create_vring( + mem: &GuestMemoryAtomic, + chains: &[TestingDescChainArgs], + queue_addr_start: GuestAddress, + data_addr_start: GuestAddress, + queue_size: u16, +) -> (VringRwLock, Vec>, EventFd) { + let mem_handle = mem.memory(); + mem_handle + .check_address(queue_addr_start) + .expect("Invalid start address"); + + let mut output_bufs = Vec::new(); + let vq = MockSplitQueue::create(&*mem_handle, queue_addr_start, queue_size); + + // Address of the buffer associated with the next descriptor we place + let mut next_addr = data_addr_start.0; + let mut chain_index_start = 0usize; + let mut descriptors: Vec = Vec::new(); + + for chain in chains { + // Readable descriptors (no WRITE flag) + for buf in chain.readable_desc_bufs.iter().copied() { + mem_handle + .check_address(GuestAddress(next_addr)) + .expect("Readable descriptor's buffer address is not valid!"); + let desc = SplitDescriptor::new( + next_addr, + u32::try_from(buf.len()).expect("Buffer too large to fit into descriptor"), + 0, + 0, + ); + mem_handle.write(buf, desc.addr()).unwrap(); + descriptors.push(desc); + next_addr += buf.len() as u64; + } + + // Writable descriptors (WRITE flag) + let mut writable_descriptor_addresses = Vec::new(); + for &desc_len in chain.writable_desc_lengths { + mem_handle + .check_address(GuestAddress(next_addr)) + .expect("Writable descriptor's buffer address is not valid!"); + let desc = SplitDescriptor::new( + next_addr, + desc_len, + u16::try_from(VRING_DESC_F_WRITE).unwrap(), + 0, + ); + writable_descriptor_addresses.push(desc.addr()); + descriptors.push(desc); + next_addr += u64::from(desc_len); + } + output_bufs.push(writable_descriptor_addresses); + + // Link the descriptors we just appended into a single chain + make_descriptors_into_a_chain( + u16::try_from(chain_index_start).unwrap(), + &mut descriptors[chain_index_start..], + ); + chain_index_start = descriptors.len(); + } + + assert!(descriptors.len() < queue_size as usize); + + if !descriptors.is_empty() { 
+ let descs_raw: Vec = + descriptors.into_iter().map(RawDescriptor::from).collect(); + vq.build_multiple_desc_chains(&descs_raw) + .expect("Failed to build descriptor chain"); + } + + // Create the vring and point it at the queue tables + let queue: Queue = vq.create_queue().unwrap(); + let vring = VringRwLock::new(mem.clone(), queue_size).unwrap(); + + // Install call eventfd + let call_evt = EventFd::new(EFD_NONBLOCK).unwrap(); + let call_evt_clone = call_evt.try_clone().unwrap(); + vring + .set_queue_info(queue.desc_table(), queue.avail_ring(), queue.used_ring()) + .unwrap(); + vring.set_call(Some(event_fd_into_file(call_evt_clone))); + + vring.set_enabled(true); + vring.set_queue_ready(true); + + (vring, output_bufs, call_evt) +} + +/// Link a slice of descriptors into a single chain starting at `start_idx`. +/// The last descriptor in the slice will have its NEXT flag cleared. +fn make_descriptors_into_a_chain(start_idx: u16, descriptors: &mut [SplitDescriptor]) { + let last_idx = start_idx + u16::try_from(descriptors.len()).unwrap() - 1; + for (idx, desc) in zip(start_idx.., descriptors.iter_mut()) { + if idx == last_idx { + desc.set_flags(desc.flags() & !VRING_DESC_F_NEXT as u16); + } else { + desc.set_flags(desc.flags() | VRING_DESC_F_NEXT as u16); + desc.set_next(idx + 1); + } + } +} + +/// Convert an `EventFd` into a File, transferring ownership of the underlying +/// FD. +fn event_fd_into_file(event_fd: EventFd) -> File { + // SAFETY: transfer FD ownership into File; prevent Drop on EventFd. 
+ unsafe { + let raw = event_fd.as_raw_fd(); + mem::forget(event_fd); + File::from_raw_fd(raw) + } +} diff --git a/vhost-device-gpu/src/virtio_gpu.rs b/vhost-device-gpu/src/virtio_gpu.rs deleted file mode 100644 index 3035f84..0000000 --- a/vhost-device-gpu/src/virtio_gpu.rs +++ /dev/null @@ -1,1455 +0,0 @@ -// Copyright 2024 Red Hat Inc -// Copyright 2019 The ChromiumOS Authors -// -// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause - -use std::{ - collections::BTreeMap, - io::IoSliceMut, - os::fd::{AsFd, FromRawFd, RawFd}, - result::Result, - sync::{Arc, Mutex}, -}; - -use libc::c_void; -use log::{debug, error, trace, warn}; -use rutabaga_gfx::{ - Resource3DInfo, ResourceCreate3D, ResourceCreateBlob, Rutabaga, RutabagaBuilder, - RutabagaComponentType, RutabagaFence, RutabagaFenceHandler, RutabagaHandle, - RutabagaIntoRawDescriptor, RutabagaIovec, Transfer3D, RUTABAGA_HANDLE_TYPE_MEM_DMABUF, -}; -#[cfg(feature = "backend-gfxstream")] -use vhost::vhost_user::gpu_message::VhostUserGpuScanout; -use vhost::vhost_user::{ - gpu_message::{ - VhostUserGpuCursorPos, VhostUserGpuCursorUpdate, VhostUserGpuDMABUFScanout, - VhostUserGpuDMABUFScanout2, VhostUserGpuEdidRequest, VhostUserGpuUpdate, - }, - GpuBackend, -}; -use vhost_user_backend::{VringRwLock, VringT}; -use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, VolatileSlice}; -use vmm_sys_util::eventfd::EventFd; - -use crate::{ - device::Error, - protocol::{ - virtio_gpu_rect, GpuResponse, - GpuResponse::{ - ErrInvalidParameter, ErrInvalidResourceId, ErrInvalidScanoutId, ErrUnspec, OkCapset, - OkCapsetInfo, OkDisplayInfo, OkEdid, OkNoData, OkResourcePlaneInfo, - }, - GpuResponsePlaneInfo, VirtioGpuResult, VIRTIO_GPU_FLAG_INFO_RING_IDX, - VIRTIO_GPU_MAX_SCANOUTS, - }, - GpuConfig, GpuMode, -}; - -fn sglist_to_rutabaga_iovecs( - vecs: &[(GuestAddress, usize)], - mem: &GuestMemoryMmap, -) -> Result, ()> { - if vecs - .iter() - .any(|&(addr, len)| mem.get_slice(addr, len).is_err()) - { - return Err(()); - } - - 
let mut rutabaga_iovecs: Vec = Vec::new(); - for &(addr, len) in vecs { - let slice = mem.get_slice(addr, len).unwrap(); - rutabaga_iovecs.push(RutabagaIovec { - base: slice.ptr_guard_mut().as_ptr().cast::(), - len, - }); - } - Ok(rutabaga_iovecs) -} - -#[derive(Default, Debug, Clone)] -pub struct Rectangle { - pub x: u32, - pub y: u32, - pub width: u32, - pub height: u32, -} - -impl From for Rectangle { - fn from(r: virtio_gpu_rect) -> Self { - Self { - x: r.x.into(), - y: r.y.into(), - width: r.width.into(), - height: r.height.into(), - } - } -} - -#[cfg_attr(test, mockall::automock)] -// We need to specify some lifetimes explicitly, for mockall::automock attribute to compile -#[allow(clippy::needless_lifetimes)] -pub trait VirtioGpu { - /// Uses the hypervisor to unmap the blob resource. - fn resource_unmap_blob(&mut self, resource_id: u32) -> VirtioGpuResult; - - /// Uses the hypervisor to map the rutabaga blob resource. - /// - /// When sandboxing is disabled, `external_blob` is unset and opaque fds are - /// mapped by rutabaga as `ExternalMapping`. - /// When sandboxing is enabled, `external_blob` is set and opaque fds must - /// be mapped in the hypervisor process by Vulkano using metadata - /// provided by `Rutabaga::vulkan_info()`. - fn resource_map_blob(&mut self, resource_id: u32, offset: u64) -> VirtioGpuResult; - - /// Creates a blob resource using rutabaga. - fn resource_create_blob( - &mut self, - ctx_id: u32, - resource_id: u32, - resource_create_blob: ResourceCreateBlob, - vecs: Vec<(GuestAddress, usize)>, - mem: &GuestMemoryMmap, - ) -> VirtioGpuResult; - - fn process_fence( - &mut self, - ring: VirtioGpuRing, - fence_id: u64, - desc_index: u16, - len: u32, - ) -> bool; - - /// Creates a fence with the `RutabagaFence` that can be used to determine - /// when the previous command completed. - fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult; - - /// Submits a command buffer to a rutabaga context. 
- fn submit_command( - &mut self, - ctx_id: u32, - commands: &mut [u8], - fence_ids: &[u64], - ) -> VirtioGpuResult; - - /// Detaches a resource from a rutabaga context. - fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; - - /// Attaches a resource to a rutabaga context. - fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult; - - /// Destroys a rutabaga context. - fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult; - fn force_ctx_0(&self); - - /// Gets the list of supported display resolutions - fn display_info(&self) -> VirtioGpuResult; - - /// Gets the EDID for the specified scanout ID. If that scanout is not - /// enabled, it would return the EDID of a default display. - fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult; - - /// Sets the given resource id as the source of scanout to the display. - fn set_scanout( - &mut self, - scanout_id: u32, - resource_id: u32, - rect: Rectangle, - ) -> VirtioGpuResult; - - /// Creates a 3D resource with the given properties and `resource_id`. - fn resource_create_3d( - &mut self, - resource_id: u32, - resource_create_3d: ResourceCreate3D, - ) -> VirtioGpuResult; - - /// Releases guest kernel reference on the resource. - fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult; - - /// If the resource is the scanout resource, flush it to the display. - fn flush_resource(&mut self, resource_id: u32, rect: Rectangle) -> VirtioGpuResult; - - /// Copies data to host resource from the attached iovecs. Can also be used - /// to flush caches. - fn transfer_write( - &mut self, - ctx_id: u32, - resource_id: u32, - transfer: Transfer3D, - ) -> VirtioGpuResult; - - /// Copies data from the host resource to: - /// 1) To the optional volatile slice - /// 2) To the host resource's attached iovecs - /// - /// Can also be used to invalidate caches. 
- fn transfer_read<'a>( - &mut self, - ctx_id: u32, - resource_id: u32, - transfer: Transfer3D, - buf: Option>, - ) -> VirtioGpuResult; - - /// Attaches backing memory to the given resource, represented by a `Vec` of - /// `(address, size)` tuples in the guest's physical address space. - /// Converts to `RutabagaIovec` from the memory mapping. - fn attach_backing( - &mut self, - resource_id: u32, - mem: &GuestMemoryMmap, - vecs: Vec<(GuestAddress, usize)>, - ) -> VirtioGpuResult; - - /// Detaches any previously attached iovecs from the resource. - fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult; - - /// Updates the cursor's memory to the given `resource_id`, and sets its - /// position to the given coordinates. - fn update_cursor( - &mut self, - resource_id: u32, - cursor_pos: VhostUserGpuCursorPos, - hot_x: u32, - hot_y: u32, - ) -> VirtioGpuResult; - - /// Moves the cursor's position to the given coordinates. - fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult; - - /// Returns a uuid for the resource. - fn resource_assign_uuid(&self, resource_id: u32) -> VirtioGpuResult; - - /// Gets rutabaga's capset information associated with `index`. - fn get_capset_info(&self, index: u32) -> VirtioGpuResult; - - /// Gets a capset from rutabaga. - fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult; - - /// Creates a rutabaga context. - fn create_context<'a>( - &mut self, - ctx_id: u32, - context_init: u32, - context_name: Option<&'a str>, - ) -> VirtioGpuResult; - - /// Get an `EventFd` descriptor, that signals when to call `event_poll`. - fn get_event_poll_fd(&self) -> Option; - - /// Polls the Rutabaga backend. 
- fn event_poll(&self); -} - -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum VirtioGpuRing { - Global, - ContextSpecific { ctx_id: u32, ring_idx: u8 }, -} - -struct FenceDescriptor { - ring: VirtioGpuRing, - fence_id: u64, - desc_index: u16, - len: u32, -} - -#[derive(Default)] -pub struct FenceState { - descs: Vec, - completed_fences: BTreeMap, -} - -#[derive(Copy, Clone, Debug, Default)] -struct AssociatedScanouts(u32); - -impl AssociatedScanouts { - #[allow(clippy::missing_const_for_fn)] - fn enable(&mut self, scanout_id: u32) { - self.0 |= 1 << scanout_id; - } - - #[allow(clippy::missing_const_for_fn)] - fn disable(&mut self, scanout_id: u32) { - self.0 &= !(1 << scanout_id); - } - - const fn has_any_enabled(self) -> bool { - self.0 != 0 - } - - fn iter_enabled(self) -> impl Iterator { - (0..VIRTIO_GPU_MAX_SCANOUTS).filter(move |i| ((self.0 >> i) & 1) == 1) - } -} - -#[derive(Default, Clone)] -pub struct VirtioGpuResource { - id: u32, - width: u32, - height: u32, - /// Stores information about which scanouts are associated with the given - /// resource. Resource could be used for multiple scanouts (the displays - /// are mirrored). - scanouts: AssociatedScanouts, - pub info_3d: Option, - pub handle: Option>, -} - -impl VirtioGpuResource { - fn calculate_size(&self) -> Result { - let width = self.width as usize; - let height = self.height as usize; - let size = width - .checked_mul(height) - .ok_or("Multiplication of width and height overflowed")? 
- .checked_mul(READ_RESOURCE_BYTES_PER_PIXEL as usize) - .ok_or("Multiplication of result and bytes_per_pixel overflowed")?; - - Ok(size) - } -} - -impl VirtioGpuResource { - /// Creates a new `VirtioGpuResource` with 2D/3D metadata - pub fn new(resource_id: u32, width: u32, height: u32) -> Self { - Self { - id: resource_id, - width, - height, - scanouts: AssociatedScanouts::default(), - info_3d: None, - handle: None, - } - } -} - -pub struct VirtioGpuScanout { - resource_id: u32, -} - -pub struct RutabagaVirtioGpu { - pub(crate) rutabaga: Rutabaga, - pub(crate) gpu_backend: GpuBackend, - pub(crate) resources: BTreeMap, - pub(crate) fence_state: Arc>, - pub(crate) scanouts: [Option; VIRTIO_GPU_MAX_SCANOUTS as usize], - pub(crate) component_type: RutabagaComponentType, -} - -const READ_RESOURCE_BYTES_PER_PIXEL: u32 = 4; - -impl RutabagaVirtioGpu { - fn create_fence_handler( - queue_ctl: VringRwLock, - fence_state: Arc>, - ) -> RutabagaFenceHandler { - RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| { - debug!( - "XXX - fence called: id={}, ring_idx={}", - completed_fence.fence_id, completed_fence.ring_idx - ); - - let mut fence_state = fence_state.lock().unwrap(); - let mut i = 0; - - let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX { - 0 => VirtioGpuRing::Global, - _ => VirtioGpuRing::ContextSpecific { - ctx_id: completed_fence.ctx_id, - ring_idx: completed_fence.ring_idx, - }, - }; - - while i < fence_state.descs.len() { - debug!("XXX - fence_id: {}", fence_state.descs[i].fence_id); - if fence_state.descs[i].ring == ring - && fence_state.descs[i].fence_id <= completed_fence.fence_id - { - let completed_desc = fence_state.descs.remove(i); - debug!( - "XXX - found fence: desc_index={}", - completed_desc.desc_index - ); - - queue_ctl - .add_used(completed_desc.desc_index, completed_desc.len) - .unwrap(); - - queue_ctl - .signal_used_queue() - .map_err(Error::NotificationFailed) - .unwrap(); - debug!("Notification sent"); - } else 
{ - i += 1; - } - } - // Update the last completed fence for this context - fence_state - .completed_fences - .insert(ring, completed_fence.fence_id); - }) - } - - fn configure_rutabaga_builder( - gpu_config: &GpuConfig, - fence: RutabagaFenceHandler, - ) -> (RutabagaBuilder, RutabagaComponentType) { - let component = match gpu_config.gpu_mode() { - GpuMode::VirglRenderer => RutabagaComponentType::VirglRenderer, - #[cfg(feature = "backend-gfxstream")] - GpuMode::Gfxstream => RutabagaComponentType::Gfxstream, - }; - - let builder = RutabagaBuilder::new(gpu_config.capsets().bits(), fence) - .set_use_egl(gpu_config.flags().use_egl) - .set_use_gles(gpu_config.flags().use_gles) - .set_use_surfaceless(gpu_config.flags().use_surfaceless) - // Since vhost-user-gpu is out-of-process this is the only type of blob resource that - // could work, so this is always enabled - .set_use_external_blob(true); - - (builder, component) - } - - pub fn new(queue_ctl: &VringRwLock, gpu_config: &GpuConfig, gpu_backend: GpuBackend) -> Self { - let fence_state = Arc::new(Mutex::new(FenceState::default())); - let fence = Self::create_fence_handler(queue_ctl.clone(), fence_state.clone()); - let (builder, component_type) = Self::configure_rutabaga_builder(gpu_config, fence); - - let rutabaga = builder.build().expect("Rutabaga initialization failed!"); - - Self { - rutabaga, - gpu_backend, - resources: BTreeMap::default(), - fence_state, - scanouts: Default::default(), - component_type, - } - } - - fn result_from_query(&self, resource_id: u32) -> GpuResponse { - let Ok(query) = self.rutabaga.resource3d_info(resource_id) else { - return OkNoData; - }; - let mut plane_info = Vec::with_capacity(4); - for plane_index in 0..4 { - plane_info.push(GpuResponsePlaneInfo { - stride: query.strides[plane_index], - offset: query.offsets[plane_index], - }); - } - let format_modifier = query.modifier; - OkResourcePlaneInfo { - format_modifier, - plane_info, - } - } - - fn read_2d_resource( - &mut self, - 
resource: &VirtioGpuResource, - output: &mut [u8], - ) -> Result<(), String> { - let minimal_buffer_size = resource.calculate_size()?; - assert!(output.len() >= minimal_buffer_size); - - let transfer = Transfer3D { - x: 0, - y: 0, - z: 0, - w: resource.width, - h: resource.height, - d: 1, - level: 0, - stride: resource.width * READ_RESOURCE_BYTES_PER_PIXEL, - layer_stride: 0, - offset: 0, - }; - - // ctx_id 0 seems to be special, crosvm uses it for this purpose too - self.rutabaga - .transfer_read(0, resource.id, transfer, Some(IoSliceMut::new(output))) - .map_err(|e| format!("{e}"))?; - - Ok(()) - } -} - -impl VirtioGpu for RutabagaVirtioGpu { - fn force_ctx_0(&self) { - self.rutabaga.force_ctx_0(); - } - - fn display_info(&self) -> VirtioGpuResult { - let backend_display_info = self.gpu_backend.get_display_info().map_err(|e| { - error!("Failed to get display info: {e:?}"); - ErrUnspec - })?; - - let display_info = backend_display_info - .pmodes - .iter() - .map(|display| (display.r.width, display.r.height, display.enabled == 1)) - .collect::>(); - - debug!("Displays: {:?}", display_info); - Ok(OkDisplayInfo(display_info)) - } - - fn get_edid(&self, edid_req: VhostUserGpuEdidRequest) -> VirtioGpuResult { - debug!("edid request: {edid_req:?}"); - let edid = self.gpu_backend.get_edid(&edid_req).map_err(|e| { - error!("Failed to get edid from frontend: {}", e); - ErrUnspec - })?; - - Ok(OkEdid { - blob: Box::from(&edid.edid[..edid.size as usize]), - }) - } - - fn set_scanout( - &mut self, - scanout_id: u32, - resource_id: u32, - rect: Rectangle, - ) -> VirtioGpuResult { - let scanout_idx = scanout_id as usize; - - match self.component_type { - RutabagaComponentType::VirglRenderer => { - // Basic Validation of scanout_id - if scanout_idx >= VIRTIO_GPU_MAX_SCANOUTS as usize { - return Err(ErrInvalidScanoutId); - } - - // Handle existing scanout to disable it if necessary - let current_scanout_resource_id = - self.scanouts[scanout_idx].as_ref().map(|s| s.resource_id); - 
if let Some(old_resource_id) = current_scanout_resource_id { - if old_resource_id != resource_id { - if let Some(old_resource) = self.resources.get_mut(&old_resource_id) { - old_resource.scanouts.disable(scanout_id); - } - } - } - - // Virtio spec: "The driver can use resource_id = 0 to disable a scanout." - if resource_id == 0 { - // Update internal state to reflect disabled scanout - self.scanouts[scanout_idx] = None; - debug!("Disabling scanout scanout_id={scanout_id}"); - - // Send VHOST_USER_GPU_DMABUF_SCANOUT message with FD = -1 - // QEMU's C code uses DMABUF_SCANOUT (not DMABUF_SCANOUT2) for disable with -1 - // FD. - self.gpu_backend - .set_dmabuf_scanout( - &VhostUserGpuDMABUFScanout { - scanout_id, - x: 0, - y: 0, - width: 0, - height: 0, - fd_width: 0, - fd_height: 0, - fd_stride: 0, - fd_flags: 0, - fd_drm_fourcc: 0, - }, - None::<&RawFd>, /* Send None for the FD, which translates to -1 in - * the backend */ - ) - .map_err(|e| { - error!("Failed to send DMABUF scanout disable message: {e:?}"); - ErrUnspec - })?; - return Ok(OkNoData); - } - - // --- Handling non-zero resource_id (Enable/Update Scanout) --- - - // Get the resource from your internal map - let resource = self - .resources - .get_mut(&resource_id) - .ok_or(ErrInvalidResourceId)?; - - // Extract the DMABUF information (handle and info_3d) - let handle = resource.handle.as_ref().ok_or_else(|| { - error!("resource {} has no handle", resource_id); - ErrUnspec - })?; - - if handle.handle_type != RUTABAGA_HANDLE_TYPE_MEM_DMABUF { - error!( - "resource {} handle is not a DMABUF (got type = {})", - resource_id, handle.handle_type - ); - return Err(ErrUnspec); - } - - // Borrow the 3D info directly; no DmabufTextureInfo wrapper. - let info_3d = resource.info_3d.as_ref().ok_or_else(|| { - error!("resource {resource_id} has handle but no info_3d"); - ErrUnspec - })?; - - // Clone the fd we’ll pass to the backend. 
- let fd = handle.os_handle.try_clone().map_err(|e| { - error!( - "Failed to clone DMABUF FD for resource {}: {:?}", - resource_id, e - ); - ErrUnspec - })?; - - debug!( - "Exported DMABUF texture info: width={}, height={}, strides={}, fourcc={}, modifier={}", - info_3d.width, info_3d.height, info_3d.strides[0], info_3d.drm_fourcc, info_3d.modifier - ); - - // Construct VhostUserGpuDMABUFScanout Message - let dmabuf_scanout_payload = VhostUserGpuDMABUFScanout { - scanout_id, - x: rect.x, - y: rect.y, - width: rect.width, - height: rect.height, - fd_width: info_3d.width, - fd_height: info_3d.height, - fd_stride: info_3d.strides[0], - fd_flags: 0, - fd_drm_fourcc: info_3d.drm_fourcc, - }; - - // Determine which message type to send based on modifier support - let frontend_supports_dmabuf2 = info_3d.modifier != 0; - - if frontend_supports_dmabuf2 { - let dmabuf_scanout2_msg = VhostUserGpuDMABUFScanout2 { - dmabuf_scanout: dmabuf_scanout_payload, - modifier: info_3d.modifier, - }; - self.gpu_backend - .set_dmabuf_scanout2(&dmabuf_scanout2_msg, Some(&fd.as_fd())) - .map_err(|e| { - error!("Failed to send VHOST_USER_GPU_DMABUF_SCANOUT2: {e:?}"); - ErrUnspec - })?; - } else { - // Fallback to DMABUF_SCANOUT if DMABUF2 isn't supported or modifier is 0 - self.gpu_backend - .set_dmabuf_scanout(&dmabuf_scanout_payload, Some(&fd.as_fd())) - .map_err(|e| { - error!("Failed to send VHOST_USER_GPU_DMABUF_SCANOUT: {e:?}"); - ErrUnspec - })?; - } - - debug!( - "Sent DMABUF scanout for resource {} using fd {:?}", - resource_id, - fd.as_fd() - ); - - // Update internal state to associate resource with scanout - resource.scanouts.enable(scanout_id); - self.scanouts[scanout_idx] = Some(VirtioGpuScanout { resource_id }); - } - - #[cfg(feature = "backend-gfxstream")] - RutabagaComponentType::Gfxstream => { - if resource_id == 0 { - self.scanouts[scanout_idx] = None; - debug!("Disabling scanout scanout_id={scanout_id}"); - - self.gpu_backend - .set_scanout(&VhostUserGpuScanout { - 
scanout_id, - width: 0, - height: 0, - }) - .map_err(|e| { - error!("Failed to disable scanout: {e:?}"); - ErrUnspec - })?; - - return Ok(OkNoData); - } - - let resource = self - .resources - .get_mut(&resource_id) - .ok_or(ErrInvalidResourceId)?; - - debug!( - "Enabling legacy scanout scanout_id={scanout_id}, resource_id={resource_id}: {rect:?}" - ); - - self.gpu_backend - .set_scanout(&VhostUserGpuScanout { - scanout_id, - width: rect.width, - height: rect.height, - }) - .map_err(|e| { - error!("Failed to legacy set_scanout: {e:?}"); - ErrUnspec - })?; - - resource.scanouts.enable(scanout_id); - self.scanouts[scanout_idx] = Some(VirtioGpuScanout { resource_id }); - } - - _ => { - error!("Unsupported backend type"); - return Err(ErrUnspec); - } - } - Ok(OkNoData) - } - - fn resource_create_3d( - &mut self, - resource_id: u32, - resource_create_3d: ResourceCreate3D, - ) -> VirtioGpuResult { - self.rutabaga - .resource_create_3d(resource_id, resource_create_3d)?; - - // Try to export a handle for this resource. - let handle_opt: Option> = - self.rutabaga.export_blob(resource_id).map(Arc::new).ok(); - - // Only trust resource3d_info() when we have a DMABUF handle. - let info_3d_opt: Option = if let Some(h) = handle_opt.as_ref() { - if h.handle_type == RUTABAGA_HANDLE_TYPE_MEM_DMABUF { - self.rutabaga.resource3d_info(resource_id).ok() - } else { - log::warn!( - "export_blob for resource {} returned non-DMABUF handle type: {:?}", - resource_id, - h.handle_type - ); - None - } - } else { - None - }; - - let resource = VirtioGpuResource { - id: resource_id, - width: resource_create_3d.width, - height: resource_create_3d.height, - scanouts: AssociatedScanouts::default(), - info_3d: info_3d_opt, - handle: handle_opt, - }; - - debug_assert!( - !self.resources.contains_key(&resource_id), - "Resource ID {resource_id} already exists in the resources map." - ); - - // Rely on rutabaga to check for duplicate resource ids. 
- self.resources.insert(resource_id, resource); - Ok(self.result_from_query(resource_id)) - } - - fn unref_resource(&mut self, resource_id: u32) -> VirtioGpuResult { - let resource = self.resources.remove(&resource_id); - match resource { - None => return Err(ErrInvalidResourceId), - // The spec doesn't say anything about this situation and this doesn't actually seem - // to happen in practise but let's be careful and refuse to disable the resource. - // This keeps the internal state of the gpu device and the fronted consistent. - Some(resource) if resource.scanouts.has_any_enabled() => { - warn!( - "The driver requested unref_resource, but resource {resource_id} has \ - associated scanouts, refusing to delete the resource." - ); - return Err(ErrUnspec); - } - _ => (), - } - self.rutabaga.unref_resource(resource_id)?; - Ok(OkNoData) - } - - /// If the resource is the scanout resource, flush it to the display. - fn flush_resource(&mut self, resource_id: u32, _rect: Rectangle) -> VirtioGpuResult { - if resource_id == 0 { - return Ok(OkNoData); - } - - let resource = self - .resources - .get(&resource_id) - .ok_or(ErrInvalidResourceId)? - .clone(); - - for scanout_id in resource.scanouts.iter_enabled() { - match self.component_type { - RutabagaComponentType::VirglRenderer => { - // For VirglRenderer, use update_dmabuf_scanout - self.gpu_backend - .update_dmabuf_scanout(&VhostUserGpuUpdate { - scanout_id, - x: 0, - y: 0, - width: resource.width, - height: resource.height, - }) - .map_err(|e| { - error!("Failed to update_dmabuf_scanout: {e:?}"); - ErrUnspec - })?; - } - - #[cfg(feature = "backend-gfxstream")] - RutabagaComponentType::Gfxstream => { - // Gfxstream expects image memory transfer (read + send) - let resource_size = resource.calculate_size().map_err(|e| { - error!("Invalid resource size for flushing: {e:?}"); - ErrUnspec - })?; - - let mut data = vec![0; resource_size]; - - // Gfxstream doesn't support transfer_read for portion of the resource. 
So we - // always read the whole resource, even if the guest specified to - // flush only a portion of it. - // - // The function stream_renderer_transfer_read_iov seems to ignore the stride and - // transfer_box parameters and expects the provided buffer to fit the whole - // resource. - if let Err(e) = self.read_2d_resource(&resource, &mut data) { - error!( - "Failed to read resource {} for scanout {}: {}", - resource_id, scanout_id, e - ); - continue; - } - - self.gpu_backend - .update_scanout( - &VhostUserGpuUpdate { - scanout_id, - x: 0, - y: 0, - width: resource.width, - height: resource.height, - }, - &data, - ) - .map_err(|e| { - error!("Failed to update_scanout: {e:?}"); - ErrUnspec - })?; - } - - _ => { - error!("flush_resource: unsupported component_type"); - return Err(ErrUnspec); - } - } - } - - Ok(OkNoData) - } - - fn transfer_write( - &mut self, - ctx_id: u32, - resource_id: u32, - transfer: Transfer3D, - ) -> VirtioGpuResult { - trace!("transfer_write ctx_id {ctx_id}, resource_id {resource_id}, {transfer:?}"); - - self.rutabaga - .transfer_write(ctx_id, resource_id, transfer, None)?; - Ok(OkNoData) - } - - fn transfer_read( - &mut self, - ctx_id: u32, - resource_id: u32, - transfer: Transfer3D, - buf: Option, - ) -> VirtioGpuResult { - let buf = buf.map(|vs| { - IoSliceMut::new( - // SAFETY: trivially safe - unsafe { std::slice::from_raw_parts_mut(vs.ptr_guard_mut().as_ptr(), vs.len()) }, - ) - }); - self.rutabaga - .transfer_read(ctx_id, resource_id, transfer, buf)?; - Ok(OkNoData) - } - - fn attach_backing( - &mut self, - resource_id: u32, - mem: &GuestMemoryMmap, - vecs: Vec<(GuestAddress, usize)>, - ) -> VirtioGpuResult { - let rutabaga_iovecs = sglist_to_rutabaga_iovecs(&vecs[..], mem).map_err(|()| ErrUnspec)?; - self.rutabaga.attach_backing(resource_id, rutabaga_iovecs)?; - Ok(OkNoData) - } - - fn detach_backing(&mut self, resource_id: u32) -> VirtioGpuResult { - self.rutabaga.detach_backing(resource_id)?; - Ok(OkNoData) - } - - fn 
update_cursor( - &mut self, - resource_id: u32, - cursor_pos: VhostUserGpuCursorPos, - hot_x: u32, - hot_y: u32, - ) -> VirtioGpuResult { - const CURSOR_WIDTH: u32 = 64; - const CURSOR_HEIGHT: u32 = 64; - - let mut data = Box::new( - [0; READ_RESOURCE_BYTES_PER_PIXEL as usize - * CURSOR_WIDTH as usize - * CURSOR_HEIGHT as usize], - ); - - let cursor_resource = self - .resources - .get(&resource_id) - .ok_or(ErrInvalidResourceId)?; - - if cursor_resource.width != CURSOR_WIDTH || cursor_resource.height != CURSOR_HEIGHT { - error!("Cursor resource has invalid dimensions"); - return Err(ErrInvalidParameter); - } - - self.read_2d_resource(&cursor_resource.clone(), &mut data[..]) - .map_err(|e| { - error!("Failed to read resource of cursor: {e}"); - ErrUnspec - })?; - - let cursor_update = VhostUserGpuCursorUpdate { - pos: cursor_pos, - hot_x, - hot_y, - }; - - self.gpu_backend - .cursor_update(&cursor_update, &data) - .map_err(|e| { - error!("Failed to update cursor pos from frontend: {}", e); - ErrUnspec - })?; - - Ok(OkNoData) - } - - fn move_cursor(&mut self, resource_id: u32, cursor: VhostUserGpuCursorPos) -> VirtioGpuResult { - if resource_id == 0 { - self.gpu_backend.cursor_pos_hide(&cursor).map_err(|e| { - error!("Failed to set cursor pos from frontend: {}", e); - ErrUnspec - })?; - } else { - self.gpu_backend.cursor_pos(&cursor).map_err(|e| { - error!("Failed to set cursor pos from frontend: {}", e); - ErrUnspec - })?; - } - - Ok(OkNoData) - } - - fn resource_assign_uuid(&self, _resource_id: u32) -> VirtioGpuResult { - error!("Not implemented: resource_assign_uuid"); - Err(ErrUnspec) - } - - fn get_capset_info(&self, index: u32) -> VirtioGpuResult { - let (capset_id, version, size) = self.rutabaga.get_capset_info(index)?; - Ok(OkCapsetInfo { - capset_id, - version, - size, - }) - } - - fn get_capset(&self, capset_id: u32, version: u32) -> VirtioGpuResult { - let capset = self.rutabaga.get_capset(capset_id, version)?; - Ok(OkCapset(capset)) - } - - fn 
create_context( - &mut self, - ctx_id: u32, - context_init: u32, - context_name: Option<&str>, - ) -> VirtioGpuResult { - self.rutabaga - .create_context(ctx_id, context_init, context_name)?; - Ok(OkNoData) - } - - fn destroy_context(&mut self, ctx_id: u32) -> VirtioGpuResult { - self.rutabaga.destroy_context(ctx_id)?; - Ok(OkNoData) - } - - fn context_attach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { - self.rutabaga.context_attach_resource(ctx_id, resource_id)?; - Ok(OkNoData) - } - - fn context_detach_resource(&mut self, ctx_id: u32, resource_id: u32) -> VirtioGpuResult { - self.rutabaga.context_detach_resource(ctx_id, resource_id)?; - Ok(OkNoData) - } - - fn submit_command( - &mut self, - ctx_id: u32, - commands: &mut [u8], - fence_ids: &[u64], - ) -> VirtioGpuResult { - self.rutabaga.submit_command(ctx_id, commands, fence_ids)?; - Ok(OkNoData) - } - - fn create_fence(&mut self, rutabaga_fence: RutabagaFence) -> VirtioGpuResult { - self.rutabaga.create_fence(rutabaga_fence)?; - Ok(OkNoData) - } - - fn process_fence( - &mut self, - ring: VirtioGpuRing, - fence_id: u64, - desc_index: u16, - len: u32, - ) -> bool { - // In case the fence is signaled immediately after creation, don't add a return - // FenceDescriptor. 
- let mut fence_state = self.fence_state.lock().unwrap(); - if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) { - fence_state.descs.push(FenceDescriptor { - ring, - fence_id, - desc_index, - len, - }); - - false - } else { - true - } - } - - fn resource_create_blob( - &mut self, - _ctx_id: u32, - _resource_id: u32, - _resource_create_blob: ResourceCreateBlob, - _vecs: Vec<(GuestAddress, usize)>, - _mem: &GuestMemoryMmap, - ) -> VirtioGpuResult { - error!("Not implemented: resource_create_blob"); - Err(ErrUnspec) - } - - fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult { - error!("Not implemented: resource_map_blob"); - Err(ErrUnspec) - } - - fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult { - error!("Not implemented: resource_unmap_blob"); - Err(ErrUnspec) - } - - fn get_event_poll_fd(&self) -> Option { - self.rutabaga.poll_descriptor().map(|fd| { - // SAFETY: Safe, the fd should be valid, because Rutabaga guarantees it. - // into_raw_descriptor() returns a RawFd and makes sure SafeDescriptor::drop - // doesn't run. 
- unsafe { EventFd::from_raw_fd(fd.into_raw_descriptor()) } - }) - } - - fn event_poll(&self) { - self.rutabaga.event_poll(); - } -} - -#[cfg(test)] -mod tests { - #[cfg(feature = "backend-gfxstream")] - use std::env::set_var; - use std::{ - os::unix::net::UnixStream, - sync::{Arc, Mutex}, - }; - - use assert_matches::assert_matches; - use rusty_fork::rusty_fork_test; - use rutabaga_gfx::{RutabagaFence, RUTABAGA_PIPE_BIND_RENDER_TARGET, RUTABAGA_PIPE_TEXTURE_2D}; - use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap}; - - use super::*; - use crate::{protocol::VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, GpuCapset, GpuFlags}; - - const CREATE_RESOURCE_2D_720P: ResourceCreate3D = ResourceCreate3D { - target: RUTABAGA_PIPE_TEXTURE_2D, - format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, - bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, - width: 1280, - height: 720, - depth: 1, - array_size: 1, - last_level: 0, - nr_samples: 0, - flags: 0, - }; - - const CREATE_RESOURCE_CURSOR: ResourceCreate3D = ResourceCreate3D { - target: RUTABAGA_PIPE_TEXTURE_2D, - format: VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, - bind: RUTABAGA_PIPE_BIND_RENDER_TARGET, - width: 64, - height: 64, - depth: 1, - array_size: 1, - last_level: 0, - nr_samples: 0, - flags: 0, - }; - - fn dummy_gpu_backend() -> GpuBackend { - let (_, backend) = UnixStream::pair().unwrap(); - GpuBackend::from_stream(backend) - } - - fn new_gpu(component_type: RutabagaComponentType) -> RutabagaVirtioGpu { - let (gpu_mode, capsets) = match component_type { - RutabagaComponentType::VirglRenderer => ( - GpuMode::VirglRenderer, - Some(GpuCapset::VIRGL | GpuCapset::VIRGL2), - ), - #[cfg(feature = "backend-gfxstream")] - RutabagaComponentType::Gfxstream => { - (GpuMode::Gfxstream, Some(GpuCapset::GFXSTREAM_GLES)) - } - _ => panic!("Unsupported component type for test"), - }; - - let config = GpuConfig::new(gpu_mode, capsets, GpuFlags::default()).unwrap(); - - // Mock memory - let mem = GuestMemoryAtomic::new( - 
GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(), - ); - let vring = VringRwLock::new(mem, 0x1000).unwrap(); - vring.set_queue_info(0x100, 0x200, 0x300).unwrap(); - vring.set_queue_ready(true); - - let fence_state = Arc::new(Mutex::new(FenceState::default())); - let fence = RutabagaVirtioGpu::create_fence_handler(vring, fence_state); - // Test creating a fence with the `RutabagaFence` that can be used to determine - // when the previous command completed. - let (builder, actual_component_type) = - RutabagaVirtioGpu::configure_rutabaga_builder(&config, fence); - let rutabaga = builder.build().unwrap(); - RutabagaVirtioGpu { - rutabaga, - gpu_backend: dummy_gpu_backend(), - resources: BTreeMap::default(), - fence_state: Arc::new(Mutex::new(FenceState::default())), - scanouts: Default::default(), - component_type: actual_component_type, - } - } - - #[test] - fn test_virtio_gpu_associated_scanouts() { - use super::AssociatedScanouts; - - let mut scanouts = AssociatedScanouts::default(); - - assert!(!scanouts.has_any_enabled()); - assert_eq!(scanouts.iter_enabled().next(), None); - - scanouts.enable(1); - assert!(scanouts.has_any_enabled()); - scanouts.disable(1); - assert!(!scanouts.has_any_enabled()); - - (0..VIRTIO_GPU_MAX_SCANOUTS).for_each(|scanout| scanouts.enable(scanout)); - assert!(scanouts.has_any_enabled()); - assert_eq!( - scanouts.iter_enabled().collect::>(), - (0..VIRTIO_GPU_MAX_SCANOUTS).collect::>() - ); - - (0..VIRTIO_GPU_MAX_SCANOUTS) - .filter(|&i| i % 2 == 0) - .for_each(|scanout| scanouts.disable(scanout)); - assert_eq!( - scanouts.iter_enabled().collect::>(), - (1..VIRTIO_GPU_MAX_SCANOUTS) - .step_by(2) - .collect::>() - ); - - (0..VIRTIO_GPU_MAX_SCANOUTS) - .filter(|&i| i % 2 != 0) - .for_each(|scanout| scanouts.disable(scanout)); - assert!(!scanouts.has_any_enabled()); - } - - rusty_fork_test! 
{ - #[test] - fn test_update_cursor_fails() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - - let cursor_pos = VhostUserGpuCursorPos { - scanout_id: 1, - x: 123, - y: 123, - }; - - // The resource doesn't exist - let result = virtio_gpu.update_cursor(1, cursor_pos, 0, 0); - assert_matches!(result, Err(ErrInvalidResourceId)); - - // Create a resource - virtio_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P).unwrap(); - - // The resource exists, but the dimensions are wrong - let result = virtio_gpu.update_cursor(1, cursor_pos, 0, 0); - assert_matches!(result, Err(ErrInvalidParameter)); - - // Create a resource with correct cursor dimensions - let cursor_resource_id = 2; - virtio_gpu - .resource_create_3d( - cursor_resource_id, - CREATE_RESOURCE_CURSOR).unwrap(); - - // The resource exists, the dimensions are correct but the test - // fails to update cursor position from frontend - let result = virtio_gpu.update_cursor(cursor_resource_id, cursor_pos, 5, 5); - assert_matches!(result, Err(ErrUnspec)); - } - - #[test] - fn test_move_cursor_fails() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - let cursor_pos = VhostUserGpuCursorPos { - scanout_id: 1, - x: 123, - y: 123, - }; - - // No resources exists, but the test fails to set cursor position from frontend - let result = virtio_gpu.move_cursor(0, cursor_pos); - assert_matches!(result, Err(ErrUnspec)); - - // Resources exists, but the test fails to set cursor position from frontend - let result = virtio_gpu.move_cursor(1, cursor_pos); - assert_matches!(result, Err(ErrUnspec)); - } - - #[test] - fn test_process_fence() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - let fence = RutabagaFence { - flags: 0, - fence_id: 0, - ctx_id: 1, - ring_idx: 0, - }; - - // Test creating a fence with the `RutabagaFence` that can be used to determine when the previous - // command completed. 
- let result = virtio_gpu.create_fence(fence); - assert_matches!(result, Ok(OkNoData)); - - // Test processing gpu fence: If the fence has already been signaled return true - let ring = VirtioGpuRing::Global; - let result = virtio_gpu.process_fence(ring.clone(), 0, 0, 0); - assert_matches!(result, true); - - // Test processing gpu fence: If the fence has not yet been signaled return false - let result = virtio_gpu.process_fence(ring, 1, 0, 0); - assert_matches!(result, false); - } - - #[test] - fn test_event_poll() { - let virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - virtio_gpu.event_poll(); - } - - #[test] - fn test_create_and_unref_resources() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - - // No resources exists, cannot unref anything: - assert!(virtio_gpu.resources.is_empty()); - let result = virtio_gpu.unref_resource(0); - assert_matches!(result, Err(_)); - - // Create a resource - let result = virtio_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P); - assert_matches!(result, Ok(_)); - assert_eq!(virtio_gpu.resources.len(), 1); - - // Unref the created resource - let result = virtio_gpu.unref_resource(1); - assert_matches!(result, Ok(_)); - assert!(virtio_gpu.resources.is_empty()); - } - - #[test] - fn test_set_scanout_validation() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - - // Invalid scanout ID (larger than max) - let rect = Rectangle { x: 0, y: 0, width: 640, height: 480 }; - let result = virtio_gpu.set_scanout(VIRTIO_GPU_MAX_SCANOUTS + 1, 1, rect.clone()); - assert_matches!(result, Err(ErrInvalidScanoutId)); - - // Disabling scanout with resource_id = 0 (no resource needed) - let result = virtio_gpu.set_scanout(0, 0, rect.clone()); - // Fails because backend connection is a dummy, but still exercises disable path - assert_matches!(result, Err(ErrUnspec)); - - // Enabling scanout with non-existent resource - let result = virtio_gpu.set_scanout(0, 123, rect.clone()); - 
assert_matches!(result, Err(ErrInvalidResourceId)); - - // Create a valid resource, but it will be missing handle/info_3d - virtio_gpu.resource_create_3d(1, CREATE_RESOURCE_2D_720P).unwrap(); - - // Try to set scanout with a resource that has no exported DMABUF handle - let result = virtio_gpu.set_scanout(0, 1, rect); - assert_matches!(result, Err(ErrUnspec)); - } - - #[cfg(feature = "backend-gfxstream")] - #[test] - fn test_set_scanout_with_gfxstream_backend() { - set_var("EGL_PLATFORM", "surfaceless"); // no X/Wayland/GBM needed - set_var("LIBGL_ALWAYS_SOFTWARE", "1"); // force llvmpipe - set_var("GALLIUM_DRIVER", "llvmpipe"); // (belt + suspenders) - - let mut virtio_gpu = new_gpu(RutabagaComponentType::Gfxstream); - let rect = Rectangle { x: 0, y: 0, width: 1280, height: 720 }; - - // Create a simple valid resource (no DMABUF needed for gfxstream) - let mut res = VirtioGpuResource::new(1, 1280, 720); - res.info_3d = Some(Resource3DInfo { - width: 1280, - height: 720, - strides: [5120, 0, 0, 0], - offsets: [0, 0, 0, 0], - drm_fourcc: 0x34325241, - modifier: 0, - }); - let result = virtio_gpu.set_scanout(VIRTIO_GPU_MAX_SCANOUTS + 1, 1, rect.clone()); - assert_matches!(result, Err(ErrInvalidResourceId)); - - // Disabling scanout with resource_id = 0 (no resource needed) - let result = virtio_gpu.set_scanout(0, 0, rect.clone()); - // Fails because backend connection is a dummy, but still exercises disable path - assert_matches!(result, Err(ErrUnspec)); - - // Enabling scanout with non-existent resource - let result = virtio_gpu.set_scanout(0, 123, rect.clone()); - assert_matches!(result, Err(ErrInvalidResourceId)); - virtio_gpu.resources.insert(1, res); - - // Try to set scanout with a resource that has no exported DMABUF handle - let result = virtio_gpu.set_scanout(0, 1, rect); - assert_matches!(result, Err(ErrUnspec)); - - // Resource 1 should have scanout 0 disabled - assert_eq!(virtio_gpu.resources.get(&1).unwrap().scanouts.0, 0); - } - - #[test] - fn 
test_set_scanout_switches_resource_and_disables_old() { - let mut gpu = new_gpu(RutabagaComponentType::VirglRenderer); - let rect = Rectangle { x: 0, y: 0, width: 64, height: 64 }; - - // Helper: create a resource with dummy DMABUF + info_3d - fn make_resource(id: u32) -> VirtioGpuResource { - let file = tempfile::tempfile().unwrap(); - let raw_fd = file.as_fd().try_clone_to_owned().unwrap(); - let handle = Arc::new(RutabagaHandle { - os_handle: raw_fd.into(), - handle_type: RUTABAGA_HANDLE_TYPE_MEM_DMABUF, - }); - let mut res = VirtioGpuResource::new(id, 64, 64); - res.handle = Some(handle); - res.info_3d = Some(Resource3DInfo { - width: 64, - height: 64, - strides: [256, 0, 0, 0], - offsets: [0, 0, 0, 0], - drm_fourcc: 0x34325241, // 'AR24' - modifier: 0, - }); - res - } - - // Insert resources - gpu.resources.insert(1, make_resource(1)); - - // First bind scanout 0 -> resource 1 - let _ = gpu.set_scanout(0, 1, rect); - - // Resource 1 should have scanout 0 disabled - assert_eq!(gpu.resources.get(&1).unwrap().scanouts.0, 0); - - } - - #[test] - fn test_gpu_capset() { - let virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - - let capset_info = virtio_gpu.get_capset_info(0); - assert_matches!(capset_info, Ok(OkCapsetInfo { .. })); - - let Ok(OkCapsetInfo {capset_id, version, ..}) = capset_info else { - unreachable!("Response should have been checked by assert") - }; - - let capset_info = virtio_gpu.get_capset(capset_id, version); - assert_matches!(capset_info, Ok(OkCapset(_))); - } - - #[test] - fn test_gpu_submit_command_fails() { - let mut virtio_gpu = new_gpu(RutabagaComponentType::VirglRenderer); - let mut cmd_buf = [0; 10]; - let fence_ids: Vec = Vec::with_capacity(0); - virtio_gpu - .submit_command(1, &mut cmd_buf[..], &fence_ids) - .unwrap_err(); - } - } -}