From c07b42b6626c2ba2acbdc4d22e66cbaefe164689 Mon Sep 17 00:00:00 2001 From: Erik Schilling Date: Fri, 12 May 2023 09:41:44 +0200 Subject: [PATCH] scsi: Add tests for daemon and virtio code Signed-off-by: Erik Schilling --- crates/scsi/src/main.rs | 34 +++++ crates/scsi/src/vhu_scsi.rs | 285 ++++++++++++++++++++++++++++++++++++ crates/scsi/src/virtio.rs | 57 ++++++++ 3 files changed, 376 insertions(+) diff --git a/crates/scsi/src/main.rs b/crates/scsi/src/main.rs index bfb8ec2..e48959f 100644 --- a/crates/scsi/src/main.rs +++ b/crates/scsi/src/main.rs @@ -126,3 +126,37 @@ fn main() -> Result<()> { let backend = create_backend(&args)?; start_backend(backend, args) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_backend() { + let sock = tempfile::NamedTempFile::new().unwrap(); + let args = ScsiArgs { + images: vec!["/dev/null".into()], + read_only: true, + socket_path: sock.path().into(), + solid_state: false, + }; + create_backend(&args).unwrap(); + } + + #[test] + fn test_fail_listener() { + let socket_name = "~/path/not/present/scsi"; + let args = ScsiArgs { + images: vec!["/dev/null".into()], + read_only: true, + socket_path: socket_name.into(), + solid_state: false, + }; + let backend = create_backend(&args).unwrap(); + let err = start_backend(backend, args).unwrap_err(); + if let Error::FailedCreatingListener(_) = err { + } else { + panic!("expected failure when creating listener"); + } + } +} diff --git a/crates/scsi/src/vhu_scsi.rs b/crates/scsi/src/vhu_scsi.rs index 2cb12e4..915419e 100644 --- a/crates/scsi/src/vhu_scsi.rs +++ b/crates/scsi/src/vhu_scsi.rs @@ -281,3 +281,288 @@ impl VhostUserBackendMut for VhostUserScsiBackend { Some(self.exit_event.try_clone().expect("Cloning exit eventfd")) } } + +#[cfg(test)] +mod tests { + use std::{ + convert::TryInto, + io::{self, Read, Write}, + sync::{Arc, Mutex}, + }; + + use vhost_user_backend::{VhostUserBackendMut, VringRwLock, VringT}; + use virtio_bindings::{ + 
virtio_ring::VRING_DESC_F_WRITE, + virtio_scsi::{ + virtio_scsi_cmd_req, VIRTIO_SCSI_S_BAD_TARGET, VIRTIO_SCSI_S_FAILURE, VIRTIO_SCSI_S_OK, + }, + }; + use virtio_queue::{mock::MockSplitQueue, Descriptor}; + use vm_memory::{ + Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic, + GuestMemoryMmap, + }; + + use super::VhostUserScsiBackend; + use crate::{ + scsi::{CmdOutput, Target, TaskAttr}, + virtio::{ + tests::{VirtioScsiCmdReq, VirtioScsiCmdResp}, + VirtioScsiLun, CDB_SIZE, + }, + }; + + #[allow(dead_code)] + struct RecordedCommand { + lun: u16, + id: u64, + cdb: [u8; CDB_SIZE], + task_attr: TaskAttr, + crn: u8, + prio: u8, + } + + struct FakeTargetCommandCollector { + received_commands: Vec<RecordedCommand>, + } + + impl FakeTargetCommandCollector { + fn new() -> Arc<Mutex<Self>> { + Arc::new(Mutex::new(Self { + received_commands: vec![], + })) + } + } + + type FakeResponse = Result<CmdOutput, crate::scsi::CmdError>; + + struct FakeTarget<Cb> { + collector: Arc<Mutex<FakeTargetCommandCollector>>, + callback: Cb, + } + + impl<Cb> FakeTarget<Cb> { + fn new(collector: Arc<Mutex<FakeTargetCommandCollector>>, callback: Cb) -> Self + where + Cb: FnMut(u16, crate::scsi::Request) -> FakeResponse + Sync + Send, + { + Self { + collector, + callback, + } + } + } + + impl<Cb> Target for FakeTarget<Cb> + where + Cb: FnMut(u16, crate::scsi::Request) -> FakeResponse + Sync + Send, + { + fn execute_command( + &mut self, + lun: u16, + _data_out: &mut dyn Read, + _data_in: &mut dyn Write, + req: crate::scsi::Request, + ) -> Result<CmdOutput, crate::scsi::CmdError> { + let mut collector = self.collector.lock().unwrap(); + collector.received_commands.push(RecordedCommand { + lun, + id: req.id, + cdb: req.cdb.try_into().unwrap(), + task_attr: req.task_attr, + crn: req.crn, + prio: req.prio, + }); + (self.callback)(lun, req) + } + } + + fn setup( + req: impl ByteValued, + ) -> ( + VhostUserScsiBackend, + VringRwLock, + GuestMemoryAtomic<GuestMemoryMmap>, + ) { + let mem = GuestMemoryAtomic::new( + GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000_0000)]).unwrap(), + ); + // The `build_desc_chain` function will populate the `NEXT` related 
flags and field. + let v = vec![ + Descriptor::new(0x10_0000, 0x100, 0, 0), // request + Descriptor::new(0x20_0000, 0x100, VRING_DESC_F_WRITE as u16, 0), // response + ]; + + mem.memory() + .write_obj(req, GuestAddress(0x10_0000)) + .expect("writing to succeed"); + + let mem_handle = mem.memory(); + + let queue = MockSplitQueue::new(&*mem_handle, 16); + // queue.set_avail_idx(1); + + queue.build_desc_chain(&v).unwrap(); + + // Put the descriptor index 0 in the first available ring position. + mem.memory() + .write_obj(0u16, queue.avail_addr().unchecked_add(4)) + .unwrap(); + + // Set `avail_idx` to 1. + mem.memory() + .write_obj(1u16, queue.avail_addr().unchecked_add(2)) + .unwrap(); + + let vring = VringRwLock::new(mem.clone(), 16).unwrap(); + + // vring.set_queue_info(0x10_0000, 0x10_0000, 0x300).unwrap(); + vring.set_queue_size(16); + vring + .set_queue_info( + queue.desc_table_addr().0, + queue.avail_addr().0, + queue.used_addr().0, + ) + .unwrap(); + vring.set_queue_ready(true); + + let mut backend = VhostUserScsiBackend::new(); + backend.update_memory(mem.clone()).unwrap(); + + (backend, vring, mem) + } + + fn get_response(mem: &GuestMemoryAtomic<GuestMemoryMmap>) -> VirtioScsiCmdResp { + mem.memory() + .read_obj::<VirtioScsiCmdResp>(GuestAddress(0x20_0000)) + .expect("Unable to read response from memory") + } + + fn create_lun_specifier(target: u8, lun: u16) -> [u8; 8] { + let lun = lun.to_le_bytes(); + + [ + 0x1, + target, + lun[0] | VirtioScsiLun::FLAT_SPACE_ADDRESSING_METHOD, + lun[1], + 0x0, + 0x0, + 0x0, + 0x0, + ] + } + + #[test] + fn backend_test() { + let collector = FakeTargetCommandCollector::new(); + let fake_target = Box::new(FakeTarget::new(collector.clone(), |_, _| { + Ok(CmdOutput::ok()) + })); + + let req = VirtioScsiCmdReq(virtio_scsi_cmd_req { + lun: create_lun_specifier(0, 0), + tag: 0, + task_attr: 0, + prio: 0, + crn: 0, + cdb: [0; CDB_SIZE], + }); + + let (mut backend, vring, mem) = setup(req); + backend.add_target(fake_target); + 
backend.process_request_queue(&vring).unwrap(); + + let res = get_response(&mem); + assert_eq!(res.0.response, VIRTIO_SCSI_S_OK as u8); + + let collector = collector.lock().unwrap(); + assert_eq!( + collector.received_commands.len(), + 1, + "expect one command to be passed to Target" + ); + } + + #[test] + fn backend_error_reporting_test() { + let collector = FakeTargetCommandCollector::new(); + let fake_target = Box::new(FakeTarget::new(collector.clone(), |_, _| { + Err(crate::scsi::CmdError::DataIn(io::Error::new( + io::ErrorKind::Other, + "internal error", + ))) + })); + + let req = VirtioScsiCmdReq(virtio_scsi_cmd_req { + lun: create_lun_specifier(0, 0), + tag: 0, + task_attr: 0, + prio: 0, + crn: 0, + cdb: [0; CDB_SIZE], + }); + + let (mut backend, vring, mem) = setup(req); + backend.add_target(fake_target); + backend.process_request_queue(&vring).unwrap(); + + let res = get_response(&mem); + assert_eq!(res.0.response, VIRTIO_SCSI_S_FAILURE as u8); + + let collector = collector.lock().unwrap(); + assert_eq!( + collector.received_commands.len(), + 1, + "expect one command to be passed to Target" + ); + } + + #[test] + fn test_command_to_unknown_lun() { + let collector = FakeTargetCommandCollector::new(); + + let req = VirtioScsiCmdReq(virtio_scsi_cmd_req { + lun: create_lun_specifier(0, 0), + tag: 0, + task_attr: 0, + prio: 0, + crn: 0, + cdb: [0; CDB_SIZE], + }); + + let (mut backend, vring, mem) = setup(req); + backend.process_request_queue(&vring).unwrap(); + + let res = get_response(&mem); + assert_eq!(res.0.response, VIRTIO_SCSI_S_BAD_TARGET as u8); + + let collector = collector.lock().unwrap(); + assert_eq!( + collector.received_commands.len(), + 0, + "expect no command to make it to the target" + ); + } + + #[test] + fn test_broken_read_descriptor() { + let collector = FakeTargetCommandCollector::new(); + + let broken_req = [0u8; 1]; // single byte request + + let (mut backend, vring, mem) = setup(broken_req); + 
backend.process_request_queue(&vring).unwrap(); + + let res = get_response(&mem); + assert_eq!(res.0.response, VIRTIO_SCSI_S_FAILURE as u8); + + let collector = collector.lock().unwrap(); + assert_eq!( + collector.received_commands.len(), + 0, + "expect no command to make it to the target" + ); + } +} diff --git a/crates/scsi/src/virtio.rs b/crates/scsi/src/virtio.rs index 2f2ecb3..423c0ab 100644 --- a/crates/scsi/src/virtio.rs +++ b/crates/scsi/src/virtio.rs @@ -311,3 +311,60 @@ where } } } + +#[cfg(test)] +pub(crate) mod tests { + use virtio_bindings::virtio_scsi::{virtio_scsi_cmd_req, virtio_scsi_cmd_resp}; + use virtio_queue::{mock::MockSplitQueue, Descriptor}; + use vm_memory::{ByteValued, GuestAddress, GuestMemoryMmap}; + + use super::*; + + #[derive(Debug, Default, Clone, Copy)] + #[repr(transparent)] + pub(crate) struct VirtioScsiCmdReq(pub virtio_scsi_cmd_req); + /// SAFETY: struct is a transparent wrapper around the request + /// which can be read from a byte array + unsafe impl ByteValued for VirtioScsiCmdReq {} + + #[derive(Debug, Default, Clone, Copy)] + #[repr(transparent)] + pub(crate) struct VirtioScsiCmdResp(pub virtio_scsi_cmd_resp); + /// SAFETY: struct is a transparent wrapper around the response + /// which can be read from a byte array + unsafe impl ByteValued for VirtioScsiCmdResp {} + + pub(crate) fn report_luns_command() -> VirtioScsiCmdReq { + VirtioScsiCmdReq(virtio_scsi_cmd_req { + lun: REPORT_LUNS, + tag: 0, + task_attr: 0, + prio: 0, + crn: 0, + cdb: [0; CDB_SIZE], + }) + } + + #[test] + fn test_parse_request() { + let mem: GuestMemoryMmap = + GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000_0000)]).unwrap(); + + // The `build_desc_chain` function will populate the `NEXT` related flags and field. + let v = vec![ + // A device-writable request header descriptor. 
+ Descriptor::new(0x10_0000, 0x100, 0, 0), + ]; + + let req = report_luns_command(); + mem.write_obj(req, GuestAddress(0x10_0000)) + .expect("writing to succeed"); + + let queue = MockSplitQueue::new(&mem, 16); + let chain = queue.build_desc_chain(&v).unwrap(); + + let mut chain = DescriptorChainReader::new(chain.clone()); + let req = Request::parse(&mut chain).expect("request failed to parse"); + assert_eq!(req.lun, VirtioScsiLun::ReportLuns); + } +}