vsock: use pub(crate) for types used only by our binary

This is a binary crate and most of the types shouldn't be visible
outside of this crate. Also change the documentation comments to use ///.

If we would like to export some types as part of a library in the
future, we will adjust the visibility accordingly.

Suggested-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
This commit is contained in:
Stefano Garzarella 2022-10-12 11:24:12 +02:00
parent a6d157f5e5
commit f7f239e390
7 changed files with 55 additions and 59 deletions

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub enum RxOps {
pub(crate) enum RxOps {
/// VSOCK_OP_REQUEST
Request = 0,
/// VSOCK_OP_RW

View File

@ -3,7 +3,7 @@
use super::rxops::RxOps;
#[derive(Debug, Eq, PartialEq)]
pub struct RxQueue {
pub(crate) struct RxQueue {
/// Bitmap of rx operations.
queue: u8,
}

View File

@ -20,7 +20,7 @@ use std::{
use virtio_vsock::packet::VsockPacket;
use vm_memory::bitmap::BitmapSlice;
pub struct VsockThreadBackend {
pub(crate) struct VsockThreadBackend {
/// Map of ConnMapKey objects indexed by raw file descriptors.
pub listener_map: HashMap<RawFd, ConnMapKey>,
/// Map of vsock connection objects indexed by ConnMapKey objects.
@ -63,7 +63,7 @@ impl VsockThreadBackend {
/// Returns:
/// - `Ok(())` if the packet was successfully filled in
/// - `Err(Error::EmptyBackendRxQ)` if there was no available data
pub(crate) fn recv_pkt<B: BitmapSlice>(&mut self, pkt: &mut VsockPacket<B>) -> Result<()> {
pub fn recv_pkt<B: BitmapSlice>(&mut self, pkt: &mut VsockPacket<B>) -> Result<()> {
// Pop an event from the backend_rxq
let key = self.backend_rxq.pop_front().ok_or(Error::EmptyBackendRxQ)?;
let conn = match self.conn_map.get_mut(&key) {
@ -117,7 +117,7 @@ impl VsockThreadBackend {
///
/// Returns:
/// - always `Ok(())` if packet has been consumed correctly
pub(crate) fn send_pkt<B: BitmapSlice>(&mut self, pkt: &VsockPacket<B>) -> Result<()> {
pub fn send_pkt<B: BitmapSlice>(&mut self, pkt: &VsockPacket<B>) -> Result<()> {
let key = ConnMapKey::new(pkt.dst_port(), pkt.src_port());
// TODO: Rst if packet has unsupported type

View File

@ -5,7 +5,7 @@ use std::{io::Write, num::Wrapping};
use vm_memory::{bitmap::BitmapSlice, VolatileSlice};
#[derive(Debug)]
pub struct LocalTxBuf {
pub(crate) struct LocalTxBuf {
/// Buffer holding data to be forwarded to a host-side application
buf: Vec<u8>,
/// Index into buffer from which data can be consumed from the buffer
@ -31,7 +31,7 @@ impl LocalTxBuf {
/// Add new data to the tx buffer, push all or none.
/// Returns LocalTxBufFull error if space not sufficient.
pub(crate) fn push<B: BitmapSlice>(&mut self, data_buf: &VolatileSlice<B>) -> Result<()> {
pub fn push<B: BitmapSlice>(&mut self, data_buf: &VolatileSlice<B>) -> Result<()> {
if CONN_TX_BUF_SIZE as usize - self.len() < data_buf.len() {
// Tx buffer is full
return Err(Error::LocalTxBufFull);
@ -58,7 +58,7 @@ impl LocalTxBuf {
}
/// Flush buf data to stream.
pub(crate) fn flush_to<S: Write>(&mut self, stream: &mut S) -> Result<usize> {
pub fn flush_to<S: Write>(&mut self, stream: &mut S) -> Result<usize> {
if self.is_empty() {
// No data to be flushed
return Ok(0);

View File

@ -26,42 +26,43 @@ const RX_QUEUE_EVENT: u16 = 0;
const TX_QUEUE_EVENT: u16 = 1;
// New descriptors are pending on the event queue.
const EVT_QUEUE_EVENT: u16 = 2;
// Notification coming from the backend.
pub const BACKEND_EVENT: u16 = 3;
// Vsock connection TX buffer capacity
// TODO: Make this value configurable
pub const CONN_TX_BUF_SIZE: u32 = 64 * 1024;
/// Notification coming from the backend.
pub(crate) const BACKEND_EVENT: u16 = 3;
// CID of the host
pub const VSOCK_HOST_CID: u64 = 2;
/// Vsock connection TX buffer capacity
/// TODO: Make this value configurable
pub(crate) const CONN_TX_BUF_SIZE: u32 = 64 * 1024;
// Connection oriented packet
pub const VSOCK_TYPE_STREAM: u16 = 1;
/// CID of the host
pub(crate) const VSOCK_HOST_CID: u64 = 2;
/// Connection oriented packet
pub(crate) const VSOCK_TYPE_STREAM: u16 = 1;
// Vsock packet operation ID
//
// Connection request
pub const VSOCK_OP_REQUEST: u16 = 1;
// Connection response
pub const VSOCK_OP_RESPONSE: u16 = 2;
// Connection reset
pub const VSOCK_OP_RST: u16 = 3;
// Shutdown connection
pub const VSOCK_OP_SHUTDOWN: u16 = 4;
// Data read/write
pub const VSOCK_OP_RW: u16 = 5;
// Flow control credit update
pub const VSOCK_OP_CREDIT_UPDATE: u16 = 6;
// Flow control credit request
pub const VSOCK_OP_CREDIT_REQUEST: u16 = 7;
/// Connection request
pub(crate) const VSOCK_OP_REQUEST: u16 = 1;
/// Connection response
pub(crate) const VSOCK_OP_RESPONSE: u16 = 2;
/// Connection reset
pub(crate) const VSOCK_OP_RST: u16 = 3;
/// Shutdown connection
pub(crate) const VSOCK_OP_SHUTDOWN: u16 = 4;
/// Data read/write
pub(crate) const VSOCK_OP_RW: u16 = 5;
/// Flow control credit update
pub(crate) const VSOCK_OP_CREDIT_UPDATE: u16 = 6;
/// Flow control credit request
pub(crate) const VSOCK_OP_CREDIT_REQUEST: u16 = 7;
// Vsock packet flags
//
// VSOCK_OP_SHUTDOWN: Packet sender will receive no more data
pub const VSOCK_FLAGS_SHUTDOWN_RCV: u32 = 1;
// VSOCK_OP_SHUTDOWN: Packet sender will send no more data
pub const VSOCK_FLAGS_SHUTDOWN_SEND: u32 = 2;
/// VSOCK_OP_SHUTDOWN: Packet sender will receive no more data
pub(crate) const VSOCK_FLAGS_SHUTDOWN_RCV: u32 = 1;
/// VSOCK_OP_SHUTDOWN: Packet sender will send no more data
pub(crate) const VSOCK_FLAGS_SHUTDOWN_SEND: u32 = 2;
// Queue mask to select vrings.
const QUEUE_MASK: u64 = 0b11;
@ -197,7 +198,7 @@ impl TryFrom<VsockArgs> for VsockConfig {
/// A local port and peer port pair used to retrieve
/// the corresponding connection.
#[derive(Hash, PartialEq, Eq, Debug, Clone)]
pub struct ConnMapKey {
pub(crate) struct ConnMapKey {
local_port: u32,
peer_port: u32,
}
@ -220,7 +221,7 @@ struct VirtioVsockConfig {
unsafe impl ByteValued for VirtioVsockConfig {}
pub struct VhostUserVsockBackend {
pub(crate) struct VhostUserVsockBackend {
config: VirtioVsockConfig,
pub threads: Vec<Mutex<VhostUserVsockThread>>,
queues_per_thread: Vec<u64>,
@ -228,7 +229,7 @@ pub struct VhostUserVsockBackend {
}
impl VhostUserVsockBackend {
pub(crate) fn new(vsock_config: VsockConfig) -> Result<Self> {
pub fn new(vsock_config: VsockConfig) -> Result<Self> {
let thread = Mutex::new(VhostUserVsockThread::new(
vsock_config.get_uds_path(),
vsock_config.get_guest_cid(),

View File

@ -27,14 +27,11 @@ use vhost_user_backend::{VringEpollHandler, VringRwLock, VringT};
use virtio_queue::QueueOwnedT;
use virtio_vsock::packet::{VsockPacket, PKT_HEADER_SIZE};
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::{
epoll::EventSet,
eventfd::{EventFd, EFD_NONBLOCK},
};
use vmm_sys_util::epoll::EventSet;
type ArcVhostBknd = Arc<RwLock<VhostUserVsockBackend>>;
pub struct VhostUserVsockThread {
pub(crate) struct VhostUserVsockThread {
/// Guest memory map.
pub mem: Option<GuestMemoryAtomic<GuestMemoryMmap>>,
/// VIRTIO_RING_F_EVENT_IDX.
@ -45,8 +42,6 @@ pub struct VhostUserVsockThread {
host_sock_path: String,
/// Listener listening for new connections on the host.
host_listener: UnixListener,
/// Used to kill the thread.
pub kill_evt: EventFd,
/// Instance of VringWorker.
vring_worker: Option<Arc<VringEpollHandler<ArcVhostBknd, VringRwLock, ()>>>,
/// epoll fd to which new host connections are added.
@ -63,7 +58,7 @@ pub struct VhostUserVsockThread {
impl VhostUserVsockThread {
/// Create a new instance of VhostUserVsockThread.
pub(crate) fn new(uds_path: String, guest_cid: u64) -> Result<Self> {
pub fn new(uds_path: String, guest_cid: u64) -> Result<Self> {
// TODO: better error handling, maybe add a param to force the unlink
let _ = std::fs::remove_file(uds_path.clone());
let host_sock = UnixListener::bind(&uds_path)
@ -81,7 +76,6 @@ impl VhostUserVsockThread {
host_sock: host_sock.as_raw_fd(),
host_sock_path: uds_path.clone(),
host_listener: host_sock,
kill_evt: EventFd::new(EFD_NONBLOCK).unwrap(),
vring_worker: None,
epoll_file,
thread_backend: VsockThreadBackend::new(uds_path, epoll_fd),
@ -99,7 +93,7 @@ impl VhostUserVsockThread {
}
/// Register a file with an epoll to listen for events in evset.
pub(crate) fn epoll_register(epoll_fd: RawFd, fd: RawFd, evset: epoll::Events) -> Result<()> {
pub fn epoll_register(epoll_fd: RawFd, fd: RawFd, evset: epoll::Events) -> Result<()> {
epoll::ctl(
epoll_fd,
epoll::ControlOptions::EPOLL_CTL_ADD,
@ -112,7 +106,7 @@ impl VhostUserVsockThread {
}
/// Remove a file from the epoll.
pub(crate) fn epoll_unregister(epoll_fd: RawFd, fd: RawFd) -> Result<()> {
pub fn epoll_unregister(epoll_fd: RawFd, fd: RawFd) -> Result<()> {
epoll::ctl(
epoll_fd,
epoll::ControlOptions::EPOLL_CTL_DEL,
@ -125,7 +119,7 @@ impl VhostUserVsockThread {
}
/// Modify the events we listen to for the fd in the epoll.
pub(crate) fn epoll_modify(epoll_fd: RawFd, fd: RawFd, evset: epoll::Events) -> Result<()> {
pub fn epoll_modify(epoll_fd: RawFd, fd: RawFd, evset: epoll::Events) -> Result<()> {
epoll::ctl(
epoll_fd,
epoll::ControlOptions::EPOLL_CTL_MOD,
@ -462,7 +456,7 @@ impl VhostUserVsockThread {
}
/// Wrapper to process rx queue based on whether event idx is enabled or not.
pub(crate) fn process_rx(&mut self, vring: &VringRwLock, event_idx: bool) -> Result<bool> {
pub fn process_rx(&mut self, vring: &VringRwLock, event_idx: bool) -> Result<bool> {
if event_idx {
// To properly handle EVENT_IDX we need to keep calling
// process_rx_queue until it stops finding new requests
@ -562,7 +556,7 @@ impl VhostUserVsockThread {
}
/// Wrapper to process tx queue based on whether event idx is enabled or not.
pub(crate) fn process_tx(&mut self, vring_lock: &VringRwLock, event_idx: bool) -> Result<bool> {
pub fn process_tx(&mut self, vring_lock: &VringRwLock, event_idx: bool) -> Result<bool> {
if event_idx {
// To properly handle EVENT_IDX we need to keep calling
// process_rx_queue until it stops finding new requests
@ -591,9 +585,10 @@ impl Drop for VhostUserVsockThread {
mod tests {
use super::*;
use vm_memory::GuestAddress;
use vmm_sys_util::eventfd::EventFd;
impl VhostUserVsockThread {
pub fn get_epoll_file(&self) -> &File {
fn get_epoll_file(&self) -> &File {
&self.epoll_file
}
}

View File

@ -21,7 +21,7 @@ use virtio_vsock::packet::{VsockPacket, PKT_HEADER_SIZE};
use vm_memory::{bitmap::BitmapSlice, Bytes, VolatileSlice};
#[derive(Debug)]
pub struct VsockConnection<S> {
pub(crate) struct VsockConnection<S> {
/// Host-side stream corresponding to this vsock connection.
pub stream: S,
/// Specifies if the stream is connected to a listener on the host.
@ -120,7 +120,7 @@ impl<S: AsRawFd + Read + Write> VsockConnection<S> {
/// Process a vsock packet that is meant for this connection.
/// Forward data to the host-side application if the vsock packet
/// contains a RW operation.
pub(crate) fn recv_pkt<B: BitmapSlice>(&mut self, pkt: &mut VsockPacket<B>) -> Result<()> {
pub fn recv_pkt<B: BitmapSlice>(&mut self, pkt: &mut VsockPacket<B>) -> Result<()> {
// Initialize all fields in the packet header
self.init_pkt(pkt);
@ -201,7 +201,7 @@ impl<S: AsRawFd + Read + Write> VsockConnection<S> {
///
/// Returns:
/// - always `Ok(())` to indicate that the packet has been consumed
pub(crate) fn send_pkt<B: BitmapSlice>(&mut self, pkt: &VsockPacket<B>) -> Result<()> {
pub fn send_pkt<B: BitmapSlice>(&mut self, pkt: &VsockPacket<B>) -> Result<()> {
// Update peer credit information
self.peer_buf_alloc = pkt.buf_alloc();
self.peer_fwd_cnt = Wrapping(pkt.fwd_cnt());
@ -365,7 +365,7 @@ mod tests {
}
impl HeadParams {
pub fn new(head_len: usize, data_len: u32) -> Self {
fn new(head_len: usize, data_len: u32) -> Self {
Self { head_len, data_len }
}
fn construct_head(&self) -> Vec<u8> {