Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
virtio_net: xsk: bind/unbind xsk for tx
This patch implements the logic of binding/unbinding an xsk pool to the sq and rq.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://patch.msgid.link/20241112012928.102478-10-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7db956707f
commit 21a4e3ce6d
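For context, the bind/unbind logic in the diff below is driven from userspace through the AF_XDP socket API: bind() on an XDP socket reaches the driver as an XDP_SETUP_XSK_POOL request (handled in virtio_net by virtnet_xsk_pool_enable()), and closing the socket triggers the disable path. A minimal sketch, not part of this commit; IFINDEX and QUEUE_ID are hypothetical values and error handling is elided:

	/* build: cc -o xsk_bind xsk_bind.c (needs a recent kernel uapi) */
	#include <stdint.h>
	#include <sys/mman.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <linux/if_xdp.h>

	#define IFINDEX    2      /* hypothetical: ifindex of the virtio-net device */
	#define QUEUE_ID   0      /* hypothetical: queue pair to bind */
	#define NUM_FRAMES 4096
	#define FRAME_SIZE 2048

	int main(void)
	{
		struct sockaddr_xdp sxdp = {0};
		struct xdp_umem_reg mr = {0};
		int size = 2048;
		void *umem;
		int fd;

		fd = socket(AF_XDP, SOCK_RAW, 0);

		/* Register the memory area that will back the xsk_buff_pool. */
		umem = mmap(NULL, NUM_FRAMES * FRAME_SIZE, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		mr.addr = (uintptr_t)umem;
		mr.len = NUM_FRAMES * FRAME_SIZE;
		mr.chunk_size = FRAME_SIZE;
		setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));

		/* The rings must be created before bind() will succeed. */
		setsockopt(fd, SOL_XDP, XDP_TX_RING, &size, sizeof(size));
		setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &size, sizeof(size));
		setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &size, sizeof(size));

		/* bind() reaches the driver as an XDP_SETUP_XSK_POOL request;
		 * in virtio_net that runs virtnet_xsk_pool_enable() below. */
		sxdp.sxdp_family = AF_XDP;
		sxdp.sxdp_ifindex = IFINDEX;
		sxdp.sxdp_queue_id = QUEUE_ID;
		bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

		/* Closing the socket tears the pool down via the disable path. */
		close(fd);
		return 0;
	}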
@@ -295,6 +295,10 @@ struct send_queue {
 	/* Record whether sq is in reset state. */
 	bool reset;
 
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
 struct receive_queue {
@@ -495,6 +499,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 			       struct net_device *dev,
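A note on the file-scope xsk_hdr added above: static storage in C is zero-initialized, and for virtio-net an all-zero header is already meaningful, so no explicit setup is needed. An annotated restatement of the added line (the comment is editorial, not from the patch):

	/* Zero-initialized by virtue of static storage duration. For the
	 * virtio-net header, all-zero means: flags == 0 (no checksum offload
	 * requested) and gso_type == VIRTIO_NET_HDR_GSO_NONE (defined as 0).
	 * One immutable header can therefore be shared by every XSK tx packet
	 * and DMA-mapped exactly once (see the enable path below). */
	static struct virtio_net_common_hdr xsk_hdr;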
@@ -5561,6 +5567,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
 				   struct xsk_buff_pool *pool,
 				   u16 qid)
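The helper added above follows a pause/reset/resume pattern: tx is paused, the virtqueue is reset (recycling unused buffers via virtnet_sq_free_unused_buf), the pool pointer is swapped, and tx is resumed. It deliberately serves both directions, and if the reset fails, pool is forced to NULL so the sq never keeps a stale pool pointer. The two call sites, both visible further down in this diff:

	/* Enable path: attach the pool to the tx queue. */
	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);

	/* Disable path: passing NULL detaches the pool once the queue has
	 * been paused and reset, so no in-flight tx buffer can reference it. */
	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);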
@@ -5569,6 +5598,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5606,6 +5636,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5614,11 +5649,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* Now, we do not support tx offload(such as tx csum), so all the tx
+	 * virtnet hdr is zero. So all the tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 
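For context, the premapped header stored in sq->xsk_hdr_dma_addr is consumed by the XSK transmit path added later in this series, where each frame is posted as a two-entry scatterlist led by the shared header. A rough sketch only; sg_fill_dma() and virtqueue_add_outbuf_premapped() are not introduced by this patch, and the names here follow the rest of the series rather than this commit:

	/* Sketch: post one XSK frame behind the shared all-zero header. Both
	 * addresses are already DMA-mapped, so the "premapped" add variant
	 * tells the virtqueue core not to map them again. */
	sg_init_table(sq->sg, 2);
	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);  /* shared hdr */
	sg_fill_dma(sq->sg + 1, frame_dma, frame_len);           /* XSK payload */

	err = virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2, data, GFP_ATOMIC);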
@@ -5627,19 +5675,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 	kvfree(rq->xsk_buffs);
 
 	return err;