mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-26 21:52:20 +00:00
ionic: convert Rx queue buffers to use page_pool
Our home-grown buffer management needs to go away and we need to be
playing nicely with the page_pool infrastructure.  This converts the
Rx traffic queues to use page_pool.

Also, since ionic_rx_buf_size() was removed, redefine IONIC_PAGE_SIZE
to account for IONIC_MAX_BUF_LEN being the largest allowed buffer to
prevent overflowing u16 variables, which could happen when PAGE_SIZE
is defined as >= 64KB.

include/linux/minmax.h:93:37: warning: conversion from 'long unsigned int' to 'u16' {aka 'short unsigned int'} changes value from '65536' to '0' [-Woverflow]

Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Link: https://patch.msgid.link/20240906232623.39651-7-brett.creeley@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
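To make the quoted -Woverflow warning concrete: on a build where PAGE_SIZE is 64KB, storing the old IONIC_PAGE_SIZE into a u16 silently becomes 0, while the new MIN() clamp keeps it at the u16 maximum. A standalone userspace sketch (the macro values are stand-ins modeling such a build, and MIN() is a local substitute for the kernel's minmax.h helper; not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel macros, modeling a 64KB-page kernel build */
#define PAGE_SIZE         65536UL
#define IONIC_MAX_BUF_LEN ((uint16_t)-1)        /* 65535 */
#define MIN(a, b)         ((a) < (b) ? (a) : (b))

int main(void)
{
	uint16_t old_size = PAGE_SIZE;          /* 65536 truncates to 0 */
	uint16_t new_size = MIN(PAGE_SIZE, (unsigned long)IONIC_MAX_BUF_LEN);

	printf("old IONIC_PAGE_SIZE in a u16: %u\n", old_size);  /* 0 */
	printf("new IONIC_PAGE_SIZE in a u16: %u\n", new_size);  /* 65535 */
	return 0;
}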
parent a7f3f635f0
commit ac8813c0ab
drivers/net/ethernet/pensando/ionic/Kconfig

@@ -23,6 +23,7 @@ config IONIC
 	depends on PTP_1588_CLOCK_OPTIONAL
 	select NET_DEVLINK
 	select DIMLIB
+	select PAGE_POOL
 	help
 	  This enables the support for the Pensando family of Ethernet
 	  adapters. More specific information on this driver can be
drivers/net/ethernet/pensando/ionic/ionic_dev.h

@@ -181,10 +181,7 @@ struct ionic_queue;
 struct ionic_qcq;

 #define IONIC_MAX_BUF_LEN		((u16)-1)
-#define IONIC_PAGE_SIZE			PAGE_SIZE
-#define IONIC_PAGE_SPLIT_SZ		(PAGE_SIZE / 2)
-#define IONIC_PAGE_GFP_MASK		(GFP_ATOMIC | __GFP_NOWARN |\
-					 __GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_PAGE_SIZE			MIN(PAGE_SIZE, IONIC_MAX_BUF_LEN)

 #define IONIC_XDP_MAX_LINEAR_MTU	(IONIC_PAGE_SIZE -	\
 					 (VLAN_ETH_HLEN +	\
@@ -248,11 +245,6 @@ struct ionic_queue {
 		struct ionic_rxq_desc *rxq;
 		struct ionic_admin_cmd *adminq;
 	};
-	union {
-		void __iomem *cmb_base;
-		struct ionic_txq_desc __iomem *cmb_txq;
-		struct ionic_rxq_desc __iomem *cmb_rxq;
-	};
 	union {
 		void *sg_base;
 		struct ionic_txq_sg_desc *txq_sgl;
@@ -261,8 +253,14 @@ struct ionic_queue {
 	};
 	struct xdp_rxq_info *xdp_rxq_info;
 	struct bpf_prog *xdp_prog;
+	struct page_pool *page_pool;
 	struct ionic_queue *partner;

+	union {
+		void __iomem *cmb_base;
+		struct ionic_txq_desc __iomem *cmb_txq;
+		struct ionic_rxq_desc __iomem *cmb_rxq;
+	};
 	unsigned int type;
 	unsigned int hw_index;
 	dma_addr_t base_pa;
drivers/net/ethernet/pensando/ionic/ionic_lif.c

@@ -13,6 +13,7 @@
 #include <linux/cpumask.h>
 #include <linux/crash_dump.h>
 #include <linux/vmalloc.h>
+#include <net/page_pool/helpers.h>

 #include "ionic.h"
 #include "ionic_bus.h"
@@ -439,6 +440,9 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
 		qcq->sg_base_pa = 0;
 	}

+	page_pool_destroy(qcq->q.page_pool);
+	qcq->q.page_pool = NULL;
+
 	ionic_qcq_intr_free(lif, qcq);
 	vfree(qcq->q.info);
 	qcq->q.info = NULL;
@@ -553,7 +557,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 			   unsigned int cq_desc_size,
 			   unsigned int sg_desc_size,
 			   unsigned int desc_info_size,
-			   unsigned int pid, struct ionic_qcq **qcq)
+			   unsigned int pid, struct bpf_prog *xdp_prog,
+			   struct ionic_qcq **qcq)
 {
 	struct ionic_dev *idev = &lif->ionic->idev;
 	struct device *dev = lif->ionic->dev;
@@ -579,6 +584,31 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 		goto err_out_free_qcq;
 	}

+	if (type == IONIC_QTYPE_RXQ) {
+		struct page_pool_params pp_params = {
+			.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+			.order = 0,
+			.pool_size = num_descs,
+			.nid = NUMA_NO_NODE,
+			.dev = lif->ionic->dev,
+			.napi = &new->napi,
+			.dma_dir = DMA_FROM_DEVICE,
+			.max_len = PAGE_SIZE,
+			.netdev = lif->netdev,
+		};
+
+		if (xdp_prog)
+			pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+		new->q.page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(new->q.page_pool)) {
+			netdev_err(lif->netdev, "Cannot create page_pool\n");
+			err = PTR_ERR(new->q.page_pool);
+			new->q.page_pool = NULL;
+			goto err_out_free_q_info;
+		}
+	}
+
 	new->q.type = type;
 	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

@@ -586,12 +616,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 			   desc_size, sg_desc_size, pid);
 	if (err) {
 		netdev_err(lif->netdev, "Cannot initialize queue\n");
-		goto err_out_free_q_info;
+		goto err_out_free_page_pool;
 	}

 	err = ionic_alloc_qcq_interrupt(lif, new);
 	if (err)
-		goto err_out_free_q_info;
+		goto err_out_free_page_pool;

 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
 	if (err) {
@@ -712,6 +742,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 		devm_free_irq(dev, new->intr.vector, &new->napi);
 		ionic_intr_free(lif->ionic, new->intr.index);
 	}
+err_out_free_page_pool:
+	page_pool_destroy(new->q.page_pool);
 err_out_free_q_info:
 	vfree(new->q.info);
 err_out_free_qcq:
@@ -734,7 +766,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
 			      sizeof(struct ionic_admin_comp),
 			      0,
 			      sizeof(struct ionic_admin_desc_info),
-			      lif->kern_pid, &lif->adminqcq);
+			      lif->kern_pid, NULL, &lif->adminqcq);
 	if (err)
 		return err;
 	ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -747,7 +779,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
 				      sizeof(union ionic_notifyq_comp),
 				      0,
 				      sizeof(struct ionic_admin_desc_info),
-				      lif->kern_pid, &lif->notifyqcq);
+				      lif->kern_pid, NULL, &lif->notifyqcq);
 		if (err)
 			goto err_out;
 		ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -965,7 +997,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
 			      num_desc, desc_sz, comp_sz, sg_desc_sz,
 			      sizeof(struct ionic_tx_desc_info),
-			      lif->kern_pid, &txq);
+			      lif->kern_pid, NULL, &txq);
 	if (err)
 		goto err_qcq_alloc;

@@ -1025,7 +1057,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
 			      num_desc, desc_sz, comp_sz, sg_desc_sz,
 			      sizeof(struct ionic_rx_desc_info),
-			      lif->kern_pid, &rxq);
+			      lif->kern_pid, NULL, &rxq);
 	if (err)
 		goto err_qcq_alloc;

@@ -2051,7 +2083,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
 				      sizeof(struct ionic_tx_desc_info),
-				      lif->kern_pid, &lif->txqcqs[i]);
+				      lif->kern_pid, NULL, &lif->txqcqs[i]);
 		if (err)
 			goto err_out;

@@ -2083,7 +2115,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
 				      sizeof(struct ionic_rx_desc_info),
-				      lif->kern_pid, &lif->rxqcqs[i]);
+				      lif->kern_pid, lif->xdp_prog,
+				      &lif->rxqcqs[i]);
 		if (err)
 			goto err_out;

@@ -2679,14 +2712,14 @@ static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)

 	err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
 	if (err) {
-		dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
-			q->index, err);
+		netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
+			   q->index, err);
 		goto err_out;
 	}

-	err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+	err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
 	if (err) {
-		dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
-			q->index, err);
+		netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
+			   q->index, err);
 		xdp_rxq_info_unreg(rxq_info);
 		goto err_out;
@@ -2853,7 +2886,16 @@ static int ionic_cmb_reconfig(struct ionic_lif *lif,

 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
 {
-	/* only swapping the queues, not the napi, flags, or other stuff */
+	/* only swapping the queues and napi, not flags or other stuff */
+	swap(a->napi, b->napi);
+
+	if (a->q.type == IONIC_QTYPE_RXQ) {
+		swap(a->q.page_pool, b->q.page_pool);
+		a->q.page_pool->p.napi = &a->napi;
+		if (b->q.page_pool)	/* is NULL when increasing queue count */
+			b->q.page_pool->p.napi = &b->napi;
+	}
+
 	swap(a->q.features, b->q.features);
 	swap(a->q.num_descs, b->q.num_descs);
 	swap(a->q.desc_size, b->q.desc_size);
@@ -2943,7 +2985,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
 					      4, desc_sz, comp_sz, sg_desc_sz,
 					      sizeof(struct ionic_tx_desc_info),
-					      lif->kern_pid, &lif->txqcqs[i]);
+					      lif->kern_pid, NULL, &lif->txqcqs[i]);
 			if (err)
 				goto err_out;
 		}
@@ -2952,7 +2994,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
 				      sizeof(struct ionic_tx_desc_info),
-				      lif->kern_pid, &tx_qcqs[i]);
+				      lif->kern_pid, NULL, &tx_qcqs[i]);
 		if (err)
 			goto err_out;
 	}
@@ -2974,7 +3016,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
 					      4, desc_sz, comp_sz, sg_desc_sz,
 					      sizeof(struct ionic_rx_desc_info),
-					      lif->kern_pid, &lif->rxqcqs[i]);
+					      lif->kern_pid, NULL, &lif->rxqcqs[i]);
 			if (err)
 				goto err_out;
 		}
@@ -2983,7 +3025,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
 				      num_desc, desc_sz, comp_sz, sg_desc_sz,
 				      sizeof(struct ionic_rx_desc_info),
-				      lif->kern_pid, &rx_qcqs[i]);
+				      lif->kern_pid, qparam->xdp_prog, &rx_qcqs[i]);
 		if (err)
 			goto err_out;

drivers/net/ethernet/pensando/ionic/ionic_txrx.c

@@ -6,6 +6,7 @@
 #include <linux/if_vlan.h>
 #include <net/ip6_checksum.h>
 #include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>

 #include "ionic.h"
 #include "ionic_lif.h"
@@ -118,108 +119,57 @@ static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)

 static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
 {
-	return buf_info->dma_addr + buf_info->page_offset;
+	return page_pool_get_dma_addr(buf_info->page) + buf_info->page_offset;
 }

-static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
-{
-	return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
-}
-
-static int ionic_rx_page_alloc(struct ionic_queue *q,
-			       struct ionic_buf_info *buf_info)
-{
-	struct device *dev = q->dev;
-	dma_addr_t dma_addr;
-	struct page *page;
-
-	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
-	if (unlikely(!page)) {
-		net_err_ratelimited("%s: %s page alloc failed\n",
-				    dev_name(dev), q->name);
-		q_to_rx_stats(q)->alloc_err++;
-		return -ENOMEM;
-	}
-
-	dma_addr = dma_map_page(dev, page, 0,
-				IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(dev, dma_addr))) {
-		__free_pages(page, 0);
-		net_err_ratelimited("%s: %s dma map failed\n",
-				    dev_name(dev), q->name);
-		q_to_rx_stats(q)->dma_map_err++;
-		return -EIO;
-	}
-
-	buf_info->dma_addr = dma_addr;
-	buf_info->page = page;
-	buf_info->page_offset = 0;
-
-	return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
-			       struct ionic_buf_info *buf_info)
-{
-	struct device *dev = q->dev;
-
-	if (unlikely(!buf_info)) {
-		net_err_ratelimited("%s: %s invalid buf_info in free\n",
-				    dev_name(dev), q->name);
-		return;
-	}
-
+static void __ionic_rx_put_buf(struct ionic_queue *q,
+			       struct ionic_buf_info *buf_info,
+			       bool recycle_direct)
+{
 	if (!buf_info->page)
 		return;

-	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-	__free_pages(buf_info->page, 0);
+	page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
 	buf_info->page = NULL;
+	buf_info->len = 0;
+	buf_info->page_offset = 0;
 }

-static bool ionic_rx_buf_recycle(struct ionic_queue *q,
-				 struct ionic_buf_info *buf_info, u32 len)
+static void ionic_rx_put_buf(struct ionic_queue *q,
+			     struct ionic_buf_info *buf_info)
 {
-	u32 size;
-
-	/* don't re-use pages allocated in low-mem condition */
-	if (page_is_pfmemalloc(buf_info->page))
-		return false;
-
-	/* don't re-use buffers from non-local numa nodes */
-	if (page_to_nid(buf_info->page) != numa_mem_id())
-		return false;
-
-	size = ALIGN(len, q->xdp_prog ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
-	buf_info->page_offset += size;
-	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
-		return false;
-
-	get_page(buf_info->page);
-
-	return true;
+	__ionic_rx_put_buf(q, buf_info, false);
+}
+
+static void ionic_rx_put_buf_direct(struct ionic_queue *q,
+				    struct ionic_buf_info *buf_info)
+{
+	__ionic_rx_put_buf(q, buf_info, true);
 }

 static void ionic_rx_add_skb_frag(struct ionic_queue *q,
 				  struct sk_buff *skb,
 				  struct ionic_buf_info *buf_info,
-				  u32 off, u32 len,
+				  u32 headroom, u32 len,
 				  bool synced)
 {
 	if (!synced)
-		dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
-					      off, len, DMA_FROM_DEVICE);
+		page_pool_dma_sync_for_cpu(q->page_pool,
+					   buf_info->page,
+					   buf_info->page_offset + headroom,
+					   len);

 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			buf_info->page, buf_info->page_offset + off,
-			len,
-			IONIC_PAGE_SIZE);
+			buf_info->page, buf_info->page_offset + headroom,
+			len, buf_info->len);

-	if (!ionic_rx_buf_recycle(q, buf_info, len)) {
-		dma_unmap_page(q->dev, buf_info->dma_addr,
-			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-		buf_info->page = NULL;
-	}
+	/* napi_gro_frags() will release/recycle the
+	 * page_pool buffers from the frags list
+	 */
+	buf_info->page = NULL;
+	buf_info->len = 0;
+	buf_info->page_offset = 0;
 }

 static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
@@ -244,12 +194,13 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
 		q_to_rx_stats(q)->alloc_err++;
 		return NULL;
 	}
+	skb_mark_for_recycle(skb);

 	if (headroom)
 		frag_len = min_t(u16, len,
 				 IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
 	else
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		frag_len = min_t(u16, len, IONIC_PAGE_SIZE);

 	if (unlikely(!buf_info->page))
 		goto err_bad_buf_page;
@@ -260,7 +211,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
 	for (i = 0; i < num_sg_elems; i++, buf_info++) {
 		if (unlikely(!buf_info->page))
 			goto err_bad_buf_page;
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		frag_len = min_t(u16, len, buf_info->len);
 		ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
 		len -= frag_len;
 	}
@@ -277,11 +228,13 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
 					  struct ionic_rx_desc_info *desc_info,
 					  unsigned int headroom,
 					  unsigned int len,
+					  unsigned int num_sg_elems,
 					  bool synced)
 {
 	struct ionic_buf_info *buf_info;
 	struct device *dev = q->dev;
 	struct sk_buff *skb;
+	int i;

 	buf_info = &desc_info->bufs[0];

@@ -292,54 +245,52 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
 		q_to_rx_stats(q)->alloc_err++;
 		return NULL;
 	}
-
-	if (unlikely(!buf_info->page)) {
-		dev_kfree_skb(skb);
-		return NULL;
-	}
+	skb_mark_for_recycle(skb);

 	if (!synced)
-		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-					      headroom, len, DMA_FROM_DEVICE);
+		page_pool_dma_sync_for_cpu(q->page_pool,
+					   buf_info->page,
+					   buf_info->page_offset + headroom,
+					   len);
+
 	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
-	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
-					 headroom, len, DMA_FROM_DEVICE);

 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, netdev);

+	/* recycle the Rx buffer now that we're done with it */
+	ionic_rx_put_buf_direct(q, buf_info);
+	buf_info++;
+	for (i = 0; i < num_sg_elems; i++, buf_info++)
+		ionic_rx_put_buf_direct(q, buf_info);
+
 	return skb;
 }

 static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
-				    struct ionic_tx_desc_info *desc_info)
+				    struct ionic_tx_desc_info *desc_info,
+				    bool in_napi)
 {
-	unsigned int nbufs = desc_info->nbufs;
-	struct ionic_buf_info *buf_info;
-	struct device *dev = q->dev;
-	int i;
+	struct xdp_frame_bulk bq;

-	if (!nbufs)
+	if (!desc_info->nbufs)
 		return;

-	buf_info = desc_info->bufs;
-	dma_unmap_single(dev, buf_info->dma_addr,
-			 buf_info->len, DMA_TO_DEVICE);
-	if (desc_info->act == XDP_TX)
-		__free_pages(buf_info->page, 0);
-	buf_info->page = NULL;
+	xdp_frame_bulk_init(&bq);
+	rcu_read_lock(); /* need for xdp_return_frame_bulk */

-	buf_info++;
-	for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
-		dma_unmap_page(dev, buf_info->dma_addr,
-			       buf_info->len, DMA_TO_DEVICE);
-		if (desc_info->act == XDP_TX)
-			__free_pages(buf_info->page, 0);
-		buf_info->page = NULL;
+	if (desc_info->act == XDP_TX) {
+		if (likely(in_napi))
+			xdp_return_frame_rx_napi(desc_info->xdpf);
+		else
+			xdp_return_frame(desc_info->xdpf);
+	} else if (desc_info->act == XDP_REDIRECT) {
+		ionic_tx_desc_unmap_bufs(q, desc_info);
+		xdp_return_frame_bulk(desc_info->xdpf, &bq);
 	}

-	if (desc_info->act == XDP_REDIRECT)
-		xdp_return_frame(desc_info->xdpf);
+	xdp_flush_frame_bulk(&bq);
+	rcu_read_unlock();

 	desc_info->nbufs = 0;
 	desc_info->xdpf = NULL;
@@ -363,9 +314,17 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
 	buf_info = desc_info->bufs;
 	stats = q_to_tx_stats(q);

-	dma_addr = ionic_tx_map_single(q, frame->data, len);
-	if (!dma_addr)
-		return -EIO;
+	if (act == XDP_TX) {
+		dma_addr = page_pool_get_dma_addr(page) +
+			   off + XDP_PACKET_HEADROOM;
+		dma_sync_single_for_device(q->dev, dma_addr,
+					   len, DMA_TO_DEVICE);
+	} else /* XDP_REDIRECT */ {
+		dma_addr = ionic_tx_map_single(q, frame->data, len);
+		if (!dma_addr)
+			return -EIO;
+	}

 	buf_info->dma_addr = dma_addr;
 	buf_info->len = len;
 	buf_info->page = page;
@@ -387,11 +346,22 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
 		frag = sinfo->frags;
 		elem = ionic_tx_sg_elems(q);
 		for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
-			dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
-			if (!dma_addr) {
-				ionic_tx_desc_unmap_bufs(q, desc_info);
-				return -EIO;
+			if (act == XDP_TX) {
+				struct page *pg = skb_frag_page(frag);
+
+				dma_addr = page_pool_get_dma_addr(pg) +
+					   skb_frag_off(frag);
+				dma_sync_single_for_device(q->dev, dma_addr,
+							   skb_frag_size(frag),
+							   DMA_TO_DEVICE);
+			} else {
+				dma_addr = ionic_tx_map_frag(q, frag, 0,
+							     skb_frag_size(frag));
+				if (dma_mapping_error(q->dev, dma_addr)) {
+					ionic_tx_desc_unmap_bufs(q, desc_info);
+					return -EIO;
+				}
 			}
 			bi->dma_addr = dma_addr;
 			bi->len = skb_frag_size(frag);
 			bi->page = skb_frag_page(frag);
@@ -488,8 +458,6 @@ static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
 	int i;

 	for (i = 0; i < nbufs; i++) {
-		dma_unmap_page(q->dev, buf_info->dma_addr,
-			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 		buf_info->page = NULL;
 		buf_info++;
 	}
@@ -516,11 +484,9 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 	frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
 			 XDP_PACKET_HEADROOM, frag_len, false);
-
-	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
-				      XDP_PACKET_HEADROOM, frag_len,
-				      DMA_FROM_DEVICE);
+	page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
+				   buf_info->page_offset + XDP_PACKET_HEADROOM,
+				   frag_len);

 	prefetchw(&xdp_buf.data_hard_start);

 	/* We limit MTU size to one buffer if !xdp_has_frags, so
@@ -542,15 +508,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 		do {
 			if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
 				err = -ENOSPC;
-				goto out_xdp_abort;
+				break;
 			}

 			frag = &sinfo->frags[sinfo->nr_frags];
 			sinfo->nr_frags++;
 			bi++;
-			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
-			dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
-						      0, frag_len, DMA_FROM_DEVICE);
+			frag_len = min_t(u16, remain_len, bi->len);
+			page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
+						   buf_info->page_offset,
+						   frag_len);
 			skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
 			sinfo->xdp_frags_size += frag_len;
 			remain_len -= frag_len;
@@ -569,14 +536,16 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 		return false;	/* false = we didn't consume the packet */

 	case XDP_DROP:
-		ionic_rx_page_free(rxq, buf_info);
+		ionic_rx_put_buf_direct(rxq, buf_info);
 		stats->xdp_drop++;
 		break;

 	case XDP_TX:
 		xdpf = xdp_convert_buff_to_frame(&xdp_buf);
-		if (!xdpf)
-			goto out_xdp_abort;
+		if (!xdpf) {
+			err = -ENOSPC;
+			break;
+		}

 		txq = rxq->partner;
 		nq = netdev_get_tx_queue(netdev, txq->index);
@@ -588,7 +557,8 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 					  ionic_q_space_avail(txq),
 					  1, 1)) {
 			__netif_tx_unlock(nq);
-			goto out_xdp_abort;
+			err = -EIO;
+			break;
 		}

 		err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
@@ -598,19 +568,17 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 		__netif_tx_unlock(nq);
 		if (unlikely(err)) {
 			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
-			goto out_xdp_abort;
+			break;
 		}
 		ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
 		stats->xdp_tx++;
-
-		/* the Tx completion will free the buffers */
 		break;

 	case XDP_REDIRECT:
 		err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
 		if (unlikely(err)) {
 			netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
-			goto out_xdp_abort;
+			break;
 		}
 		ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
 		rxq->xdp_flush = true;
@@ -619,15 +587,15 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,

 	case XDP_ABORTED:
 	default:
-		goto out_xdp_abort;
+		err = -EIO;
+		break;
 	}

-	return true;
-
-out_xdp_abort:
-	trace_xdp_exception(netdev, xdp_prog, xdp_action);
-	ionic_rx_page_free(rxq, buf_info);
-	stats->xdp_aborted++;
+	if (err) {
+		ionic_rx_put_buf_direct(rxq, buf_info);
+		trace_xdp_exception(netdev, xdp_prog, xdp_action);
+		stats->xdp_aborted++;
+	}

 	return true;
 }
@@ -673,7 +641,8 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	use_copybreak = len <= q->lif->rx_copybreak;
 	if (use_copybreak)
 		skb = ionic_rx_copybreak(netdev, q, desc_info,
-					 headroom, len, synced);
+					 headroom, len,
+					 comp->num_sg_elems, synced);
 	else
 		skb = ionic_rx_build_skb(q, desc_info, headroom, len,
 					 comp->num_sg_elems, synced);
@@ -794,6 +763,9 @@ void ionic_rx_fill(struct ionic_queue *q)
 	struct ionic_buf_info *buf_info;
 	unsigned int fill_threshold;
 	struct ionic_rxq_desc *desc;
+	unsigned int first_frag_len;
+	unsigned int first_buf_len;
+	unsigned int headroom = 0;
 	unsigned int remain_len;
 	unsigned int frag_len;
 	unsigned int nfrags;
@@ -811,35 +783,42 @@ void ionic_rx_fill(struct ionic_queue *q)

 	len = netdev->mtu + VLAN_ETH_HLEN;

-	for (i = n_fill; i; i--) {
-		unsigned int headroom = 0;
-		unsigned int buf_len;
+	if (q->xdp_prog) {
+		/* Always alloc the full size buffer, but only need
+		 * the actual frag_len in the descriptor
+		 * XDP uses space in the first buffer, so account for
+		 * head room, tail room, and ip header in the first frag size.
+		 */
+		headroom = XDP_PACKET_HEADROOM;
+		first_buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN + headroom;
+		first_frag_len = min_t(u16, len + headroom, first_buf_len);
+	} else {
+		/* Use MTU size if smaller than max buffer size */
+		first_frag_len = min_t(u16, len, IONIC_PAGE_SIZE);
+		first_buf_len = first_frag_len;
+	}

+	for (i = n_fill; i; i--) {
+		/* fill main descriptor - buf[0] */
 		nfrags = 0;
 		remain_len = len;
 		desc = &q->rxq[q->head_idx];
 		desc_info = &q->rx_info[q->head_idx];
 		buf_info = &desc_info->bufs[0];

-		if (!buf_info->page) { /* alloc a new buffer? */
-			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
-				desc->addr = 0;
-				desc->len = 0;
-				return;
-			}
-		}
+		buf_info->len = first_buf_len;
+		frag_len = first_frag_len - headroom;

-		/* fill main descriptor - buf[0]
-		 * XDP uses space in the first buffer, so account for
-		 * head room, tail room, and ip header in the first frag size.
-		 */
-		if (q->xdp_prog) {
-			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
-			headroom = XDP_PACKET_HEADROOM;
-		} else {
-			buf_len = ionic_rx_buf_size(buf_info);
-		}
-		frag_len = min_t(u16, len, buf_len);
+		/* get a new buffer if we can't reuse one */
+		if (!buf_info->page)
+			buf_info->page = page_pool_alloc(q->page_pool,
+							 &buf_info->page_offset,
+							 &buf_info->len,
+							 GFP_ATOMIC);
+		if (unlikely(!buf_info->page)) {
+			buf_info->len = 0;
+			return;
+		}

 		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
 		desc->len = cpu_to_le16(frag_len);
@@ -850,16 +829,26 @@ void ionic_rx_fill(struct ionic_queue *q)
 		/* fill sg descriptors - buf[1..n] */
 		sg_elem = q->rxq_sgl[q->head_idx].elems;
 		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
-			if (!buf_info->page) { /* alloc a new sg buffer? */
-				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
-					sg_elem->addr = 0;
-					sg_elem->len = 0;
+			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE);
+
+			/* Recycle any leftover buffers that are too small to reuse */
+			if (unlikely(buf_info->page && buf_info->len < frag_len))
+				ionic_rx_put_buf_direct(q, buf_info);
+
+			/* Get new buffer if needed */
+			if (!buf_info->page) {
+				buf_info->len = frag_len;
+				buf_info->page = page_pool_alloc(q->page_pool,
+								 &buf_info->page_offset,
+								 &buf_info->len,
+								 GFP_ATOMIC);
+				if (unlikely(!buf_info->page)) {
+					buf_info->len = 0;
 					return;
 				}
 			}

 			sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
-			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
 			sg_elem->len = cpu_to_le16(frag_len);
 			remain_len -= frag_len;
 			buf_info++;
@@ -889,17 +878,12 @@ void ionic_rx_fill(struct ionic_queue *q)
 void ionic_rx_empty(struct ionic_queue *q)
 {
 	struct ionic_rx_desc_info *desc_info;
-	struct ionic_buf_info *buf_info;
 	unsigned int i, j;

 	for (i = 0; i < q->num_descs; i++) {
 		desc_info = &q->rx_info[i];
-		for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
-			buf_info = &desc_info->bufs[j];
-			if (buf_info->page)
-				ionic_rx_page_free(q, buf_info);
-		}
+		for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++)
+			ionic_rx_put_buf(q, &desc_info->bufs[j]);

 		desc_info->nbufs = 0;
 	}
@@ -1172,7 +1156,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
 	struct sk_buff *skb;

 	if (desc_info->xdpf) {
-		ionic_xdp_tx_desc_clean(q->partner, desc_info);
+		ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
 		stats->clean++;

 		if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
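For readers coming from the old alloc_pages()-based scheme, a condensed sketch of the buffer lifecycle the Rx queues now delegate to page_pool; the demo_* names and the demo_rx_slot struct are invented for illustration, while the page_pool_* helpers are the real API used in the diff above:

#include <linux/types.h>
#include <net/page_pool/helpers.h>

/* Hypothetical Rx slot, standing in for struct ionic_buf_info */
struct demo_rx_slot {
	struct page *page;
	unsigned int page_offset;
	unsigned int len;
	dma_addr_t dma_addr;
};

/* Refill one slot. With PP_FLAG_DMA_MAP set at page_pool_create()
 * time the fragment arrives already DMA-mapped, so no dma_map_page().
 */
static int demo_rx_refill_one(struct page_pool *pool, struct demo_rx_slot *slot)
{
	slot->len = PAGE_SIZE;	/* in: requested size, out: granted size */
	slot->page = page_pool_alloc(pool, &slot->page_offset,
				     &slot->len, GFP_ATOMIC);
	if (unlikely(!slot->page))
		return -ENOMEM;

	/* address to post in the NIC descriptor */
	slot->dma_addr = page_pool_get_dma_addr(slot->page) + slot->page_offset;
	return 0;
}

/* Drop/error path: return the buffer to the pool. "direct" recycling
 * into the lockless per-pool cache is only safe from this queue's
 * NAPI context, mirroring ionic_rx_put_buf_direct() above.
 */
static void demo_rx_drop_one(struct page_pool *pool, struct demo_rx_slot *slot)
{
	page_pool_put_full_page(pool, slot->page, true);
	slot->page = NULL;
}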