net: airoha: Enable Rx Scatter-Gather

The EN7581 SoC can receive 9K jumbo frames. Enable the reception of
Scatter-Gather (SG) frames.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250304-airoha-eth-rx-sg-v1-2-283ebc61120e@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Lorenzo Bianconi 2025-03-04 15:21:09 +01:00 committed by Jakub Kicinski
parent 54d989d58d
commit e12182ddb6
3 changed files with 48 additions and 26 deletions

View File

@@ -615,10 +615,10 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
struct airoha_qdma_desc *desc = &q->desc[q->tail]; struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr); dma_addr_t dma_addr = le32_to_cpu(desc->addr);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl); u32 desc_ctrl = le32_to_cpu(desc->ctrl);
struct airoha_gdm_port *port; struct airoha_gdm_port *port;
struct sk_buff *skb; int data_len, len, p;
int len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break; break;
@@ -636,30 +636,41 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
dma_sync_single_for_cpu(eth->dev, dma_addr, dma_sync_single_for_cpu(eth->dev, dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir); SKB_WITH_OVERHEAD(q->buf_size), dir);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
if (data_len < len)
goto free_frag;
p = airoha_qdma_get_gdm_port(eth, desc); p = airoha_qdma_get_gdm_port(eth, desc);
if (p < 0 || !eth->ports[p]) { if (p < 0 || !eth->ports[p])
page_pool_put_full_page(q->page_pool, goto free_frag;
virt_to_head_page(e->buf),
true);
continue;
}
port = eth->ports[p]; port = eth->ports[p];
skb = napi_build_skb(e->buf, q->buf_size); if (!q->skb) { /* first buffer */
if (!skb) { q->skb = napi_build_skb(e->buf, q->buf_size);
page_pool_put_full_page(q->page_pool, if (!q->skb)
virt_to_head_page(e->buf), goto free_frag;
true);
break; __skb_put(q->skb, len);
skb_mark_for_recycle(q->skb);
q->skb->dev = port->dev;
q->skb->protocol = eth_type_trans(q->skb, port->dev);
q->skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(q->skb, qid);
} else { /* scattered frame */
struct skb_shared_info *shinfo = skb_shinfo(q->skb);
int nr_frags = shinfo->nr_frags;
if (nr_frags >= ARRAY_SIZE(shinfo->frags))
goto free_frag;
skb_add_rx_frag(q->skb, nr_frags, page,
e->buf - page_address(page), len,
q->buf_size);
} }
skb_reserve(skb, 2); if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
__skb_put(skb, len); continue;
skb_mark_for_recycle(skb);
skb->dev = port->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, qid);
if (netdev_uses_dsa(port->dev)) { if (netdev_uses_dsa(port->dev)) {
/* PPE module requires untagged packets to work /* PPE module requires untagged packets to work
@ -672,22 +683,27 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
if (sptag < ARRAY_SIZE(port->dsa_meta) && if (sptag < ARRAY_SIZE(port->dsa_meta) &&
port->dsa_meta[sptag]) port->dsa_meta[sptag])
skb_dst_set_noref(skb, skb_dst_set_noref(q->skb,
&port->dsa_meta[sptag]->dst); &port->dsa_meta[sptag]->dst);
} }
hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1); hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
if (hash != AIROHA_RXD4_FOE_ENTRY) if (hash != AIROHA_RXD4_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0), skb_set_hash(q->skb, jhash_1word(hash, 0),
PKT_HASH_TYPE_L4); PKT_HASH_TYPE_L4);
reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1); reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
airoha_ppe_check_skb(eth->ppe, hash); airoha_ppe_check_skb(eth->ppe, hash);
napi_gro_receive(&q->napi, skb);
done++; done++;
napi_gro_receive(&q->napi, q->skb);
q->skb = NULL;
continue;
free_frag:
page_pool_put_full_page(q->page_pool, page, true);
dev_kfree_skb(q->skb);
q->skb = NULL;
} }
airoha_qdma_fill_rx_queue(q); airoha_qdma_fill_rx_queue(q);
@@ -762,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr)); FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q); airoha_qdma_fill_rx_queue(q);
@@ -1161,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
} }
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG, airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK | GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK | GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |

View File

@@ -176,6 +176,7 @@ struct airoha_queue {
struct napi_struct napi; struct napi_struct napi;
struct page_pool *page_pool; struct page_pool *page_pool;
struct sk_buff *skb;
}; };
struct airoha_tx_irq_queue { struct airoha_tx_irq_queue {

View File

@@ -626,10 +626,15 @@
#define REG_RX_DELAY_INT_IDX(_n) \ #define REG_RX_DELAY_INT_IDX(_n) \
(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
#define REG_RX_SCATTER_CFG(_n) \
(((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
#define RX_DELAY_INT_MASK GENMASK(15, 0) #define RX_DELAY_INT_MASK GENMASK(15, 0)
#define RX_RING_DMA_IDX_MASK GENMASK(15, 0) #define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
#define RX_RING_SG_EN_MASK BIT(0)
#define REG_INGRESS_TRTCM_CFG 0x0070 #define REG_INGRESS_TRTCM_CFG 0x0070
#define INGRESS_TRTCM_EN_MASK BIT(31) #define INGRESS_TRTCM_EN_MASK BIT(31)
#define INGRESS_TRTCM_MODE_MASK BIT(30) #define INGRESS_TRTCM_MODE_MASK BIT(30)