linux-loongson/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
Fan Gong 17fcb3dc12 hinic3: module initialization and tx/rx logic
This is [1/3] part of hinic3 Ethernet driver initial submission.
With this patch hinic3 is a valid kernel module but non-functional
driver.

The driver parts contained in this patch:
Module initialization.
PCI driver registration but with empty id_table.
Auxiliary driver registration.
Net device_ops registration but open/stop are empty stubs.
tx/rx logic.

All major data structures of the driver are fully introduced with the
code that uses them but without their initialization code that requires
management interface with the hw.

Co-developed-by: Xin Guo <guoxin09@huawei.com>
Signed-off-by: Xin Guo <guoxin09@huawei.com>
Signed-off-by: Fan Gong <gongfan1@huawei.com>
Co-developed-by: Gur Stavi <gur.stavi@huawei.com>
Signed-off-by: Gur Stavi <gur.stavi@huawei.com>
Link: https://patch.msgid.link/76a137ffdfe115c737c2c224f0c93b60ba53cc16.1747736586.git.gur.stavi@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-05-21 20:31:42 -07:00

77 lines
2.2 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
#ifndef _HINIC3_WQ_H_
#define _HINIC3_WQ_H_
#include <linux/io.h>
#include "hinic3_queue_common.h"
/* Scatter-gather buffer descriptor carried in an SQ WQE. Describes one DMA
 * buffer fragment; the 64-bit DMA address is split into two 32-bit words.
 */
struct hinic3_sq_bufdesc {
	/* 31-bits Length, L2NIC only uses length[17:0] */
	u32 len;
	u32 rsvd;
	/* High 32 bits of the buffer DMA address */
	u32 hi_addr;
	/* Low 32 bits of the buffer DMA address */
	u32 lo_addr;
};
/* Work queue is used to submit elements (tx, rx, cmd) to hw.
 * Driver is the producer that advances prod_idx. cons_idx is advanced when
 * HW reports completions of previously submitted elements.
 */
struct hinic3_wq {
	/* Pages backing the WQEBB array (see hinic3_queue_common.h) */
	struct hinic3_queue_pages qpages;
	/* Unmasked producer/consumer indices that are advanced to natural
	 * integer overflow regardless of queue depth.
	 */
	u16 cons_idx;
	u16 prod_idx;
	/* Number of WQEBBs in the queue; presumably a power of two so that
	 * idx_mask == q_depth - 1 — TODO confirm against init code.
	 */
	u32 q_depth;
	/* Mask applied to the unmasked indices to get an array position */
	u16 idx_mask;
	/* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
	 * Address (CLA) using 1 of 2 levels:
	 * level 0 - direct mapping of single wq page
	 * level 1 - indirect mapping of multiple pages via additional page
	 * table.
	 * When wq uses level 1, wq_block will hold the allocated indirection
	 * table.
	 */
	dma_addr_t wq_block_paddr;
	__be64 *wq_block_vaddr;
} ____cacheline_aligned;
/* Get number of elements in work queue that are in-use.
 * Unsigned u16 subtraction yields the correct distance between the two
 * unmasked indices even after they wrap around.
 */
static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
{
	u16 pi = READ_ONCE(wq->prod_idx);
	u16 ci = READ_ONCE(wq->cons_idx);

	return pi - ci;
}
static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
{
/* Don't allow queue to become completely full, report (free - 1). */
return wq->q_depth - hinic3_wq_get_used(wq) - 1;
}
/* Reserve a single WQEBB for the producer. Stores the masked position of
 * the reserved element in *pi, advances the unmasked producer index, and
 * returns a pointer to the element's memory.
 */
static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
{
	u16 idx = wq->prod_idx++ & wq->idx_mask;

	*pi = idx;

	return get_q_element(&wq->qpages, idx, NULL);
}
/* Return @num_wqebbs completed elements to the queue by advancing the
 * unmasked consumer index. Called when hw reports completions.
 */
static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
{
	wq->cons_idx = wq->cons_idx + num_wqebbs;
}
/* Reserve @num_wqebbs elements for the producer, returning the starting
 * masked position in *prod_idx. Presumably, because the reservation may
 * wrap past the end of a queue page, the elements are returned as up to two
 * contiguous runs: *first_part_wqebbs (of *first_part_wqebbs_num elements)
 * and *second_part_wqebbs for the remainder — implementation not visible
 * here, confirm against hinic3_wq.c.
 */
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
				u16 num_wqebbs, u16 *prod_idx,
				struct hinic3_sq_bufdesc **first_part_wqebbs,
				struct hinic3_sq_bufdesc **second_part_wqebbs,
				u16 *first_part_wqebbs_num);
#endif