linux-loongson/drivers/net/ethernet/intel/idpf/idpf_controlq.h
Joshua Hay 6aa53e861c idpf: implement get LAN MMIO memory regions
The RDMA driver needs to map its own MMIO regions for the sake of
performance, which means the IDPF driver must avoid mapping those portions
of the BAR space. However, to remain HW agnostic, the IDPF cannot assume
where those regions live and must avoid mapping hard-coded regions as much
as possible.

The IDPF maps the bare minimum needed to load and communicate with the
control plane, i.e., the mailbox registers and the reset state registers.
Because of how and when the mailbox register offsets are initialized, it is
easier to adjust the existing defines to be relative to the starting
address of the mailbox region and to use a dedicated mailbox register write
function that takes these relative offsets. The reset state register
addresses are calculated the same way as for other registers, as described
below.
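
For illustration, a minimal sketch of such a write helper, assuming the
mailbox region is mapped at hw->mbx.vaddr as in the idpf_hw struct in the
header below (the helper name is hypothetical):

/* Hypothetical sketch: write a 32-bit mailbox register using an offset
 * relative to the start of the mapped mailbox region.
 */
static inline void idpf_mbx_wr32(struct idpf_hw *hw, u32 reg_off, u32 val)
{
	writel(val, hw->mbx.vaddr + reg_off);
}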

The IDPF then calls a new virtchnl op to fetch a list of MMIO regions that
it should map. The address of a register in one of these regions is
calculated by determining which region the register resides in, rebasing
the register's offset so it is relative to that region's start, and adding
the result to the region's mapped address.
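
As a rough sketch of that lookup, assuming the idpf_hw and idpf_mmio_reg
layout defined in the header below (the function name is hypothetical):

/* Hypothetical sketch: resolve an absolute BAR offset to a mapped address
 * by locating the LAN region that contains it and rebasing the offset to
 * that region's start.
 */
static void __iomem *idpf_get_reg_addr(struct idpf_hw *hw,
				       resource_size_t reg_off)
{
	int i;

	for (i = 0; i < hw->num_lan_regs; i++) {
		struct idpf_mmio_reg *region = &hw->lan_regs[i];

		if (reg_off >= region->addr_start &&
		    reg_off < region->addr_start + region->addr_len)
			return region->vaddr + (reg_off - region->addr_start);
	}

	return NULL;
}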

If the new virtchnl op is not supported, the IDPF falls back to mapping the
whole BAR. However, it still maps the space outside the mailbox and reset
state registers as separate regions, so the same logic can be used in both
cases to access the MMIO space.
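
A hedged sketch of that fallback, assuming the mailbox region sits below
the reset state region within BAR0 (function name and parameters
hypothetical):

/* Hypothetical sketch: describe the BAR space around the already-mapped
 * mailbox and rstat regions as up to three separate LAN regions, so the
 * same region lookup works with or without the virtchnl op.
 */
static int idpf_map_bar_fallback(struct idpf_hw *hw, struct pci_dev *pdev,
				 resource_size_t mbx_start,
				 resource_size_t mbx_len,
				 resource_size_t rstat_start,
				 resource_size_t rstat_len)
{
	struct { resource_size_t start, len; } span[] = {
		{ 0, mbx_start },
		{ mbx_start + mbx_len, rstat_start - (mbx_start + mbx_len) },
		{ rstat_start + rstat_len,
		  pci_resource_len(pdev, 0) - (rstat_start + rstat_len) },
	};
	int i;

	for (i = 0; i < IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING; i++) {
		struct idpf_mmio_reg *reg = &hw->lan_regs[hw->num_lan_regs];

		if (!span[i].len)
			continue;

		reg->addr_start = span[i].start;
		reg->addr_len = span[i].len;
		reg->vaddr = ioremap(pci_resource_start(pdev, 0) +
				     span[i].start, span[i].len);
		if (!reg->vaddr)
			return -ENOMEM;

		hw->num_lan_regs++;
	}

	return 0;
}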

Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2025-07-14 10:57:51 -07:00


/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_CONTROLQ_H_
#define _IDPF_CONTROLQ_H_

#include <linux/slab.h>

#include "idpf_controlq_api.h"

/* Maximum buffer length for all control queue types */
#define IDPF_CTLQ_MAX_BUF_LEN	4096

#define IDPF_CTLQ_DESC(R, i) \
	(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))

#define IDPF_CTLQ_DESC_UNUSED(R) \
	((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
	       (R)->next_to_clean - (R)->next_to_use - 1))

/* Control Queue default settings */
#define IDPF_CTRL_SQ_CMD_TIMEOUT	250	/* msecs */

struct idpf_ctlq_desc {
	/* Control queue descriptor flags */
	__le16 flags;
	/* Control queue message opcode */
	__le16 opcode;
	__le16 datalen;		/* 0 for direct commands */
	union {
		__le16 ret_val;
		__le16 pfid_vfid;
#define IDPF_CTLQ_DESC_VF_ID_S	0
#define IDPF_CTLQ_DESC_VF_ID_M	(0x7FF << IDPF_CTLQ_DESC_VF_ID_S)
#define IDPF_CTLQ_DESC_PF_ID_S	11
#define IDPF_CTLQ_DESC_PF_ID_M	(0x1F << IDPF_CTLQ_DESC_PF_ID_S)
	};

	/* Virtchnl message opcode and virtchnl descriptor type
	 * v_opcode=[27:0], v_dtype=[31:28]
	 */
	__le32 v_opcode_dtype;
	/* Virtchnl return value */
	__le32 v_retval;
	union {
		struct {
			__le32 param0;
			__le32 param1;
			__le32 param2;
			__le32 param3;
		} direct;
		struct {
			__le32 param0;
			__le16 sw_cookie;
			/* Virtchnl flags */
			__le16 v_flags;
			__le32 addr_high;
			__le32 addr_low;
		} indirect;
		u8 raw[16];
	} params;
};

/* Flags sub-structure
 * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
 * |DD |CMP|ERR|  * RSV *  |FTYPE  | *RSV* |RD |VFC|BUF|  HOST_ID  |
 */
/* command flags and offsets */
#define IDPF_CTLQ_FLAG_DD_S		0
#define IDPF_CTLQ_FLAG_CMP_S		1
#define IDPF_CTLQ_FLAG_ERR_S		2
#define IDPF_CTLQ_FLAG_FTYPE_S		6
#define IDPF_CTLQ_FLAG_RD_S		10
#define IDPF_CTLQ_FLAG_VFC_S		11
#define IDPF_CTLQ_FLAG_BUF_S		12
#define IDPF_CTLQ_FLAG_HOST_ID_S	13

#define IDPF_CTLQ_FLAG_DD	BIT(IDPF_CTLQ_FLAG_DD_S)	/* 0x1 */
#define IDPF_CTLQ_FLAG_CMP	BIT(IDPF_CTLQ_FLAG_CMP_S)	/* 0x2 */
#define IDPF_CTLQ_FLAG_ERR	BIT(IDPF_CTLQ_FLAG_ERR_S)	/* 0x4 */
#define IDPF_CTLQ_FLAG_FTYPE_VM	BIT(IDPF_CTLQ_FLAG_FTYPE_S)	/* 0x40 */
#define IDPF_CTLQ_FLAG_FTYPE_PF	BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1)	/* 0x80 */
#define IDPF_CTLQ_FLAG_RD	BIT(IDPF_CTLQ_FLAG_RD_S)	/* 0x400 */
#define IDPF_CTLQ_FLAG_VFC	BIT(IDPF_CTLQ_FLAG_VFC_S)	/* 0x800 */
#define IDPF_CTLQ_FLAG_BUF	BIT(IDPF_CTLQ_FLAG_BUF_S)	/* 0x1000 */

/* Host ID is a special field that has 3b and not a 1b flag, so it needs a
 * mask covering bits 15:13 rather than a single BIT().
 */
#define IDPF_CTLQ_FLAG_HOST_ID_M	GENMASK(15, IDPF_CTLQ_FLAG_HOST_ID_S)

struct idpf_mbxq_desc {
	u8 pad[8];		/* CTLQ flags/opcode/len/retval fields */
	u32 chnl_opcode;	/* avoid confusion with desc->opcode */
	u32 chnl_retval;	/* ditto for desc->retval */
	u32 pf_vf_id;		/* used by CP when sending to PF */
};

/* Max number of MMIO regions, not including the mailbox and rstat regions,
 * in the fallback case when the whole BAR is mapped: carving the mailbox
 * and rstat regions out of the BAR leaves at most three remaining spans.
 */
#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING	3

struct idpf_mmio_reg {
	void __iomem *vaddr;
	resource_size_t addr_start;
	resource_size_t addr_len;
};

/* Define the driver hardware struct to replace other control structs as needed
 * Align to ctlq_hw_info
 */
struct idpf_hw {
	struct idpf_mmio_reg mbx;
	struct idpf_mmio_reg rstat;
	/* Array of remaining LAN BAR regions */
	int num_lan_regs;
	struct idpf_mmio_reg *lan_regs;

	struct idpf_adapter *back;

	/* control queue - send and receive */
	struct idpf_ctlq_info *asq;
	struct idpf_ctlq_info *arq;

	/* pci info */
	u16 device_id;
	u16 vendor_id;
	u16 subsystem_device_id;
	u16 subsystem_vendor_id;
	u8 revision_id;
	bool adapter_stopped;

	struct list_head cq_list_head;
};

int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,
			     struct idpf_ctlq_info *cq);

void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);

/* prototypes for functions used for dynamic memory allocation */
void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
			 u64 size);
void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);

#endif /* _IDPF_CONTROLQ_H_ */