ice: do switchdev slow-path Rx using PF VSI
Add an ICE_RX_FLAGS_MULTIDEV flag to the Rx ring. If it is set, try to find the correct port representor based on the src_vsi value stored in the flex descriptor. The IDs under which representor pointers are stored in the xarray are equal to the corresponding src_vsi values, so the correct representor can be loaded directly from the src_vsi value.

Set the multidev flag during ring configuration. If the mode is switchdev, change the ring descriptor to the one that contains the src_vsi value.

The PF netdev has to be reconfigured; do it by calling ice_down() and ice_up() if the netdev was up before configuring switchdev.

Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
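The Rx path added below resolves src_vsi to a representor with a single xa_load() because representors are stored in the eswitch xarray under their source VSI number. The registration side is not part of this patch; what follows is only a minimal sketch of that pairing using the kernel xarray API, with hypothetical helper names (example_repr_register(), example_repr_resolve()) and a hypothetical src_vsi_num parameter, not the ice driver's actual code:

#include <linux/xarray.h>

/* Minimal sketch, not driver code: store each port representor under its
 * source VSI number so that a src_vsi value read from an Rx flex descriptor
 * can be turned into the matching representor with one xa_load().
 */
static int example_repr_register(struct xarray *reprs, struct ice_repr *repr,
				 u16 src_vsi_num)
{
	/* key == the src_vsi value later reported by the flex Rx descriptor */
	return xa_insert(reprs, src_vsi_num, repr, GFP_KERNEL);
}

static struct net_device *example_repr_resolve(struct xarray *reprs,
					       u16 src_vsi,
					       struct net_device *fallback)
{
	struct ice_repr *repr = xa_load(reprs, src_vsi);

	/* unknown source VSI: hand the packet to the PF (uplink) netdev */
	return repr ? repr->netdev : fallback;
}

ice_eswitch_get_target() in the diff below implements the resolve half of this idea against pf->eswitch.reprs.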
This commit is contained in:
commit 44ba608db5
parent 6235cb6e5b
@@ -452,6 +452,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	/* Rx queue threshold in units of 64 */
 	rlan_ctx.lrxqthresh = 1;
 
+	/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
+	 * metadata and flags to allow redirecting to PR netdev
+	 */
+	if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+		ring->flags |= ICE_RX_FLAGS_MULTIDEV;
+		rxdid = ICE_RXDID_FLEX_NIC_2;
+	}
+
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
 	 * increasing context priority to pick up profile ID; default is 0x01;
@@ -21,8 +21,13 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
 {
 	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
 	struct net_device *netdev = uplink_vsi->netdev;
+	bool if_running = netif_running(netdev);
 	struct ice_vsi_vlan_ops *vlan_ops;
 
+	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
+		if (ice_down(uplink_vsi))
+			return -ENODEV;
+
 	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
 
 	netif_addr_lock_bh(netdev);
@@ -51,8 +56,13 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
 	if (ice_vsi_update_local_lb(uplink_vsi, true))
 		goto err_override_local_lb;
 
+	if (if_running && ice_up(uplink_vsi))
+		goto err_up;
+
 	return 0;
 
+err_up:
+	ice_vsi_update_local_lb(uplink_vsi, false);
 err_override_local_lb:
 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
 err_override_uplink:
@@ -69,6 +79,9 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
 				       uplink_vsi->port_info->mac.perm_addr,
 				       ICE_FWD_TO_VSI);
+	if (if_running)
+		ice_up(uplink_vsi);
+
 	return -ENODEV;
 }
 
@@ -493,3 +506,26 @@ void ice_eswitch_rebuild(struct ice_pf *pf)
 	xa_for_each(&pf->eswitch.reprs, id, repr)
 		ice_eswitch_detach(pf, repr->vf);
 }
+
+/**
+ * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
+ * @rx_ring: ring used to receive the packet
+ * @rx_desc: descriptor used to get src_vsi value
+ *
+ * Get src_vsi value from descriptor and load correct representor. If it isn't
+ * found return rx_ring->netdev.
+ */
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+					  union ice_32b_rx_flex_desc *rx_desc)
+{
+	struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
+	struct ice_32b_rx_flex_desc_nic_2 *desc;
+	struct ice_repr *repr;
+
+	desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+	repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
+	if (!repr)
+		return rx_ring->netdev;
+
+	return repr->netdev;
+}
@@ -26,6 +26,8 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
 				struct ice_tx_offload_params *off);
 netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+					  union ice_32b_rx_flex_desc *rx_desc);
 #else /* CONFIG_ICE_SWITCHDEV */
 static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
 
@@ -76,5 +78,12 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	return NETDEV_TX_BUSY;
 }
+
+static inline struct net_device *
+ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+		       union ice_32b_rx_flex_desc *rx_desc)
+{
+	return rx_ring->netdev;
+}
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
@@ -365,6 +365,7 @@ struct ice_rx_ring {
 	u8 ptp_rx;
 #define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
 #define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV		BIT(3)
 	u8 flags;
 	/* CL5 - 5th cacheline starts here */
 	struct xdp_rxq_info xdp_rxq;
@@ -236,7 +236,14 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
 	ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
 
 	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) {
+		struct net_device *netdev = ice_eswitch_get_target(rx_ring,
+								    rx_desc);
+
+		skb->protocol = eth_type_trans(skb, netdev);
+	} else {
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	}
 
 	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
 