nfp: use a counter instead of log message for allocation failures
Add a counter incremented when allocation of replacement RX page fails.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 16f50cda06
parent 790a399171
drivers/net/ethernet/netronome/nfp/nfp_net.h

@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
  * @tx_lso:         Counter of LSO packets sent
  * @tx_errors:      How many TX errors were encountered
  * @tx_busy:        How often was TX busy (no space)?
+ * @rx_replace_buf_alloc_fail:  Counter of RX buffer allocation failures
  * @irq_vector:     Interrupt vector number (use for talking to the OS)
  * @handler:        Interrupt handler for this ring vector
  * @name:           Name of the interrupt vector
@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
 	u64 hw_csum_tx_inner;
 	u64 tx_gather;
 	u64 tx_lso;
+
+	u64 rx_replace_buf_alloc_fail;
 	u64 tx_errors;
 	u64 tx_busy;
 
drivers/net/ethernet/netronome/nfp/nfp_net_common.c

@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
 
 	if (!dp->xdp_prog) {
 		frag = napi_alloc_frag(dp->fl_bufsz);
+		if (unlikely(!frag))
+			return NULL;
 	} else {
 		struct page *page;
 
 		page = dev_alloc_page();
-		frag = page ? page_address(page) : NULL;
-	}
-	if (!frag) {
-		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
-		return NULL;
+		if (unlikely(!page))
+			return NULL;
+		frag = page_address(page);
 	}
 
 	*dma_addr = nfp_net_dma_map_rx(dp, frag);
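For readability, this is how the allocation helper reads after the patch, assembled from the context and '+' lines of the hunk above (the declarations at the top of the function are elided); each branch now checks its own allocation and returns NULL silently instead of logging:

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);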
@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 {
 	u64_stats_update_begin(&r_vec->rx_sync);
 	r_vec->rx_drops++;
+	/* If we have both skb and rxbuf the replacement buffer allocation
+	 * must have failed, count this as an alloc failure.
+	 */
+	if (skb && rxbuf)
+		r_vec->rx_replace_buf_alloc_fail++;
 	u64_stats_update_end(&r_vec->rx_sync);
 
 	/* skb is build based on the frag, free_skb() would free the frag
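The "if (skb && rxbuf)" test above leans on a calling convention rather than an extra argument: in the RX path, only the branch where the replacement buffer allocation fails hands the drop helper both a non-NULL rxbuf and a non-NULL skb. A self-contained userspace sketch of that convention (a mock, not the driver's actual RX code; the r_vector struct and rx_drop() helper here are simplified stand-ins):

#include <stdio.h>

struct r_vector {
	unsigned long long rx_drops;
	unsigned long long rx_replace_buf_alloc_fail;
};

/* Simplified stand-in for nfp_net_rx_drop(): counts every drop, and
 * attributes it to a failed replacement allocation when both the original
 * buffer and the already-built skb are passed in.
 */
static void rx_drop(struct r_vector *r_vec, void *rxbuf, void *skb)
{
	r_vec->rx_drops++;
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
}

int main(void)
{
	struct r_vector rv = { 0, 0 };
	int buf, skb;

	rx_drop(&rv, &buf, NULL);	/* ordinary drop, skb never built */
	rx_drop(&rv, &buf, &skb);	/* skb built, replacement alloc failed */
	printf("rx_drops=%llu rx_replace_buf_alloc_fail=%llu\n",
	       rv.rx_drops, rv.rx_replace_buf_alloc_fail);
	return 0;
}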
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c

@@ -181,7 +181,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
 
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
 #define NN_ET_SWITCH_STATS_LEN 9
-#define NN_ET_RVEC_GATHER_STATS 7
+#define NN_ET_RVEC_GATHER_STATS 8
 
 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
 {
@@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
 	data = nfp_pr_et(data, "hw_rx_csum_ok");
 	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
 	data = nfp_pr_et(data, "hw_rx_csum_err");
+	data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
 	data = nfp_pr_et(data, "hw_tx_csum");
 	data = nfp_pr_et(data, "hw_tx_inner_csum");
 	data = nfp_pr_et(data, "tx_gather");
@@ -468,16 +469,17 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
 		tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
 		tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
 		tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
+		tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
 	} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
 
 	do {
 		start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
 		data[1] = nn->r_vecs[i].tx_pkts;
 		data[2] = nn->r_vecs[i].tx_busy;
-		tmp[3] = nn->r_vecs[i].hw_csum_tx;
-		tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
-		tmp[5] = nn->r_vecs[i].tx_gather;
-		tmp[6] = nn->r_vecs[i].tx_lso;
+		tmp[4] = nn->r_vecs[i].hw_csum_tx;
+		tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
+		tmp[6] = nn->r_vecs[i].tx_gather;
+		tmp[7] = nn->r_vecs[i].tx_lso;
 	} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
 	data += 3;
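In this read path, tmp[] holds a per-vector snapshot taken under the u64_stats retry loop, with one slot per "gather" statistic in the same order as the ethtool strings above; inserting rx_replace_buf_alloc_fail at slot 3 is what shifts the TX entries from tmp[3]..tmp[6] to tmp[4]..tmp[7] and grows NN_ET_RVEC_GATHER_STATS from 7 to 8. A standalone sketch of that slot layout (the "tx_lso" string name is assumed, since the strings hunk above is cut off before it):

#include <stdio.h>

#define NN_ET_RVEC_GATHER_STATS 8

/* Slot order mirrors the post-patch string and tmp[] order shown above. */
static const char * const gather_names[NN_ET_RVEC_GATHER_STATS] = {
	"hw_rx_csum_ok",
	"hw_rx_csum_inner_ok",
	"hw_rx_csum_err",
	"rx_replace_buf_alloc_fail",	/* new entry takes slot 3 */
	"hw_tx_csum",
	"hw_tx_inner_csum",
	"tx_gather",
	"tx_lso",			/* assumed name for slot 7 */
};

int main(void)
{
	int i;

	for (i = 0; i < NN_ET_RVEC_GATHER_STATS; i++)
		printf("tmp[%d] accumulates %s\n", i, gather_names[i]);
	return 0;
}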