Commit 7abd955

dtatulea authored and Saeed Mahameed committed
net/mlx5e: RX, Fix page_pool page fragment tracking for XDP
Currently mlx5e releases pages directly to the page_pool for XDP_TX and does page fragment counting for XDP_REDIRECT. RX pages from the page_pool are leaking on XDP_REDIRECT because the xdp core will release only one fragment out of MLX5E_PAGECNT_BIAS_MAX and subsequently the page is marked as "skip release" which avoids the driver release.

A fix would be to take an extra fragment for XDP_REDIRECT and not set the "skip release" bit so that the release on the driver side can handle the remaining bias fragments. But this would be a shortsighted solution. Instead, this patch converges the two XDP paths (XDP_TX and XDP_REDIRECT) to always do fragment tracking. The "skip release" bit is no longer necessary for XDP.

Fixes: 6f57428 ("net/mlx5e: RX, Enable skb page recycling through the page_pool")
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
1 parent 6496357 commit 7abd955
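
To make the accounting described in the commit message concrete, here is a minimal userspace model of the fragment counting, a sketch only: BIAS_MAX, struct page_model, old_redirect() and new_xdp() are hypothetical names for illustration and are not kernel page_pool APIs. It shows why the old XDP_REDIRECT path, which frees a single fragment and then skips the driver release, never balances the bias (the page leaks), while the converged fragment-tracking path does.

/*
 * Hypothetical userspace model of the fragment accounting described in the
 * commit message. It does not use the kernel page_pool API; all names here
 * are stand-ins chosen for illustration.
 */
#include <stdio.h>

#define BIAS_MAX 128	/* stand-in for MLX5E_PAGECNT_BIAS_MAX */

/* One RX page: pp_frags is the pool-side fragment count, pre-charged with
 * the bias; drv_frags is the driver's record of fragments it handed out.
 */
struct page_model {
	int pp_frags;
	int drv_frags;
};

static void page_init(struct page_model *p)
{
	p->pp_frags = BIAS_MAX;
	p->drv_frags = 0;
}

/* Old XDP_REDIRECT handling: the xdp core returns exactly one fragment and
 * the driver, seeing "skip release", never drops the remaining bias.
 */
static int old_redirect(struct page_model *p)
{
	p->pp_frags -= 1;		/* xdp core's single put */
					/* driver release skipped */
	return p->pp_frags == 0;	/* never true -> page leaks */
}

/* Converged handling: the driver notes one outstanding fragment
 * (frag_page->frags++ in the patch) and still runs its own release,
 * which drops the unused part of the bias.
 */
static int new_xdp(struct page_model *p)
{
	p->drv_frags += 1;			/* fragment handed to XDP */
	p->pp_frags -= BIAS_MAX - p->drv_frags;	/* driver releases unused bias */
	p->pp_frags -= 1;			/* XDP completion returns the fragment */
	return p->pp_frags == 0;		/* balanced -> page recyclable */
}

int main(void)
{
	struct page_model a, b;

	page_init(&a);
	printf("old XDP_REDIRECT path recyclable: %d\n", old_redirect(&a));

	page_init(&b);
	printf("converged path recyclable:        %d\n", new_xdp(&b));
	return 0;
}

Compiling and running the model prints 0 for the old path and 1 for the converged path, mirroring the leak and the fix.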

File tree

2 files changed (+13, -22)


drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

Lines changed: 1 addition & 2 deletions
@@ -662,8 +662,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 			/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
 			 * as we know this is a page_pool page.
 			 */
-			page_pool_put_defragged_page(page->pp,
-						     page, -1, true);
+			page_pool_recycle_direct(page->pp, page);
 		} while (++n < num);
 
 		break;

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 12 additions & 20 deletions
@@ -1751,11 +1751,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
-		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			struct mlx5e_wqe_frag_info *pwi;
 
 			for (pwi = head_wi; pwi < wi; pwi++)
-				pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+				pwi->frag_page->frags++;
 		}
 		return NULL; /* page/packet was consumed by XDP */
 	}
@@ -1825,12 +1825,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}
 
@@ -1876,12 +1872,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}
 
@@ -2060,12 +2052,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	if (prog) {
 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-				int i;
+				struct mlx5e_frag_page *pfp;
+
+				for (pfp = head_page; pfp < frag_page; pfp++)
+					pfp->frags++;
 
-				for (i = 0; i < sinfo->nr_frags; i++)
-					/* non-atomic */
-					__set_bit(page_idx + i, wi->skip_release_bitmap);
-				return NULL;
+				wi->linear_page.frags++;
 			}
 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
 			return NULL; /* page/packet was consumed by XDP */
@@ -2163,7 +2155,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 				 cqe_bcnt, &mxbuf);
 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-				__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+				frag_page->frags++;
 			return NULL; /* page/packet was consumed by XDP */
 		}
 