commit    0958d8b6435de318e2a46d799e356ed08fa40343
author    Ajit Khaparde <ajit.khaparde@broadcom.com>  2017-06-01 12:07:10 -0500
committer Ferruh Yigit <ferruh.yigit@intel.com>       2017-06-12 10:41:28 +0100
tree      6ab5235315e15074f56de1d212398e4ca3ab78e1
parent    daef48efe5e5034dac09f8be9904a59ca15b472e
net/bnxt: support LRO
This patch adds support to enable and disable LRO.

To support this feature, the driver creates an aggregation ring. When the hardware starts doing LRO, it sends a tpa_start completion. When the driver receives a tpa_end completion, it indicates that the LRO chaining is complete.

Signed-off-by: Steeven Li <steeven.li@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Diffstat (limited to 'drivers/net/bnxt/bnxt_rxr.c')
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c | 313
1 file changed, 240 insertions(+), 73 deletions(-)
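For orientation before the diff: the tpa_start/tpa_end bookkeeping the message describes reduces to the toy model below. Every name in it (pkt, tpa_head, toy_tpa_start, toy_tpa_end, TPA_MAX) is invented for illustration and is not the driver's API; the real implementation follows in bnxt_tpa_start()/bnxt_tpa_end().

/*
 * Toy model of LRO aggregation tracking, not the driver's API: an
 * aggregation keyed by agg_id is opened by a tpa_start completion and
 * delivered as one large packet on tpa_end.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TPA_MAX 64

struct pkt { size_t len; };            /* stand-in for struct rte_mbuf */
static struct pkt *tpa_head[TPA_MAX];  /* stand-in for rxr->tpa_info[] */

static void toy_tpa_start(unsigned int agg_id, struct pkt *head)
{
	assert(agg_id < TPA_MAX && tpa_head[agg_id] == NULL);
	tpa_head[agg_id] = head;       /* park the head until tpa_end */
}

static struct pkt *toy_tpa_end(unsigned int agg_id, size_t agg_len)
{
	struct pkt *p = tpa_head[agg_id];

	assert(p != NULL);
	tpa_head[agg_id] = NULL;       /* slot is free for the next LRO */
	p->len += agg_len;             /* agg buffers folded into the head */
	return p;                      /* one large packet for the stack */
}

int main(void)
{
	struct pkt head = { .len = 1448 };

	toy_tpa_start(3, &head);
	printf("LRO packet of %zu bytes\n", toy_tpa_end(3, 4 * 1448)->len);
	return 0;
}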
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 8489a1c..5f30e02 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -34,6 +34,7 @@
#include <inttypes.h>
#include <stdbool.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -102,24 +103,28 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
return 0;
}
-#ifdef BNXT_DEBUG
-static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
struct rte_mbuf *mbuf)
{
- uint16_t prod = rxr->rx_prod;
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
struct bnxt_sw_rx_bd *prod_rx_buf;
- struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+ struct rx_prod_pkt_bd *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
prod_rx_buf->mbuf = mbuf;
prod_bd = &rxr->rx_desc_ring[prod];
- cons_bd = &rxr->rx_desc_ring[cons];
- prod_bd->addr = cons_bd->addr;
+ prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+
+ rxr->rx_prod = prod;
}
+#ifdef BNXT_DEBUG
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
struct rte_mbuf *mbuf)
{
@@ -138,7 +143,190 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
}
#endif
-static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
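+/*
+ * tpa_start completion: take the mbuf at the completion's Rx slot as the
+ * aggregation head for agg_id, refill the ring from the spare kept in
+ * tpa_info, and seed the head's length and offload metadata.
+ */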
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
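+/*
+ * Return nonzero once the hardware has written all 'agg_bufs' completion
+ * entries that follow raw_cp_cons.
+ */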
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consumes agg buffers out of order; replenish only the contiguous run of freed slots after ag_prod */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ RTE_LOG(ERR, PMD,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
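+/*
+ * Chain the agg-buffer mbufs from the next 'agg_buf' completions onto
+ * 'mbuf', mark the consumed ag ring slots in the bitmap, and trigger a
+ * replenish of the freed slots.
+ */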
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+ /*
+ * Aggregation buffers are consumed out of order by the TPA module, so
+ * use a bitmap to track the freed slots that still need to be
+ * reallocated and advertised to the NIC
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
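+/*
+ * tpa_end completion: pull in any trailing agg buffers, hand the
+ * completed chain back to the caller, and allocate a fresh spare mbuf
+ * for tpa_info so the next tpa_start can recycle it.
+ */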
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data)
+ return NULL;
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -148,12 +336,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
uint32_t tmp_raw_cons = *raw_cons;
uint16_t cons, prod, cp_cons =
RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- uint16_t ag_cons, ag_prod = rxr->ag_prod;
- struct bnxt_sw_rx_bd *rx_buf;
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
struct rte_mbuf *mbuf;
int rc = 0;
- uint8_t i;
uint8_t agg_buf = 0;
+ uint16_t cmp_type;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@@ -165,12 +354,34 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+ } else if (cmp_type != 0x11) {
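+ /* not 0x11 (plain L2 Rx completion): not a packet, skip it */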
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
prod = rxr->rx_prod;
- /* EW - GRO deferred to phase 3 */
cons = rxcmp->opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- mbuf = rx_buf->mbuf;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
rte_prefetch0(mbuf);
if (mbuf == NULL)
@@ -190,61 +401,8 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
- agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
- >> RX_PKT_CMPL_AGG_BUFS_SFT;
- if (agg_buf) {
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons + agg_buf);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- if (!CMP_VALID(rxcmp, tmp_raw_cons + agg_buf,
- cpr->cp_ring_struct))
- return -EBUSY;
- RTE_LOG(DEBUG, PMD, "JUMBO Frame %d. %x, agg_buf %x,\n",
- mbuf->pkt_len, rxcmp->agg_bufs_v1, agg_buf);
- }
-
- for (i = 0; i < agg_buf; i++) {
- struct bnxt_sw_rx_bd *ag_buf;
- struct rte_mbuf *ag_mbuf;
- tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- ag_cons = rxcmp->opaque;
- ag_buf = &rxr->ag_buf_ring[ag_cons];
- ag_mbuf = ag_buf->mbuf;
- ag_mbuf->nb_segs = 1;
- ag_mbuf->data_len = rxcmp->len;
-
- mbuf->nb_segs++;
- mbuf->pkt_len += ag_mbuf->data_len;
- if (mbuf->next == NULL) {
- mbuf->next = ag_mbuf;
- } else {
- struct rte_mbuf *temp_mbuf = mbuf;
-
- while (temp_mbuf->next != NULL)
- temp_mbuf = temp_mbuf->next;
- temp_mbuf->next = ag_mbuf;
- }
- ag_buf->mbuf = NULL;
-
- ag_prod = RING_NEXT(rxr->ag_ring_struct, ag_prod);
- if (bnxt_alloc_ag_data(rxq, rxr, ag_prod)) {
- RTE_LOG(ERR, PMD,
- "agg mbuf alloc failed: prod=0x%x\n",
- ag_prod);
- rc = -ENOMEM;
- }
- rxr->ag_prod = ag_prod;
-
-#ifdef BNXT_DEBUG
- if (!CMP_VALID((struct cmpl_base *)
- &cpr->cp_desc_ring[cp_cons], tmp_raw_cons,
- cpr->cp_ring_struct))
- return -EBUSY;
-#endif
- }
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
@@ -254,7 +412,6 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN_PKT;
}
- rx_buf->mbuf = NULL;
#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
@@ -293,9 +450,9 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
*rx_pkt = mbuf;
-#ifdef BNXT_DEBUG
+
next_rx:
-#endif
+
*raw_cons = tmp_raw_cons;
return rc;
@@ -330,7 +487,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (likely(!rc))
nb_rx_pkts++;
- else if (rc == -EBUSY) /* partial completion */
+ if (rc == -EBUSY) /* partial completion */
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -338,6 +495,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
}
+ cpr->cp_raw_cons = raw_cons;
if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
/*
* For PMD, there is no need to keep on pushing to REARM
@@ -345,7 +503,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
return nb_rx_pkts;
}
- cpr->cp_raw_cons = raw_cons;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -515,5 +672,15 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf)
+ return -ENOMEM;
+ }
+ }
+ RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+
return 0;
}