summaryrefslogtreecommitdiff
path: root/lib/librte_gso
diff options
context:
space:
mode:
authorMark Kavanagh <mark.b.kavanagh@intel.com>2017-10-07 22:56:42 +0800
committerFerruh Yigit <ferruh.yigit@intel.com>2017-10-12 01:36:57 +0100
commit70e737e448c734b8d728fd17f3e50d28711d475d (patch)
tree4773abd4c159f84fad028e1aa4a7208a7c9e6b2d /lib/librte_gso
parentb058d92ea95d50c8ed3cde333c979cb367a97d15 (diff)
downloaddpdk-70e737e448c734b8d728fd17f3e50d28711d475d.zip
dpdk-70e737e448c734b8d728fd17f3e50d28711d475d.tar.gz
dpdk-70e737e448c734b8d728fd17f3e50d28711d475d.tar.xz
gso: support GRE GSO
This patch adds GSO support for GRE-tunneled packets. Supported GRE packets must contain an outer IPv4 header, and inner TCP/IPv4 headers. They may also contain a single VLAN tag.

GRE GSO doesn't check if all input packets have correct checksums and doesn't update checksums for output packets. Additionally, it doesn't process IP fragmented packets.

As with VxLAN GSO, GRE GSO uses a two-segment MBUF to organize each output packet, which requires multi-segment mbuf support in the TX functions of the NIC driver. Also, if a packet is GSOed, GRE GSO reduces its MBUF refcnt by 1. As a result, when all of its GSOed segments are freed, the packet is freed automatically.

Signed-off-by: Mark Kavanagh <mark.b.kavanagh@intel.com>
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Diffstat (limited to 'lib/librte_gso')
-rw-r--r--lib/librte_gso/gso_common.h5
-rw-r--r--lib/librte_gso/gso_tunnel_tcp4.c14
-rw-r--r--lib/librte_gso/rte_gso.c9
3 files changed, 21 insertions, 7 deletions
diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h
index 95d54e7..145ea49 100644
--- a/lib/librte_gso/gso_common.h
+++ b/lib/librte_gso/gso_common.h
@@ -55,6 +55,11 @@
(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
PKT_TX_TUNNEL_VXLAN))
+#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
+ PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
+ (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
+ PKT_TX_TUNNEL_GRE))
+
/**
* Internal function which updates the UDP header of a packet, following
* segmentation. This is required to update the header's datagram length field.
diff --git a/lib/librte_gso/gso_tunnel_tcp4.c b/lib/librte_gso/gso_tunnel_tcp4.c
index 5e8c8e5..8d0cfd7 100644
--- a/lib/librte_gso/gso_tunnel_tcp4.c
+++ b/lib/librte_gso/gso_tunnel_tcp4.c
@@ -42,11 +42,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
- uint16_t outer_ipv4_offset, inner_ipv4_offset, udp_offset, tcp_offset;
+ uint16_t outer_ipv4_offset, inner_ipv4_offset;
+ uint16_t udp_gre_offset, tcp_offset;
+ uint8_t update_udp_hdr;
outer_ipv4_offset = pkt->outer_l2_len;
- udp_offset = outer_ipv4_offset + pkt->outer_l3_len;
- inner_ipv4_offset = udp_offset + pkt->l2_len;
+ udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
+ inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
tcp_offset = inner_ipv4_offset + pkt->l3_len;
/* Outer IPv4 header. */
@@ -63,9 +65,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
tail_idx = nb_segs - 1;
+ /* Only update UDP header for VxLAN packets. */
+ update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
+
for (i = 0; i < nb_segs; i++) {
update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
- update_udp_header(segs[i], udp_offset);
+ if (update_udp_hdr)
+ update_udp_header(segs[i], udp_gre_offset);
update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
outer_id++;
diff --git a/lib/librte_gso/rte_gso.c b/lib/librte_gso/rte_gso.c
index 0a3ef11..f86e654 100644
--- a/lib/librte_gso/rte_gso.c
+++ b/lib/librte_gso/rte_gso.c
@@ -58,7 +58,8 @@ rte_gso_segment(struct rte_mbuf *pkt,
nb_pkts_out < 1 ||
gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) == 0))
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
return -EINVAL;
if (gso_ctx->gso_size >= pkt->pkt_len) {
@@ -73,8 +74,10 @@ rte_gso_segment(struct rte_mbuf *pkt,
ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
ol_flags = pkt->ol_flags;
- if (IS_IPV4_VXLAN_TCP4(pkt->ol_flags)
- && (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) {
+ if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+ ((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
pkt->ol_flags &= (~PKT_TX_TCP_SEG);
ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,