Message ID | 1425474922-4378-1-git-send-email-maxim.uvarov@linaro.org
---|---
State | Superseded |
v6 gives me an error:

odp_packet_socket.c:658:mmap_setup_ring():setsockopt(pkt mmap): Invalid argument

I'm trying to figure out what is the problem.

On Wed, Mar 4, 2015 at 3:15 PM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
> Support for jumbo frames for linux-generic with unsegmented buffers.
> Test for pktio is also adjusted to work with 9*1024=9216 bytes packets.
> https://bugs.linaro.org/show_bug.cgi?id=509
> [...]
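For context on where that EINVAL can come from: the kernel validates the PACKET_RX_RING/PACKET_TX_RING request geometry before accepting it. A rough sketch of those rules (paraphrased from memory rather than quoted from af_packet.c, and the helper name is invented, so treat the details as approximate):

#include <linux/if_packet.h>
#include <unistd.h>

/* Returns 1 if the ring request looks acceptable, 0 if the kernel
 * would likely reject it with EINVAL. */
static int ring_req_looks_sane(const struct tpacket_req *req)
{
	unsigned int per_block;
	unsigned int pz = getpagesize();

	if (req->tp_block_size == 0 || req->tp_block_size % pz)
		return 0;	/* block size must be a multiple of the page size */
	if (req->tp_frame_size < TPACKET_HDRLEN)
		return 0;	/* frame must at least hold the tpacket header */
	if (req->tp_frame_size & (TPACKET_ALIGNMENT - 1))
		return 0;	/* frame size must be 16-byte aligned */

	per_block = req->tp_block_size / req->tp_frame_size;
	if (per_block == 0)
		return 0;
	/* total frame count must equal frames-per-block times block count */
	return per_block * req->tp_block_nr == req->tp_frame_nr;
}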
When running odp_l2fwd, I traced mmap_setup_ring which gave me these values:

{tp_block_size = 1130496, tp_block_nr = 4, tp_frame_size = 2004, tp_frame_nr = 2256}

The frame_size is not aligned to TPACKET_ALIGNMENT.

I corrected that and ran odp_l2fwd with the test setup for ovs, but for some reason there are no more packets received after a while (after exactly 564 packets sent to the other side). I traced that to ring->req.tp_block_nr = odp_cpu_count(); when I set it to 1, odp_l2fwd seems to work fine.

I'm also not sure about including the number of buffers in the pool to compute tp_block_size. I played with that and I got the best results when replacing pool->s.buf_num with 4096.

Overall odp_l2fwd didn't exceed 500 kpps with 64 byte packets.

On Wed, Mar 4, 2015 at 5:52 PM, Ciprian Barbu <ciprian.barbu@linaro.org> wrote:
> v6 gives me an error:
> odp_packet_socket.c:658:mmap_setup_ring():setsockopt(pkt mmap): Invalid argument
>
> I'm trying to figure out what is the problem.
> [...]
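For reference, the usual way to make that frame size legal is to round it up with the TPACKET_ALIGN() macro from <linux/if_packet.h>. A sketch of what such a correction could look like (illustrative helper only, not the v6 code; blk_size and buf_num stand in for the pool fields the patch uses):

#include <linux/if_packet.h>
#include <stdint.h>
#include <unistd.h>

static void fill_ring_req(struct tpacket_req *req, uint32_t blk_size,
			  uint32_t buf_num, uint32_t block_nr)
{
	uint32_t pz = getpagesize();

	/* Round the frame up to a TPACKET_ALIGNMENT (16-byte) multiple. */
	req->tp_frame_size = TPACKET_ALIGN(blk_size + TPACKET_HDRLEN);

	/* Size each block to hold buf_num frames, rounded up to whole pages. */
	req->tp_block_size = (req->tp_frame_size * buf_num + pz - 1) & ~(pz - 1);
	req->tp_block_nr   = block_nr;

	/* Keep frame_nr consistent: frames-per-block times block count. */
	req->tp_frame_nr = req->tp_block_size / req->tp_frame_size *
			   req->tp_block_nr;
}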
On 03/04/15 18:52, Ciprian Barbu wrote:
> v6 gives me an error:
> odp_packet_socket.c:658:mmap_setup_ring():setsockopt(pkt mmap): Invalid argument
>
> I'm trying to figure out what is the problem.

Found that. Looks like a wrong round-up of the frame size. But it's interesting that with the pool from the validation pktio test suite everything was ok.

strace -tt ./example/packet/odp_pktio -i pktio-p0 -m 1 2>&1 | grep -C 10 mmap_setup

19:33:42.413890 setsockopt(3, SOL_PACKET, PACKET_TX_RING, {block_size=1130496, block_nr=8, frame_size=2004, frame_nr=4512}, 16) = -1 EINVAL (Invalid argument)
19:33:42.413923 write(2, "odp_packet_socket.c:658:mmap_set"..., 81odp_packet_socket.c:658:mmap_setup_ring():setsockopt(pkt mmap): Invalid argument
) = 81

Will fix that.

Maxim.

> [...]
On 03/04/15 19:29, Ciprian Barbu wrote:
> When running odp_l2fwd, I traced mmap_setup_ring which gave me these values:
>
> {tp_block_size = 1130496, tp_block_nr = 4, tp_frame_size = 2004,
> tp_frame_nr = 2256}
>
> The frame_size is not aligned to TPACKET_ALIGNMENT.
>
> I corrected that and ran odp_l2fwd with the test setup for ovs, but
> for some reason there are no more packets received after a while
> (after exactly 564 packets sent to the other side). I traced that to
> ring->req.tp_block_nr = odp_cpu_count(); when I set it to 1 odp_l2fwd
> seems to work fine.

That is strange to me. I added alignment for the frame size and l2fwd works for me.

Maxim.

> I'm also not sure about including the number of buffers in the pool to
> compute tp_block_size. I played with that and I got the best results
> when replacing pool->s.buf_num with 4096.
>
> Overall odp_l2fwd didn't exceed 500 kpps with 64 byte packets.
> [...]
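Since the two setups behave differently, one way to narrow it down might be to print the geometry each program actually requests right before the setsockopt() call and compare. A throwaway helper (name and placement invented here, not part of the patch):

#include <linux/if_packet.h>
#include <stdio.h>

/* Debug only: dump the ring geometry so the two runs can be compared. */
static void dump_ring_req(const char *tag, const struct tpacket_req *req)
{
	printf("%s: block_size=%u block_nr=%u frame_size=%u frame_nr=%u"
	       " frames_per_block=%u\n",
	       tag, req->tp_block_size, req->tp_block_nr,
	       req->tp_frame_size, req->tp_frame_nr,
	       req->tp_frame_size ? req->tp_block_size / req->tp_frame_size : 0);
}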
Support for jumbo frames for linux-generic with unsegmented buffers.
Test for pktio is also adjusted to work with 9*1024=9216 bytes packets.
https://bugs.linaro.org/show_bug.cgi?id=509

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
v6: - rewrite mmap_fill_ring to take into account pool settings.
v5: - test_4_jumbo_pkts -> test_jumbo
    - do not use stack for jumbo packet, simply allocate it.
v4: - fix work on real interfaces (make check under root)
    - better define jumbo packet payload size

 platform/linux-generic/odp_packet_socket.c | 40 ++++++++++---
 test/validation/odp_pktio.c                | 95 +++++++++++++++++++++++-------
 test/validation/odp_pktio_run              |  4 +-
 3 files changed, 107 insertions(+), 32 deletions(-)

diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
index 55c212e..0b066cf 100644
--- a/platform/linux-generic/odp_packet_socket.c
+++ b/platform/linux-generic/odp_packet_socket.c
@@ -584,11 +584,33 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 	return i;
 }
 
-static void mmap_fill_ring(struct ring *ring, unsigned blocks)
+static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 {
-	ring->req.tp_block_size = getpagesize() << 2;
-	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
-	ring->req.tp_block_nr = blocks;
+	/*@todo add Huge Pages support*/
+	int pz = getpagesize();
+	uint32_t pool_id = pool_handle_to_index(pool_hdl);
+	pool_entry_t *pool = get_pool_entry(pool_id);
+
+	if (pool == NULL)
+		ODP_ABORT("NULL pool entry");
+
+	/* Frame has to capture full packet which can fit to the pool block.*/
+	ring->req.tp_frame_size = pool->s.blk_size +
+				  TPACKET_HDRLEN + TPACKET_ALIGNMENT;
+	/* Calculate how many pages do we need to hold all pool packets
+	 * and align size to page boundary.
+	 */
+	ring->req.tp_block_size = (ring->req.tp_frame_size * pool->s.buf_num
+				   + (pz - 1)) & (-pz);
+
+	if (!fanout) {
+		/* Single socket is in use. Use 1 block with buf_num frames. */
+		ring->req.tp_block_nr = 1;
+	} else {
+		/* Fanout is in use, most likely traffic split according to
+		 * number of cpu threads. Use cpu blocks and buf_num frames.
+		 */
+		ring->req.tp_block_nr = odp_cpu_count();
+	}
 
 	ring->req.tp_frame_nr = ring->req.tp_block_size /
 				ring->req.tp_frame_size * ring->req.tp_block_nr;
@@ -613,10 +635,10 @@ static int mmap_set_packet_loss_discard(int sock)
 	return 0;
 }
 
-static int mmap_setup_ring(int sock, struct ring *ring, int type)
+static int mmap_setup_ring(int sock, struct ring *ring, int type,
+			   odp_pool_t pool_hdl, int fanout)
 {
 	int ret = 0;
-	unsigned blocks = 256;
 
 	ring->sock = sock;
 	ring->type = type;
@@ -628,7 +650,7 @@ static int mmap_setup_ring(int sock, struct ring *ring, int type)
 		return -1;
 	}
 
-	mmap_fill_ring(ring, blocks);
+	mmap_fill_ring(ring, pool_hdl, fanout);
 
 	ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
 	if (ret == -1) {
@@ -772,12 +794,12 @@ int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
-			      PACKET_TX_RING);
+			      PACKET_TX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
-			      PACKET_RX_RING);
+			      PACKET_RX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c
index 8df367d..f8b9ecc 100644
--- a/test/validation/odp_pktio.c
+++ b/test/validation/odp_pktio.c
@@ -15,6 +15,10 @@
 
 #define PKT_BUF_NUM            32
 #define PKT_BUF_SIZE           1856
+#define PKT_BUF_JUMBO_SIZE     9216
+#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\
+				   (ODPH_UDPHDR_LEN +\
+				   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN))
 #define MAX_NUM_IFACES         2
 #define TEST_SEQ_INVALID       ((uint32_t)~0)
 #define TEST_SEQ_MAGIC         0x92749451
@@ -33,12 +37,21 @@ typedef struct {
 	odp_queue_t inq;
 } pktio_info_t;
 
-/** structure of test packet UDP payload */
-typedef struct {
+typedef struct ODP_PACKED {
 	uint32be_t magic;
 	uint32be_t seq;
+} pkt_head_t;
+
+/** structure of test packet UDP payload */
+typedef struct ODP_PACKED {
+	pkt_head_t head;
+	char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(pkt_head_t) -
+		  sizeof(uint32be_t)];
+	uint32be_t magic2;
 } pkt_test_data_t;
 
+static int test_jumbo;
+
 /** default packet pool */
 odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
 
@@ -59,14 +72,18 @@ static void pktio_pkt_set_macs(odp_packet_t pkt,
 	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
 }
 
+static uint32_t pkt_payload_len(void)
+{
+	return test_jumbo ? sizeof(pkt_test_data_t) : sizeof(pkt_head_t);
+}
+
 static int pktio_pkt_set_seq(odp_packet_t pkt)
 {
 	static uint32_t tstseq;
 	size_t l4_off;
-	pkt_test_data_t data;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
-	data.magic = TEST_SEQ_MAGIC;
-	data.seq   = tstseq;
 
 	l4_off = odp_packet_l4_offset(pkt);
 	if (!l4_off) {
@@ -74,9 +91,16 @@ static int pktio_pkt_set_seq(odp_packet_t pkt)
 		return -1;
 	}
 
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	data->head.magic = TEST_SEQ_MAGIC;
+	data->magic2 = TEST_SEQ_MAGIC;
+	data->head.seq = tstseq;
+
 	odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN,
-			       sizeof(data), &data);
-
+			       len, data);
+	free(data);
 	tstseq++;
 
 	return 0;
@@ -85,18 +109,30 @@ static int pktio_pkt_set_seq(odp_packet_t pkt)
 static uint32_t pktio_pkt_seq(odp_packet_t pkt)
 {
 	size_t l4_off;
-	pkt_test_data_t data;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
 	l4_off = odp_packet_l4_offset(pkt);
-	if (l4_off) {
-		odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
-					sizeof(data), &data);
+	if (!l4_off)
+		return TEST_SEQ_INVALID;
 
-		if (data.magic == TEST_SEQ_MAGIC)
-			return data.seq;
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
+				len, data);
+
+	if (data->head.magic == TEST_SEQ_MAGIC) {
+		if (test_jumbo && data->magic2 != TEST_SEQ_MAGIC) {
+			free(data);
+			return TEST_SEQ_INVALID;
+		}
+		seq = data->head.seq;
 	}
 
-	return TEST_SEQ_INVALID;
+	free(data);
+	return seq;
 }
 
 static odp_packet_t pktio_create_packet(void)
@@ -107,7 +143,7 @@ static odp_packet_t pktio_create_packet(void)
 	odph_udphdr_t *udp;
 	char *buf;
 	uint16_t seq;
-	size_t payload_len = sizeof(pkt_test_data_t);
+	size_t payload_len = pkt_payload_len();
 	uint8_t mac[ODPH_ETHADDR_LEN] = {0};
 
 	pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN +
@@ -187,8 +223,8 @@ static int default_pool_create(void)
 		return -1;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+	params.pkt.len     = PKT_BUF_JUMBO_SIZE;
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
@@ -208,15 +244,24 @@ static odp_pktio_t create_pktio(const char *iface)
 	odp_pool_param_t params;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	if (test_jumbo) {
+		params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+		params.pkt.len     = PKT_BUF_JUMBO_SIZE;
+
+	} else {
+		params.pkt.seg_len = PKT_BUF_SIZE;
+		params.pkt.len     = PKT_BUF_SIZE;
+	}
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
 	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
+
 	pool = odp_pool_lookup(pool_name);
-	if (pool == ODP_POOL_INVALID)
-		pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
+	if (pool != ODP_POOL_INVALID)
+		odp_pool_destroy(pool);
+
+	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
 	CU_ASSERT(pool != ODP_POOL_INVALID);
 
 	pktio = odp_pktio_open(iface, pool);
@@ -450,6 +495,13 @@ static void test_odp_pktio_sched_multi(void)
 	pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4);
 }
 
+static void test_odp_pktio_jumbo(void)
+{
+	test_jumbo = 1;
+	test_odp_pktio_sched_multi();
+	test_jumbo = 0;
+}
+
 static void test_odp_pktio_mtu(void)
 {
 	int ret;
@@ -668,6 +720,7 @@ CU_TestInfo pktio_tests[] = {
 	{"pktio poll multi",	test_odp_pktio_poll_multi},
 	{"pktio sched queues",	test_odp_pktio_sched_queue},
 	{"pktio sched multi",	test_odp_pktio_sched_multi},
+	{"pktio jumbo frames",	test_odp_pktio_jumbo},
 	{"pktio mtu",		test_odp_pktio_mtu},
 	{"pktio promisc mode",	test_odp_pktio_promisc},
 	{"pktio mac",		test_odp_pktio_mac},
diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run
index 08288e6..b9d7e3c 100755
--- a/test/validation/odp_pktio_run
+++ b/test/validation/odp_pktio_run
@@ -56,8 +56,8 @@ setup_env1()
 		echo "pktio: error: unable to create veth pair"
 		exit $TEST_SKIPPED
 	fi
-	ip link set $IF0 up
-	ip link set $IF1 up
+	ip link set $IF0 mtu 9216 up
+	ip link set $IF1 mtu 9216 up
 
 	# network needs a little time to come up
 	sleep 1