Message ID | 1425402363-15206-1-git-send-email-maxim.uvarov@linaro.org |
---|---|
State | New |
Headers | show |
On Tue, Mar 3, 2015 at 11:06 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > Support for jumbo frames for linux-generic with unsegmented buffers. > Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. > https://bugs.linaro.org/show_bug.cgi?id=509 > > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> > --- > v4: - fix work on real interfaces (make check under root) > - better define jumbo packet payload size > > platform/linux-generic/odp_packet_socket.c | 2 +- > test/validation/odp_pktio.c | 91 > +++++++++++++++++++++++------- > test/validation/odp_pktio_run | 4 +- > 3 files changed, 74 insertions(+), 23 deletions(-) > > diff --git a/platform/linux-generic/odp_packet_socket.c > b/platform/linux-generic/odp_packet_socket.c > index 55c212e..4dcb111 100644 > --- a/platform/linux-generic/odp_packet_socket.c > +++ b/platform/linux-generic/odp_packet_socket.c > @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct > ring *ring, > static void mmap_fill_ring(struct ring *ring, unsigned blocks) > { > ring->req.tp_block_size = getpagesize() << 2; > - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; > + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; > ring->req.tp_block_nr = blocks; > > ring->req.tp_frame_nr = ring->req.tp_block_size / > diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c > index 8df367d..ce1ed46 100644 > --- a/test/validation/odp_pktio.c > +++ b/test/validation/odp_pktio.c > @@ -15,6 +15,10 @@ > > #define PKT_BUF_NUM 32 > #define PKT_BUF_SIZE 1856 > +#define PKT_BUF_JUMBO_SIZE 9216 > +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ > + (ODPH_UDPHDR_LEN +\ > + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) > #define MAX_NUM_IFACES 2 > #define TEST_SEQ_INVALID ((uint32_t)~0) > #define TEST_SEQ_MAGIC 0x92749451 > @@ -34,11 +38,17 @@ typedef struct { > } pktio_info_t; > > /** structure of test packet UDP payload */ > -typedef struct { > - uint32be_t magic; > - uint32be_t seq; > +typedef struct 
ODP_PACKED { > + struct { > + uint32be_t magic; > + uint32be_t seq; > + } head; > + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; > + uint32be_t magic2; > } pkt_test_data_t; > > +static int test_4_jumbo_pkts; > + > /** default packet pool */ > odp_pool_t default_pkt_pool = ODP_POOL_INVALID; > > @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t tstseq; > size_t l4_off; > pkt_test_data_t data; > + uint32_t len; > > - data.magic = TEST_SEQ_MAGIC; > - data.seq = tstseq; > + data.head.magic = TEST_SEQ_MAGIC; > + data.magic2 = TEST_SEQ_MAGIC; > + data.head.seq = tstseq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > > l4_off = odp_packet_l4_offset(pkt); > if (!l4_off) { > @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > } > > odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > - > + len, &data); > tstseq++; > > return 0; > @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t pktio_pkt_seq(odp_packet_t pkt) > { > size_t l4_off; > + uint32_t seq = TEST_SEQ_INVALID; > pkt_test_data_t data; > + uint32_t len; > > l4_off = odp_packet_l4_offset(pkt); > - if (l4_off) { > - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > + if (!l4_off) > + return TEST_SEQ_INVALID; > > - if (data.magic == TEST_SEQ_MAGIC) > - return data.seq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > + > + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > + len, &data); > + > + if (data.head.magic == TEST_SEQ_MAGIC) { > + if (test_4_jumbo_pkts && > + data.magic2 != TEST_SEQ_MAGIC) > + return TEST_SEQ_INVALID; > + > + seq = data.head.seq; > } > > - return TEST_SEQ_INVALID; > + return seq; > } > > static odp_packet_t pktio_create_packet(void) > @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) > odph_udphdr_t *udp; > char *buf; > uint16_t seq; > 
- size_t payload_len = sizeof(pkt_test_data_t); > + size_t payload_len; > uint8_t mac[ODPH_ETHADDR_LEN] = {0}; > + pkt_test_data_t data; > + > + if (test_4_jumbo_pkts) > + payload_len = sizeof(data); > + else > + payload_len = sizeof(data.head); > > pkt = odp_packet_alloc(default_pkt_pool, payload_len + > ODPH_UDPHDR_LEN + > ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); > @@ -187,8 +221,8 @@ static int default_pool_create(void) > return -1; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) > odp_pool_param_t params; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + if (test_4_jumbo_pkts) { > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > + > + } else { > + params.pkt.seg_len = PKT_BUF_SIZE; > + params.pkt.len = PKT_BUF_SIZE; > + } > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); > + > pool = odp_pool_lookup(pool_name); > - if (pool == ODP_POOL_INVALID) > - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > + if (pool != ODP_POOL_INVALID) > + odp_pool_destroy(pool); > + > + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > The original code tries to look up a pool name and if it doesn't exist, creates one. It tests as an assertion that the pool create succeeds but if it does not proceeds anyway. Since odp_pktio_open() does not check whether it's been passed a valid pool handle, things will go awry if that happens. Not the best situation for a validation test. 
The replacement code, however, looks up a pool name and if it does exist attempts to destroy it and then re-create it, however the success of the destroy is not checked, so if that fails the pool handle is lost when it's overwritten by the subsequent reallocation attempt. From there we have the same situation in that a failed (re)create of the pool will have the same issues as before. It's not clear why this latter code is an improvement over the former, as both seem to have potential problems. > CU_ASSERT(pool != ODP_POOL_INVALID); > > pktio = odp_pktio_open(iface, pool); > @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) > pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); > } > > +static void test_odp_pktio_jumbo(void) > +{ > + test_4_jumbo_pkts = 1; > + test_odp_pktio_sched_multi(); > + test_4_jumbo_pkts = 0; > +} > + > static void test_odp_pktio_mtu(void) > { > int ret; > @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { > {"pktio poll multi", test_odp_pktio_poll_multi}, > {"pktio sched queues", test_odp_pktio_sched_queue}, > {"pktio sched multi", test_odp_pktio_sched_multi}, > + {"pktio jumbo frames", test_odp_pktio_jumbo}, > {"pktio mtu", test_odp_pktio_mtu}, > {"pktio promisc mode", test_odp_pktio_promisc}, > {"pktio mac", test_odp_pktio_mac}, > diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run > index 08288e6..b9d7e3c 100755 > --- a/test/validation/odp_pktio_run > +++ b/test/validation/odp_pktio_run > @@ -56,8 +56,8 @@ setup_env1() > echo "pktio: error: unable to create veth pair" > exit $TEST_SKIPPED > fi > - ip link set $IF0 up > - ip link set $IF1 up > + ip link set $IF0 mtu 9216 up > + ip link set $IF1 mtu 9216 up > > # network needs a little time to come up > sleep 1 > -- > 1.9.1 > > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > http://lists.linaro.org/mailman/listinfo/lng-odp >
On 03/04/15 06:42, Bill Fischofer wrote: > > > On Tue, Mar 3, 2015 at 11:06 AM, Maxim Uvarov <maxim.uvarov@linaro.org > <mailto:maxim.uvarov@linaro.org>> wrote: > > Support for jumbo frames for linux-generic with unsegmented buffers. > Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. > https://bugs.linaro.org/show_bug.cgi?id=509 > > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org > <mailto:maxim.uvarov@linaro.org>> > --- > v4: - fix work on real interfaces (make check under root) > - better define jumbo packet payload size > > platform/linux-generic/odp_packet_socket.c | 2 +- > test/validation/odp_pktio.c | 91 > +++++++++++++++++++++++------- > test/validation/odp_pktio_run | 4 +- > 3 files changed, 74 insertions(+), 23 deletions(-) > > diff --git a/platform/linux-generic/odp_packet_socket.c > b/platform/linux-generic/odp_packet_socket.c > index 55c212e..4dcb111 100644 > --- a/platform/linux-generic/odp_packet_socket.c > +++ b/platform/linux-generic/odp_packet_socket.c > @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int > sock, struct ring *ring, > static void mmap_fill_ring(struct ring *ring, unsigned blocks) > { > ring->req.tp_block_size = getpagesize() << 2; > - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; > + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; > ring->req.tp_block_nr = blocks; > > ring->req.tp_frame_nr = ring->req.tp_block_size / > diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c > index 8df367d..ce1ed46 100644 > --- a/test/validation/odp_pktio.c > +++ b/test/validation/odp_pktio.c > @@ -15,6 +15,10 @@ > > #define PKT_BUF_NUM 32 > #define PKT_BUF_SIZE 1856 > +#define PKT_BUF_JUMBO_SIZE 9216 > +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ > + (ODPH_UDPHDR_LEN +\ > + ODPH_IPV4HDR_LEN + > ODPH_ETHHDR_LEN)) > #define MAX_NUM_IFACES 2 > #define TEST_SEQ_INVALID ((uint32_t)~0) > #define TEST_SEQ_MAGIC 0x92749451 > @@ -34,11 +38,17 @@ typedef struct { > } pktio_info_t; > > /** 
structure of test packet UDP payload */ > -typedef struct { > - uint32be_t magic; > - uint32be_t seq; > +typedef struct ODP_PACKED { > + struct { > + uint32be_t magic; > + uint32be_t seq; > + } head; > + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; > + uint32be_t magic2; > } pkt_test_data_t; > > +static int test_4_jumbo_pkts; > + > /** default packet pool */ > odp_pool_t default_pkt_pool = ODP_POOL_INVALID; > > @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t tstseq; > size_t l4_off; > pkt_test_data_t data; > + uint32_t len; > > - data.magic = TEST_SEQ_MAGIC; > - data.seq = tstseq; > + data.head.magic = TEST_SEQ_MAGIC; > + data.magic2 = TEST_SEQ_MAGIC; > + data.head.seq = tstseq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > > l4_off = odp_packet_l4_offset(pkt); > if (!l4_off) { > @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > } > > odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > - > + len, &data); > tstseq++; > > return 0; > @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t pktio_pkt_seq(odp_packet_t pkt) > { > size_t l4_off; > + uint32_t seq = TEST_SEQ_INVALID; > pkt_test_data_t data; > + uint32_t len; > > l4_off = odp_packet_l4_offset(pkt); > - if (l4_off) { > - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > + if (!l4_off) > + return TEST_SEQ_INVALID; > > - if (data.magic == TEST_SEQ_MAGIC) > - return data.seq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > + > + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > + len, &data); > + > + if (data.head.magic == TEST_SEQ_MAGIC) { > + if (test_4_jumbo_pkts && > + data.magic2 != TEST_SEQ_MAGIC) > + return TEST_SEQ_INVALID; > + > + seq = data.head.seq; > } > > - return TEST_SEQ_INVALID; > + return seq; > } > > static odp_packet_t pktio_create_packet(void) > 
@@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) > odph_udphdr_t *udp; > char *buf; > uint16_t seq; > - size_t payload_len = sizeof(pkt_test_data_t); > + size_t payload_len; > uint8_t mac[ODPH_ETHADDR_LEN] = {0}; > + pkt_test_data_t data; > + > + if (test_4_jumbo_pkts) > + payload_len = sizeof(data); > + else > + payload_len = sizeof(data.head); > > pkt = odp_packet_alloc(default_pkt_pool, payload_len + > ODPH_UDPHDR_LEN + > ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); > @@ -187,8 +221,8 @@ static int default_pool_create(void) > return -1; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char > *iface) > odp_pool_param_t params; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + if (test_4_jumbo_pkts) { > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > + > + } else { > + params.pkt.seg_len = PKT_BUF_SIZE; > + params.pkt.len = PKT_BUF_SIZE; > + } > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); > + > pool = odp_pool_lookup(pool_name); > - if (pool == ODP_POOL_INVALID) > - pool = odp_pool_create(pool_name, ODP_SHM_NULL, > ¶ms); > + if (pool != ODP_POOL_INVALID) > + odp_pool_destroy(pool); > + > + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > > > The original code tries to look up a pool name and if it doesn't > exist, creates one. It tests as an assertion that the pool create > succeeds but if it does not proceeds anyway. Since odp_pktio_open() > does not check whether it's been passed a valid pool handle, things > will go awry if that happens. Not the best situation for a validation > test. 
> > The replacement code, however, looks up a pool name and if it does > exist attempts to destroy it and then re-create it, however the > success of the destroy is not checked, so if that fails the pool > handle is lost when it's overwritten by the subsequent reallocation > attempt. From there we have the same situation in that a failed > (re)create of the pool will have the same issues as before. It's not > clear why this latter code is an improvement over the former, as both > seem to have potential problems. Bill, my idea here was to use a pool with jumbo packets for the jumbo test and a pool with small packets for the original test. The main reason is to show support for transmitting/receiving a large unsegmented buffer. For that I set up different params.pkt.seg_len and params.pkt.len. Theoretically, if the hardware does not support jumbo frames, only that test should fail, while the other non-jumbo tests have to pass. I also plan, in a new patch, to add one more test where the first interface has a pool for jumbo packets and the second interface has a pool with small packets, so the jumbo packet is segmented there. I think it should be a separate patch from this one. Best regards, Maxim. 
> CU_ASSERT(pool != ODP_POOL_INVALID); > > pktio = odp_pktio_open(iface, pool); > @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) > pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); > } > > +static void test_odp_pktio_jumbo(void) > +{ > + test_4_jumbo_pkts = 1; > + test_odp_pktio_sched_multi(); > + test_4_jumbo_pkts = 0; > +} > + > static void test_odp_pktio_mtu(void) > { > int ret; > @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { > {"pktio poll multi", test_odp_pktio_poll_multi}, > {"pktio sched queues", test_odp_pktio_sched_queue}, > {"pktio sched multi", test_odp_pktio_sched_multi}, > + {"pktio jumbo frames", test_odp_pktio_jumbo}, > {"pktio mtu", test_odp_pktio_mtu}, > {"pktio promisc mode", test_odp_pktio_promisc}, > {"pktio mac", test_odp_pktio_mac}, > diff --git a/test/validation/odp_pktio_run > b/test/validation/odp_pktio_run > index 08288e6..b9d7e3c 100755 > --- a/test/validation/odp_pktio_run > +++ b/test/validation/odp_pktio_run > @@ -56,8 +56,8 @@ setup_env1() > echo "pktio: error: unable to create veth pair" > exit $TEST_SKIPPED > fi > - ip link set $IF0 up > - ip link set $IF1 up > + ip link set $IF0 mtu 9216 up > + ip link set $IF1 mtu 9216 up > > # network needs a little time to come up > sleep 1 > -- > 1.9.1 > > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org <mailto:lng-odp@lists.linaro.org> > http://lists.linaro.org/mailman/listinfo/lng-odp > >
On Tue, Mar 3, 2015 at 7:06 PM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > Support for jumbo frames for linux-generic with unsegmented buffers. > Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. > https://bugs.linaro.org/show_bug.cgi?id=509 > > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> > --- > v4: - fix work on real interfaces (make check under root) > - better define jumbo packet payload size > > platform/linux-generic/odp_packet_socket.c | 2 +- > test/validation/odp_pktio.c | 91 +++++++++++++++++++++++------- > test/validation/odp_pktio_run | 4 +- > 3 files changed, 74 insertions(+), 23 deletions(-) > > diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c > index 55c212e..4dcb111 100644 > --- a/platform/linux-generic/odp_packet_socket.c > +++ b/platform/linux-generic/odp_packet_socket.c > @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, > static void mmap_fill_ring(struct ring *ring, unsigned blocks) > { > ring->req.tp_block_size = getpagesize() << 2; > - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; > + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; > ring->req.tp_block_nr = blocks; > > ring->req.tp_frame_nr = ring->req.tp_block_size / > diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c > index 8df367d..ce1ed46 100644 > --- a/test/validation/odp_pktio.c > +++ b/test/validation/odp_pktio.c > @@ -15,6 +15,10 @@ > > #define PKT_BUF_NUM 32 > #define PKT_BUF_SIZE 1856 > +#define PKT_BUF_JUMBO_SIZE 9216 > +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ > + (ODPH_UDPHDR_LEN +\ > + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) > #define MAX_NUM_IFACES 2 > #define TEST_SEQ_INVALID ((uint32_t)~0) > #define TEST_SEQ_MAGIC 0x92749451 > @@ -34,11 +38,17 @@ typedef struct { > } pktio_info_t; > > /** structure of test packet UDP payload */ > -typedef struct { > - uint32be_t magic; > - uint32be_t seq; > +typedef struct 
ODP_PACKED { > + struct { > + uint32be_t magic; > + uint32be_t seq; > + } head; > + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; > + uint32be_t magic2; Why do we need the second magic? Isn't it enough to reuse head.magic for both jumbo and regular frame sizes? > } pkt_test_data_t; > > +static int test_4_jumbo_pkts; I find this name a bit peculiar, maybe a cleaner "test_jumbo" should do it? > + > /** default packet pool */ > odp_pool_t default_pkt_pool = ODP_POOL_INVALID; > > @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t tstseq; > size_t l4_off; > pkt_test_data_t data; I don't think reserving 4 pages on the stack is very portable, we are not targeting linux-generic x86 only with our validation suites. > + uint32_t len; > > - data.magic = TEST_SEQ_MAGIC; > - data.seq = tstseq; > + data.head.magic = TEST_SEQ_MAGIC; > + data.magic2 = TEST_SEQ_MAGIC; > + data.head.seq = tstseq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > > l4_off = odp_packet_l4_offset(pkt); > if (!l4_off) { > @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > } > > odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > - > + len, &data); > tstseq++; > > return 0; > @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) > static uint32_t pktio_pkt_seq(odp_packet_t pkt) > { > size_t l4_off; > + uint32_t seq = TEST_SEQ_INVALID; > pkt_test_data_t data; > + uint32_t len; > > l4_off = odp_packet_l4_offset(pkt); > - if (l4_off) { > - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > - sizeof(data), &data); > + if (!l4_off) > + return TEST_SEQ_INVALID; > > - if (data.magic == TEST_SEQ_MAGIC) > - return data.seq; > + if (test_4_jumbo_pkts) > + len = sizeof(data); > + else > + len = sizeof(data.head); > + > + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, > + len, &data); > + > + if (data.head.magic == TEST_SEQ_MAGIC) { > + if (test_4_jumbo_pkts && 
> + data.magic2 != TEST_SEQ_MAGIC) > + return TEST_SEQ_INVALID; > + > + seq = data.head.seq; > } > > - return TEST_SEQ_INVALID; > + return seq; > } > > static odp_packet_t pktio_create_packet(void) > @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) > odph_udphdr_t *udp; > char *buf; > uint16_t seq; > - size_t payload_len = sizeof(pkt_test_data_t); > + size_t payload_len; > uint8_t mac[ODPH_ETHADDR_LEN] = {0}; > + pkt_test_data_t data; > + > + if (test_4_jumbo_pkts) > + payload_len = sizeof(data); > + else > + payload_len = sizeof(data.head); > > pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN + > ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); > @@ -187,8 +221,8 @@ static int default_pool_create(void) > return -1; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) > odp_pool_param_t params; > > memset(¶ms, 0, sizeof(params)); > - params.pkt.seg_len = PKT_BUF_SIZE; > - params.pkt.len = PKT_BUF_SIZE; > + if (test_4_jumbo_pkts) { > + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; > + params.pkt.len = PKT_BUF_JUMBO_SIZE; > + > + } else { > + params.pkt.seg_len = PKT_BUF_SIZE; > + params.pkt.len = PKT_BUF_SIZE; > + } > params.pkt.num = PKT_BUF_NUM; > params.type = ODP_POOL_PACKET; > > snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); > + > pool = odp_pool_lookup(pool_name); > - if (pool == ODP_POOL_INVALID) > - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > + if (pool != ODP_POOL_INVALID) > + odp_pool_destroy(pool); > + > + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > CU_ASSERT(pool != ODP_POOL_INVALID); > > pktio = odp_pktio_open(iface, pool); > @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) > 
pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); > } > > +static void test_odp_pktio_jumbo(void) > +{ > + test_4_jumbo_pkts = 1; > + test_odp_pktio_sched_multi(); > + test_4_jumbo_pkts = 0; > +} > + > static void test_odp_pktio_mtu(void) > { > int ret; > @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { > {"pktio poll multi", test_odp_pktio_poll_multi}, > {"pktio sched queues", test_odp_pktio_sched_queue}, > {"pktio sched multi", test_odp_pktio_sched_multi}, > + {"pktio jumbo frames", test_odp_pktio_jumbo}, > {"pktio mtu", test_odp_pktio_mtu}, > {"pktio promisc mode", test_odp_pktio_promisc}, > {"pktio mac", test_odp_pktio_mac}, > diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run > index 08288e6..b9d7e3c 100755 > --- a/test/validation/odp_pktio_run > +++ b/test/validation/odp_pktio_run > @@ -56,8 +56,8 @@ setup_env1() > echo "pktio: error: unable to create veth pair" > exit $TEST_SKIPPED > fi > - ip link set $IF0 up > - ip link set $IF1 up > + ip link set $IF0 mtu 9216 up > + ip link set $IF1 mtu 9216 up > > # network needs a little time to come up > sleep 1 > -- > 1.9.1 > > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > http://lists.linaro.org/mailman/listinfo/lng-odp
On 03/04/15 12:21, Ciprian Barbu wrote: > On Tue, Mar 3, 2015 at 7:06 PM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: >> Support for jumbo frames for linux-generic with unsegmented buffers. >> Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. >> https://bugs.linaro.org/show_bug.cgi?id=509 >> >> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> >> --- >> v4: - fix work on real interfaces (make check under root) >> - better define jumbo packet payload size >> >> platform/linux-generic/odp_packet_socket.c | 2 +- >> test/validation/odp_pktio.c | 91 +++++++++++++++++++++++------- >> test/validation/odp_pktio_run | 4 +- >> 3 files changed, 74 insertions(+), 23 deletions(-) >> >> diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c >> index 55c212e..4dcb111 100644 >> --- a/platform/linux-generic/odp_packet_socket.c >> +++ b/platform/linux-generic/odp_packet_socket.c >> @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, >> static void mmap_fill_ring(struct ring *ring, unsigned blocks) >> { >> ring->req.tp_block_size = getpagesize() << 2; >> - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; >> + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; >> ring->req.tp_block_nr = blocks; >> >> ring->req.tp_frame_nr = ring->req.tp_block_size / >> diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c >> index 8df367d..ce1ed46 100644 >> --- a/test/validation/odp_pktio.c >> +++ b/test/validation/odp_pktio.c >> @@ -15,6 +15,10 @@ >> >> #define PKT_BUF_NUM 32 >> #define PKT_BUF_SIZE 1856 >> +#define PKT_BUF_JUMBO_SIZE 9216 >> +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ >> + (ODPH_UDPHDR_LEN +\ >> + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) >> #define MAX_NUM_IFACES 2 >> #define TEST_SEQ_INVALID ((uint32_t)~0) >> #define TEST_SEQ_MAGIC 0x92749451 >> @@ -34,11 +38,17 @@ typedef struct { >> } pktio_info_t; >> >> /** structure of test packet UDP payload */ 
>> -typedef struct { >> - uint32be_t magic; >> - uint32be_t seq; >> +typedef struct ODP_PACKED { >> + struct { >> + uint32be_t magic; >> + uint32be_t seq; >> + } head; >> + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; >> + uint32be_t magic2; > Why do we need the second magic? Isn't it enough to reuse head.magic > for both jumbo and regular frame sizes? To make sure that packet was not truncated and tail magic also match expected value. > >> } pkt_test_data_t; >> >> +static int test_4_jumbo_pkts; > I find this name a bit peculiar, maybe a cleaner "test_jumbo" should do it? > ok, that is not problem. >> + >> /** default packet pool */ >> odp_pool_t default_pkt_pool = ODP_POOL_INVALID; >> >> @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >> static uint32_t tstseq; >> size_t l4_off; >> pkt_test_data_t data; > I don't think reserving 4 pages on the stack is very portable, we are > not targeting linux-generic x86 only with our validation suites. ok, will fix it. Maxim. 
> >> + uint32_t len; >> >> - data.magic = TEST_SEQ_MAGIC; >> - data.seq = tstseq; >> + data.head.magic = TEST_SEQ_MAGIC; >> + data.magic2 = TEST_SEQ_MAGIC; >> + data.head.seq = tstseq; >> + if (test_4_jumbo_pkts) >> + len = sizeof(data); >> + else >> + len = sizeof(data.head); >> >> l4_off = odp_packet_l4_offset(pkt); >> if (!l4_off) { >> @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >> } >> >> odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, >> - sizeof(data), &data); >> - >> + len, &data); >> tstseq++; >> >> return 0; >> @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >> static uint32_t pktio_pkt_seq(odp_packet_t pkt) >> { >> size_t l4_off; >> + uint32_t seq = TEST_SEQ_INVALID; >> pkt_test_data_t data; >> + uint32_t len; >> >> l4_off = odp_packet_l4_offset(pkt); >> - if (l4_off) { >> - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >> - sizeof(data), &data); >> + if (!l4_off) >> + return TEST_SEQ_INVALID; >> >> - if (data.magic == TEST_SEQ_MAGIC) >> - return data.seq; >> + if (test_4_jumbo_pkts) >> + len = sizeof(data); >> + else >> + len = sizeof(data.head); >> + >> + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >> + len, &data); >> + >> + if (data.head.magic == TEST_SEQ_MAGIC) { >> + if (test_4_jumbo_pkts && >> + data.magic2 != TEST_SEQ_MAGIC) >> + return TEST_SEQ_INVALID; >> + >> + seq = data.head.seq; >> } >> >> - return TEST_SEQ_INVALID; >> + return seq; >> } >> >> static odp_packet_t pktio_create_packet(void) >> @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) >> odph_udphdr_t *udp; >> char *buf; >> uint16_t seq; >> - size_t payload_len = sizeof(pkt_test_data_t); >> + size_t payload_len; >> uint8_t mac[ODPH_ETHADDR_LEN] = {0}; >> + pkt_test_data_t data; >> + >> + if (test_4_jumbo_pkts) >> + payload_len = sizeof(data); >> + else >> + payload_len = sizeof(data.head); >> >> pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN + >> ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); >> 
@@ -187,8 +221,8 @@ static int default_pool_create(void) >> return -1; >> >> memset(¶ms, 0, sizeof(params)); >> - params.pkt.seg_len = PKT_BUF_SIZE; >> - params.pkt.len = PKT_BUF_SIZE; >> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >> params.pkt.num = PKT_BUF_NUM; >> params.type = ODP_POOL_PACKET; >> >> @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) >> odp_pool_param_t params; >> >> memset(¶ms, 0, sizeof(params)); >> - params.pkt.seg_len = PKT_BUF_SIZE; >> - params.pkt.len = PKT_BUF_SIZE; >> + if (test_4_jumbo_pkts) { >> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >> + >> + } else { >> + params.pkt.seg_len = PKT_BUF_SIZE; >> + params.pkt.len = PKT_BUF_SIZE; >> + } >> params.pkt.num = PKT_BUF_NUM; >> params.type = ODP_POOL_PACKET; >> >> snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); >> + >> pool = odp_pool_lookup(pool_name); >> - if (pool == ODP_POOL_INVALID) >> - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >> + if (pool != ODP_POOL_INVALID) >> + odp_pool_destroy(pool); >> + >> + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >> CU_ASSERT(pool != ODP_POOL_INVALID); >> >> pktio = odp_pktio_open(iface, pool); >> @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) >> pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); >> } >> >> +static void test_odp_pktio_jumbo(void) >> +{ >> + test_4_jumbo_pkts = 1; >> + test_odp_pktio_sched_multi(); >> + test_4_jumbo_pkts = 0; >> +} >> + >> static void test_odp_pktio_mtu(void) >> { >> int ret; >> @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { >> {"pktio poll multi", test_odp_pktio_poll_multi}, >> {"pktio sched queues", test_odp_pktio_sched_queue}, >> {"pktio sched multi", test_odp_pktio_sched_multi}, >> + {"pktio jumbo frames", test_odp_pktio_jumbo}, >> {"pktio mtu", test_odp_pktio_mtu}, >> {"pktio promisc mode", test_odp_pktio_promisc}, >> {"pktio mac", test_odp_pktio_mac}, >> diff --git 
a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run >> index 08288e6..b9d7e3c 100755 >> --- a/test/validation/odp_pktio_run >> +++ b/test/validation/odp_pktio_run >> @@ -56,8 +56,8 @@ setup_env1() >> echo "pktio: error: unable to create veth pair" >> exit $TEST_SKIPPED >> fi >> - ip link set $IF0 up >> - ip link set $IF1 up >> + ip link set $IF0 mtu 9216 up >> + ip link set $IF1 mtu 9216 up >> >> # network needs a little time to come up >> sleep 1 >> -- >> 1.9.1 >> >> >> _______________________________________________ >> lng-odp mailing list >> lng-odp@lists.linaro.org >> http://lists.linaro.org/mailman/listinfo/lng-odp
On 4 March 2015 at 10:37, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > On 03/04/15 12:21, Ciprian Barbu wrote: >> >> On Tue, Mar 3, 2015 at 7:06 PM, Maxim Uvarov <maxim.uvarov@linaro.org> >> wrote: >>> >>> Support for jumbo frames for linux-generic with unsegmented buffers. >>> Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. >>> https://bugs.linaro.org/show_bug.cgi?id=509 >>> >>> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> >>> --- >>> v4: - fix work on real interfaces (make check under root) >>> - better define jumbo packet payload size >>> >>> platform/linux-generic/odp_packet_socket.c | 2 +- >>> test/validation/odp_pktio.c | 91 >>> +++++++++++++++++++++++------- >>> test/validation/odp_pktio_run | 4 +- >>> 3 files changed, 74 insertions(+), 23 deletions(-) >>> >>> diff --git a/platform/linux-generic/odp_packet_socket.c >>> b/platform/linux-generic/odp_packet_socket.c >>> index 55c212e..4dcb111 100644 >>> --- a/platform/linux-generic/odp_packet_socket.c >>> +++ b/platform/linux-generic/odp_packet_socket.c >>> @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, >>> struct ring *ring, >>> static void mmap_fill_ring(struct ring *ring, unsigned blocks) >>> { >>> ring->req.tp_block_size = getpagesize() << 2; >>> - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; >>> + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; >>> ring->req.tp_block_nr = blocks; >>> >>> ring->req.tp_frame_nr = ring->req.tp_block_size / >>> diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c >>> index 8df367d..ce1ed46 100644 >>> --- a/test/validation/odp_pktio.c >>> +++ b/test/validation/odp_pktio.c >>> @@ -15,6 +15,10 @@ >>> >>> #define PKT_BUF_NUM 32 >>> #define PKT_BUF_SIZE 1856 >>> +#define PKT_BUF_JUMBO_SIZE 9216 >>> +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ >>> + (ODPH_UDPHDR_LEN +\ >>> + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) I don't understand this computation or the exact purpose of this symbolic 
constant. Are we somehow limiting us to IP/UDP? Is this validation test only using IP/UDP? What happens if some other frame type is received? >>> #define MAX_NUM_IFACES 2 >>> #define TEST_SEQ_INVALID ((uint32_t)~0) >>> #define TEST_SEQ_MAGIC 0x92749451 >>> @@ -34,11 +38,17 @@ typedef struct { >>> } pktio_info_t; >>> >>> /** structure of test packet UDP payload */ >>> -typedef struct { >>> - uint32be_t magic; >>> - uint32be_t seq; >>> +typedef struct ODP_PACKED { >>> + struct { >>> + uint32be_t magic; >>> + uint32be_t seq; >>> + } head; >>> + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; >>> + uint32be_t magic2; >> >> Why do we need the second magic? Isn't it enough to reuse head.magic >> for both jumbo and regular frame sizes? > > > To make sure that packet was not truncated and tail magic also match > expected value. >> >> >>> } pkt_test_data_t; >>> >>> +static int test_4_jumbo_pkts; >> >> I find this name a bit peculiar, maybe a cleaner "test_jumbo" should do >> it? >> > > ok, that is not problem. > >>> + >>> /** default packet pool */ >>> odp_pool_t default_pkt_pool = ODP_POOL_INVALID; >>> >>> @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>> static uint32_t tstseq; >>> size_t l4_off; >>> pkt_test_data_t data; >> >> I don't think reserving 4 pages on the stack is very portable, we are >> not targeting linux-generic x86 only with our validation suites. > > > ok, will fix it. > > Maxim. 
> >> >>> + uint32_t len; >>> >>> - data.magic = TEST_SEQ_MAGIC; >>> - data.seq = tstseq; >>> + data.head.magic = TEST_SEQ_MAGIC; >>> + data.magic2 = TEST_SEQ_MAGIC; >>> + data.head.seq = tstseq; >>> + if (test_4_jumbo_pkts) >>> + len = sizeof(data); >>> + else >>> + len = sizeof(data.head); >>> >>> l4_off = odp_packet_l4_offset(pkt); >>> if (!l4_off) { >>> @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>> } >>> >>> odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, >>> - sizeof(data), &data); >>> - >>> + len, &data); >>> tstseq++; >>> >>> return 0; >>> @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>> static uint32_t pktio_pkt_seq(odp_packet_t pkt) >>> { >>> size_t l4_off; >>> + uint32_t seq = TEST_SEQ_INVALID; >>> pkt_test_data_t data; >>> + uint32_t len; >>> >>> l4_off = odp_packet_l4_offset(pkt); >>> - if (l4_off) { >>> - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >>> - sizeof(data), &data); >>> + if (!l4_off) >>> + return TEST_SEQ_INVALID; >>> >>> - if (data.magic == TEST_SEQ_MAGIC) >>> - return data.seq; >>> + if (test_4_jumbo_pkts) >>> + len = sizeof(data); >>> + else >>> + len = sizeof(data.head); >>> + >>> + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >>> + len, &data); >>> + >>> + if (data.head.magic == TEST_SEQ_MAGIC) { >>> + if (test_4_jumbo_pkts && >>> + data.magic2 != TEST_SEQ_MAGIC) >>> + return TEST_SEQ_INVALID; >>> + >>> + seq = data.head.seq; >>> } >>> >>> - return TEST_SEQ_INVALID; >>> + return seq; >>> } >>> >>> static odp_packet_t pktio_create_packet(void) >>> @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) >>> odph_udphdr_t *udp; >>> char *buf; >>> uint16_t seq; >>> - size_t payload_len = sizeof(pkt_test_data_t); >>> + size_t payload_len; >>> uint8_t mac[ODPH_ETHADDR_LEN] = {0}; >>> + pkt_test_data_t data; >>> + >>> + if (test_4_jumbo_pkts) >>> + payload_len = sizeof(data); >>> + else >>> + payload_len = sizeof(data.head); >>> >>> pkt = 
odp_packet_alloc(default_pkt_pool, payload_len + >>> ODPH_UDPHDR_LEN + >>> ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); >>> @@ -187,8 +221,8 @@ static int default_pool_create(void) >>> return -1; >>> >>> memset(¶ms, 0, sizeof(params)); >>> - params.pkt.seg_len = PKT_BUF_SIZE; >>> - params.pkt.len = PKT_BUF_SIZE; >>> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >>> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >>> params.pkt.num = PKT_BUF_NUM; >>> params.type = ODP_POOL_PACKET; >>> >>> @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) >>> odp_pool_param_t params; >>> >>> memset(¶ms, 0, sizeof(params)); >>> - params.pkt.seg_len = PKT_BUF_SIZE; >>> - params.pkt.len = PKT_BUF_SIZE; >>> + if (test_4_jumbo_pkts) { >>> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >>> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >>> + >>> + } else { >>> + params.pkt.seg_len = PKT_BUF_SIZE; >>> + params.pkt.len = PKT_BUF_SIZE; >>> + } >>> params.pkt.num = PKT_BUF_NUM; >>> params.type = ODP_POOL_PACKET; >>> >>> snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); >>> + >>> pool = odp_pool_lookup(pool_name); >>> - if (pool == ODP_POOL_INVALID) >>> - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >>> + if (pool != ODP_POOL_INVALID) >>> + odp_pool_destroy(pool); >>> + >>> + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >>> CU_ASSERT(pool != ODP_POOL_INVALID); >>> >>> pktio = odp_pktio_open(iface, pool); >>> @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) >>> pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); >>> } >>> >>> +static void test_odp_pktio_jumbo(void) >>> +{ >>> + test_4_jumbo_pkts = 1; >>> + test_odp_pktio_sched_multi(); >>> + test_4_jumbo_pkts = 0; >>> +} >>> + >>> static void test_odp_pktio_mtu(void) >>> { >>> int ret; >>> @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { >>> {"pktio poll multi", test_odp_pktio_poll_multi}, >>> {"pktio sched queues", test_odp_pktio_sched_queue}, >>> {"pktio sched multi", test_odp_pktio_sched_multi}, >>> + {"pktio 
jumbo frames", test_odp_pktio_jumbo}, >>> {"pktio mtu", test_odp_pktio_mtu}, >>> {"pktio promisc mode", test_odp_pktio_promisc}, >>> {"pktio mac", test_odp_pktio_mac}, >>> diff --git a/test/validation/odp_pktio_run >>> b/test/validation/odp_pktio_run >>> index 08288e6..b9d7e3c 100755 >>> --- a/test/validation/odp_pktio_run >>> +++ b/test/validation/odp_pktio_run >>> @@ -56,8 +56,8 @@ setup_env1() >>> echo "pktio: error: unable to create veth pair" >>> exit $TEST_SKIPPED >>> fi >>> - ip link set $IF0 up >>> - ip link set $IF1 up >>> + ip link set $IF0 mtu 9216 up >>> + ip link set $IF1 mtu 9216 up >>> >>> # network needs a little time to come up >>> sleep 1 >>> -- >>> 1.9.1 >>> >>> >>> _______________________________________________ >>> lng-odp mailing list >>> lng-odp@lists.linaro.org >>> http://lists.linaro.org/mailman/listinfo/lng-odp > > > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > http://lists.linaro.org/mailman/listinfo/lng-odp
On 03/04/15 12:55, Ola Liljedahl wrote: > On 4 March 2015 at 10:37, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: >> On 03/04/15 12:21, Ciprian Barbu wrote: >>> On Tue, Mar 3, 2015 at 7:06 PM, Maxim Uvarov <maxim.uvarov@linaro.org> >>> wrote: >>>> Support for jumbo frames for linux-generic with unsegmented buffers. >>>> Test for pkio is also adjusted to work with 9*1024=9216 bytes packets. >>>> https://bugs.linaro.org/show_bug.cgi?id=509 >>>> >>>> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> >>>> --- >>>> v4: - fix work on real interfaces (make check under root) >>>> - better define jumbo packet payload size >>>> >>>> platform/linux-generic/odp_packet_socket.c | 2 +- >>>> test/validation/odp_pktio.c | 91 >>>> +++++++++++++++++++++++------- >>>> test/validation/odp_pktio_run | 4 +- >>>> 3 files changed, 74 insertions(+), 23 deletions(-) >>>> >>>> diff --git a/platform/linux-generic/odp_packet_socket.c >>>> b/platform/linux-generic/odp_packet_socket.c >>>> index 55c212e..4dcb111 100644 >>>> --- a/platform/linux-generic/odp_packet_socket.c >>>> +++ b/platform/linux-generic/odp_packet_socket.c >>>> @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, >>>> struct ring *ring, >>>> static void mmap_fill_ring(struct ring *ring, unsigned blocks) >>>> { >>>> ring->req.tp_block_size = getpagesize() << 2; >>>> - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; >>>> + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; >>>> ring->req.tp_block_nr = blocks; >>>> >>>> ring->req.tp_frame_nr = ring->req.tp_block_size / >>>> diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c >>>> index 8df367d..ce1ed46 100644 >>>> --- a/test/validation/odp_pktio.c >>>> +++ b/test/validation/odp_pktio.c >>>> @@ -15,6 +15,10 @@ >>>> >>>> #define PKT_BUF_NUM 32 >>>> #define PKT_BUF_SIZE 1856 >>>> +#define PKT_BUF_JUMBO_SIZE 9216 >>>> +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ >>>> + (ODPH_UDPHDR_LEN +\ >>>> + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) > 
I don't understand this computation or the exact purpose of this > symbolic constant. > Are we somehow limiting us to IP/UDP? Is this validation test only > using IP/UDP? What happens if some other frame type is received? > That is only related to current validation test. In the code UDP packet is allocated with: pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); then all fields are filled. And then there are 2 pktio. One for relieve and other for send. So That is number is related to value to odp_packet_alloc(). Best regards, Maxim. >>>> #define MAX_NUM_IFACES 2 >>>> #define TEST_SEQ_INVALID ((uint32_t)~0) >>>> #define TEST_SEQ_MAGIC 0x92749451 >>>> @@ -34,11 +38,17 @@ typedef struct { >>>> } pktio_info_t; >>>> >>>> /** structure of test packet UDP payload */ >>>> -typedef struct { >>>> - uint32be_t magic; >>>> - uint32be_t seq; >>>> +typedef struct ODP_PACKED { >>>> + struct { >>>> + uint32be_t magic; >>>> + uint32be_t seq; >>>> + } head; >>>> + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; >>>> + uint32be_t magic2; >>> Why do we need the second magic? Isn't it enough to reuse head.magic >>> for both jumbo and regular frame sizes? >> >> To make sure that packet was not truncated and tail magic also match >> expected value. >>> >>>> } pkt_test_data_t; >>>> >>>> +static int test_4_jumbo_pkts; >>> I find this name a bit peculiar, maybe a cleaner "test_jumbo" should do >>> it? >>> >> ok, that is not problem. >> >>>> + >>>> /** default packet pool */ >>>> odp_pool_t default_pkt_pool = ODP_POOL_INVALID; >>>> >>>> @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>>> static uint32_t tstseq; >>>> size_t l4_off; >>>> pkt_test_data_t data; >>> I don't think reserving 4 pages on the stack is very portable, we are >>> not targeting linux-generic x86 only with our validation suites. >> >> ok, will fix it. >> >> Maxim. 
>> >>>> + uint32_t len; >>>> >>>> - data.magic = TEST_SEQ_MAGIC; >>>> - data.seq = tstseq; >>>> + data.head.magic = TEST_SEQ_MAGIC; >>>> + data.magic2 = TEST_SEQ_MAGIC; >>>> + data.head.seq = tstseq; >>>> + if (test_4_jumbo_pkts) >>>> + len = sizeof(data); >>>> + else >>>> + len = sizeof(data.head); >>>> >>>> l4_off = odp_packet_l4_offset(pkt); >>>> if (!l4_off) { >>>> @@ -75,8 +91,7 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>>> } >>>> >>>> odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, >>>> - sizeof(data), &data); >>>> - >>>> + len, &data); >>>> tstseq++; >>>> >>>> return 0; >>>> @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) >>>> static uint32_t pktio_pkt_seq(odp_packet_t pkt) >>>> { >>>> size_t l4_off; >>>> + uint32_t seq = TEST_SEQ_INVALID; >>>> pkt_test_data_t data; >>>> + uint32_t len; >>>> >>>> l4_off = odp_packet_l4_offset(pkt); >>>> - if (l4_off) { >>>> - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >>>> - sizeof(data), &data); >>>> + if (!l4_off) >>>> + return TEST_SEQ_INVALID; >>>> >>>> - if (data.magic == TEST_SEQ_MAGIC) >>>> - return data.seq; >>>> + if (test_4_jumbo_pkts) >>>> + len = sizeof(data); >>>> + else >>>> + len = sizeof(data.head); >>>> + >>>> + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, >>>> + len, &data); >>>> + >>>> + if (data.head.magic == TEST_SEQ_MAGIC) { >>>> + if (test_4_jumbo_pkts && >>>> + data.magic2 != TEST_SEQ_MAGIC) >>>> + return TEST_SEQ_INVALID; >>>> + >>>> + seq = data.head.seq; >>>> } >>>> >>>> - return TEST_SEQ_INVALID; >>>> + return seq; >>>> } >>>> >>>> static odp_packet_t pktio_create_packet(void) >>>> @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) >>>> odph_udphdr_t *udp; >>>> char *buf; >>>> uint16_t seq; >>>> - size_t payload_len = sizeof(pkt_test_data_t); >>>> + size_t payload_len; >>>> uint8_t mac[ODPH_ETHADDR_LEN] = {0}; >>>> + pkt_test_data_t data; >>>> + >>>> + if (test_4_jumbo_pkts) >>>> + payload_len = sizeof(data); >>>> + else >>>> + 
payload_len = sizeof(data.head); >>>> >>>> pkt = odp_packet_alloc(default_pkt_pool, payload_len + >>>> ODPH_UDPHDR_LEN + >>>> ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); >>>> @@ -187,8 +221,8 @@ static int default_pool_create(void) >>>> return -1; >>>> >>>> memset(¶ms, 0, sizeof(params)); >>>> - params.pkt.seg_len = PKT_BUF_SIZE; >>>> - params.pkt.len = PKT_BUF_SIZE; >>>> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >>>> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >>>> params.pkt.num = PKT_BUF_NUM; >>>> params.type = ODP_POOL_PACKET; >>>> >>>> @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) >>>> odp_pool_param_t params; >>>> >>>> memset(¶ms, 0, sizeof(params)); >>>> - params.pkt.seg_len = PKT_BUF_SIZE; >>>> - params.pkt.len = PKT_BUF_SIZE; >>>> + if (test_4_jumbo_pkts) { >>>> + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; >>>> + params.pkt.len = PKT_BUF_JUMBO_SIZE; >>>> + >>>> + } else { >>>> + params.pkt.seg_len = PKT_BUF_SIZE; >>>> + params.pkt.len = PKT_BUF_SIZE; >>>> + } >>>> params.pkt.num = PKT_BUF_NUM; >>>> params.type = ODP_POOL_PACKET; >>>> >>>> snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); >>>> + >>>> pool = odp_pool_lookup(pool_name); >>>> - if (pool == ODP_POOL_INVALID) >>>> - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >>>> + if (pool != ODP_POOL_INVALID) >>>> + odp_pool_destroy(pool); >>>> + >>>> + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); >>>> CU_ASSERT(pool != ODP_POOL_INVALID); >>>> >>>> pktio = odp_pktio_open(iface, pool); >>>> @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) >>>> pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); >>>> } >>>> >>>> +static void test_odp_pktio_jumbo(void) >>>> +{ >>>> + test_4_jumbo_pkts = 1; >>>> + test_odp_pktio_sched_multi(); >>>> + test_4_jumbo_pkts = 0; >>>> +} >>>> + >>>> static void test_odp_pktio_mtu(void) >>>> { >>>> int ret; >>>> @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { >>>> {"pktio poll multi", test_odp_pktio_poll_multi}, >>>> {"pktio sched 
queues", test_odp_pktio_sched_queue}, >>>> {"pktio sched multi", test_odp_pktio_sched_multi}, >>>> + {"pktio jumbo frames", test_odp_pktio_jumbo}, >>>> {"pktio mtu", test_odp_pktio_mtu}, >>>> {"pktio promisc mode", test_odp_pktio_promisc}, >>>> {"pktio mac", test_odp_pktio_mac}, >>>> diff --git a/test/validation/odp_pktio_run >>>> b/test/validation/odp_pktio_run >>>> index 08288e6..b9d7e3c 100755 >>>> --- a/test/validation/odp_pktio_run >>>> +++ b/test/validation/odp_pktio_run >>>> @@ -56,8 +56,8 @@ setup_env1() >>>> echo "pktio: error: unable to create veth pair" >>>> exit $TEST_SKIPPED >>>> fi >>>> - ip link set $IF0 up >>>> - ip link set $IF1 up >>>> + ip link set $IF0 mtu 9216 up >>>> + ip link set $IF1 mtu 9216 up >>>> >>>> # network needs a little time to come up >>>> sleep 1 >>>> -- >>>> 1.9.1 >>>> >>>> >>>> _______________________________________________ >>>> lng-odp mailing list >>>> lng-odp@lists.linaro.org >>>> http://lists.linaro.org/mailman/listinfo/lng-odp >> >> >> _______________________________________________ >> lng-odp mailing list >> lng-odp@lists.linaro.org >> http://lists.linaro.org/mailman/listinfo/lng-odp
diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c index 55c212e..4dcb111 100644 --- a/platform/linux-generic/odp_packet_socket.c +++ b/platform/linux-generic/odp_packet_socket.c @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, static void mmap_fill_ring(struct ring *ring, unsigned blocks) { ring->req.tp_block_size = getpagesize() << 2; - ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; + ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN; ring->req.tp_block_nr = blocks; ring->req.tp_frame_nr = ring->req.tp_block_size / diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c index 8df367d..ce1ed46 100644 --- a/test/validation/odp_pktio.c +++ b/test/validation/odp_pktio.c @@ -15,6 +15,10 @@ #define PKT_BUF_NUM 32 #define PKT_BUF_SIZE 1856 +#define PKT_BUF_JUMBO_SIZE 9216 +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\ + (ODPH_UDPHDR_LEN +\ + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN)) #define MAX_NUM_IFACES 2 #define TEST_SEQ_INVALID ((uint32_t)~0) #define TEST_SEQ_MAGIC 0x92749451 @@ -34,11 +38,17 @@ typedef struct { } pktio_info_t; /** structure of test packet UDP payload */ -typedef struct { - uint32be_t magic; - uint32be_t seq; +typedef struct ODP_PACKED { + struct { + uint32be_t magic; + uint32be_t seq; + } head; + char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(uint32be_t)*3]; + uint32be_t magic2; } pkt_test_data_t; +static int test_4_jumbo_pkts; + /** default packet pool */ odp_pool_t default_pkt_pool = ODP_POOL_INVALID; @@ -64,9 +74,15 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) static uint32_t tstseq; size_t l4_off; pkt_test_data_t data; + uint32_t len; - data.magic = TEST_SEQ_MAGIC; - data.seq = tstseq; + data.head.magic = TEST_SEQ_MAGIC; + data.magic2 = TEST_SEQ_MAGIC; + data.head.seq = tstseq; + if (test_4_jumbo_pkts) + len = sizeof(data); + else + len = sizeof(data.head); l4_off = odp_packet_l4_offset(pkt); if (!l4_off) { @@ -75,8 +91,7 @@ static 
int pktio_pkt_set_seq(odp_packet_t pkt) } odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN, - sizeof(data), &data); - + len, &data); tstseq++; return 0; @@ -85,18 +100,31 @@ static int pktio_pkt_set_seq(odp_packet_t pkt) static uint32_t pktio_pkt_seq(odp_packet_t pkt) { size_t l4_off; + uint32_t seq = TEST_SEQ_INVALID; pkt_test_data_t data; + uint32_t len; l4_off = odp_packet_l4_offset(pkt); - if (l4_off) { - odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, - sizeof(data), &data); + if (!l4_off) + return TEST_SEQ_INVALID; - if (data.magic == TEST_SEQ_MAGIC) - return data.seq; + if (test_4_jumbo_pkts) + len = sizeof(data); + else + len = sizeof(data.head); + + odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN, + len, &data); + + if (data.head.magic == TEST_SEQ_MAGIC) { + if (test_4_jumbo_pkts && + data.magic2 != TEST_SEQ_MAGIC) + return TEST_SEQ_INVALID; + + seq = data.head.seq; } - return TEST_SEQ_INVALID; + return seq; } static odp_packet_t pktio_create_packet(void) @@ -107,8 +135,14 @@ static odp_packet_t pktio_create_packet(void) odph_udphdr_t *udp; char *buf; uint16_t seq; - size_t payload_len = sizeof(pkt_test_data_t); + size_t payload_len; uint8_t mac[ODPH_ETHADDR_LEN] = {0}; + pkt_test_data_t data; + + if (test_4_jumbo_pkts) + payload_len = sizeof(data); + else + payload_len = sizeof(data.head); pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN + ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN); @@ -187,8 +221,8 @@ static int default_pool_create(void) return -1; memset(¶ms, 0, sizeof(params)); - params.pkt.seg_len = PKT_BUF_SIZE; - params.pkt.len = PKT_BUF_SIZE; + params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; + params.pkt.len = PKT_BUF_JUMBO_SIZE; params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET; @@ -208,15 +242,24 @@ static odp_pktio_t create_pktio(const char *iface) odp_pool_param_t params; memset(¶ms, 0, sizeof(params)); - params.pkt.seg_len = PKT_BUF_SIZE; - params.pkt.len = PKT_BUF_SIZE; + if (test_4_jumbo_pkts) { + 
params.pkt.seg_len = PKT_BUF_JUMBO_SIZE; + params.pkt.len = PKT_BUF_JUMBO_SIZE; + + } else { + params.pkt.seg_len = PKT_BUF_SIZE; + params.pkt.len = PKT_BUF_SIZE; + } params.pkt.num = PKT_BUF_NUM; params.type = ODP_POOL_PACKET; snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface); + pool = odp_pool_lookup(pool_name); - if (pool == ODP_POOL_INVALID) - pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); + if (pool != ODP_POOL_INVALID) + odp_pool_destroy(pool); + + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); CU_ASSERT(pool != ODP_POOL_INVALID); pktio = odp_pktio_open(iface, pool); @@ -450,6 +493,13 @@ static void test_odp_pktio_sched_multi(void) pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4); } +static void test_odp_pktio_jumbo(void) +{ + test_4_jumbo_pkts = 1; + test_odp_pktio_sched_multi(); + test_4_jumbo_pkts = 0; +} + static void test_odp_pktio_mtu(void) { int ret; @@ -668,6 +718,7 @@ CU_TestInfo pktio_tests[] = { {"pktio poll multi", test_odp_pktio_poll_multi}, {"pktio sched queues", test_odp_pktio_sched_queue}, {"pktio sched multi", test_odp_pktio_sched_multi}, + {"pktio jumbo frames", test_odp_pktio_jumbo}, {"pktio mtu", test_odp_pktio_mtu}, {"pktio promisc mode", test_odp_pktio_promisc}, {"pktio mac", test_odp_pktio_mac}, diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run index 08288e6..b9d7e3c 100755 --- a/test/validation/odp_pktio_run +++ b/test/validation/odp_pktio_run @@ -56,8 +56,8 @@ setup_env1() echo "pktio: error: unable to create veth pair" exit $TEST_SKIPPED fi - ip link set $IF0 up - ip link set $IF1 up + ip link set $IF0 mtu 9216 up + ip link set $IF1 mtu 9216 up # network needs a little time to come up sleep 1
Support for jumbo frames for linux-generic with unsegmented buffers. Test for pktio is also adjusted to work with 9*1024=9216-byte packets. https://bugs.linaro.org/show_bug.cgi?id=509 Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> --- v4: - fix work on real interfaces (make check under root) - better define jumbo packet payload size platform/linux-generic/odp_packet_socket.c | 2 +- test/validation/odp_pktio.c | 91 +++++++++++++++++++++++------- test/validation/odp_pktio_run | 4 +- 3 files changed, 74 insertions(+), 23 deletions(-)