@@ -318,13 +318,14 @@ int recv_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
args.pkt_nm = pkt_nm;
odp_ticketlock_lock(&pkt_nm->nm_dev->rx_lock);
- ret = poll(fds, 1, POLL_TMO);
- if (ret <= 0 || (fds[0].revents & POLLERR)) {
- odp_ticketlock_unlock(&pkt_nm->nm_dev->rx_lock);
- return 0;
- }
-
nm_dispatch(pkt_nm->desc, len, nm_recv_cb, (uint8_t *)&args);
+ if (args.nb_rx == 0) {
+ ret = poll(fds, 1, POLL_TMO);
+ if (ret <= 0 || (fds[0].revents & POLLERR)) {
+ odp_ticketlock_unlock(&pkt_nm->nm_dev->rx_lock);
+ return 0;
+ }
+ }
odp_ticketlock_unlock(&pkt_nm->nm_dev->rx_lock);
return args.nb_rx;
@@ -342,12 +343,6 @@ int send_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
fds[0].events = POLLOUT;
odp_ticketlock_lock(&pkt_nm->nm_dev->tx_lock);
- ret = poll(fds, 1, POLL_TMO);
- if (ret <= 0 || (fds[0].revents & POLLERR)) {
- odp_ticketlock_unlock(&pkt_nm->nm_dev->tx_lock);
- goto out;
- }
-
for (nb_tx = 0; nb_tx < len; nb_tx++) {
odp_packet_t pkt = pkt_table[nb_tx];
uint8_t *frame = odp_packet_l2(pkt);
@@ -355,6 +350,13 @@ int send_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
if (nm_inject(pkt_nm->desc, frame, frame_len) == 0)
break;
}
+ if (nb_tx == 0) {
+ ret = poll(fds, 1, POLL_TMO);
+ if (ret <= 0 || (fds[0].revents & POLLERR)) {
+ odp_ticketlock_unlock(&pkt_nm->nm_dev->tx_lock);
+ goto out;
+ }
+ }
odp_ticketlock_unlock(&pkt_nm->nm_dev->tx_lock);
out:
Calling poll() on every send and receive has a cost; calling it only when
needed gives better performance, at the cost of possibly losing some
frames on the TX side.

Signed-off-by: Ciprian Barbu <ciprian.barbu@linaro.org>
---
 platform/linux-netmap/odp_packet_netmap.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
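
For readers skimming the diff, here is a minimal standalone sketch of the
pattern the patch applies on the RX path: attempt the cheap non-blocking
drain first, and pay for the poll() syscall only when the ring was empty.
try_rx() is a hypothetical stand-in for nm_dispatch(), and the POLL_TMO
value is assumed; neither is defined by this patch.

/*
 * Sketch of the "try first, poll only on empty" pattern.
 * try_rx() is a hypothetical non-blocking drain standing in for
 * nm_dispatch(); it returns the number of frames received.
 */
#include <poll.h>

#define POLL_TMO 10 /* milliseconds; assumed value for illustration */

extern int try_rx(int fd, void *pkts, int len);

static int recv_lazy_poll(int fd, void *pkts, int len)
{
	struct pollfd fds[1] = { { .fd = fd, .events = POLLIN } };
	int nb_rx = try_rx(fd, pkts, len); /* cheap path: ring may already
					      hold frames */

	if (nb_rx == 0) {
		/* Nothing pending: only now block in poll(). */
		if (poll(fds, 1, POLL_TMO) <= 0 ||
		    (fds[0].revents & POLLERR))
			return 0;
		/* Like the patch, still report 0 here; the caller's next
		 * try_rx() picks up the frames poll() waited for. */
	}
	return nb_rx;
}

The TX hunk applies the same inversion: nm_inject() is attempted first,
and poll() runs only when not a single frame could be injected, which is
where the possible TX frame loss mentioned in the commit message comes
from.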