@@ -1,36 +1,33 @@
+# Copyright (c) 2014, Linaro Limited
+# Copyright (c) 2014, Texas Instruments Incorporated
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
include $(top_srcdir)/Makefile.inc
include $(top_srcdir)/platform/Makefile.inc
+include Makefile.inc
-KS2_PLATFORM = DEVICE_K2K
-if SDK_INSTALL_PATH_
-AM_CFLAGS += -I$(SDK_INSTALL_PATH)/usr/include
-
-AM_LDFLAGS += -L$(SDK_INSTALL_PATH)/usr/lib
-endif
-
-PLAT_CFLAGS = -D$(KS2_PLATFORM)
-PLAT_CFLAGS += -D_GNU_SOURCE -DEM_32_BIT -DTI_EM_CENTRAL_SCHED
-PLAT_CFLAGS += -DTI_EM_TRACE_LEVEL=3 -DEM_CHECK_LEVEL=1
-PLAT_CFLAGS += -DTI_EM_LINUX -DTI_EM_GCC -DTI_EM_ARM_A15 -DTI_EM_C6638
-PLAT_CFLAGS += -D_LITTLE_ENDIAN -DTI_EM_USE_MSM -DTI_EM_XGE_LOOPBACK
-PLAT_CFLAGS += -DTI_ODP
-
-AM_CFLAGS += $(PLAT_CFLAGS)
AM_CFLAGS += -I$(srcdir)/include
AM_CFLAGS += -I$(srcdir)/include/api
AM_CFLAGS += -I$(top_srcdir)/platform/linux-generic/include
AM_CFLAGS += -I$(top_srcdir)/platform/linux-generic/include/api
AM_CFLAGS += -I$(top_srcdir)/helper/include
-KS2_LIBS="-lopenem_rh -lopenem_osal"
+KS2_LIBS = -lnwalsa_$(KS2_DEVICE) -lpktlib -lpa -lsa -lcppi_$(KS2_DEVICE) -lqmss_$(KS2_DEVICE) -lrm -lhplib_$(KS2_DEVICE)
LIBS += $(KS2_LIBS)
include_HEADERS = \
$(srcdir)/include/api/odp_buffer.h \
+ $(srcdir)/include/api/odp_buffer_pool.h \
+ $(srcdir)/include/api/odp_packet.h \
+ $(srcdir)/include/api/odp_packet_io.h \
+ $(srcdir)/include/api/odp_state.h \
+ $(srcdir)/include/api/odp_ti_mcsdk.h \
+ $(srcdir)/include/api/mcsdk_tune.h \
$(top_srcdir)/platform/linux-generic/include/api/odp.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_align.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_atomic.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_barrier.h \
- $(top_srcdir)/platform/linux-generic/include/api/odp_buffer_pool.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_byteorder.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_compiler.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_config.h \
@@ -39,8 +36,6 @@ include_HEADERS = \
$(top_srcdir)/platform/linux-generic/include/api/odp_hints.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_init.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_packet_flags.h \
- $(top_srcdir)/platform/linux-generic/include/api/odp_packet.h \
- $(top_srcdir)/platform/linux-generic/include/api/odp_packet_io.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_queue.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_rwlock.h \
$(top_srcdir)/platform/linux-generic/include/api/odp_schedule.h \
@@ -66,21 +61,24 @@ subdirheaders_HEADERS = \
$(top_srcdir)/helper/include/odph_udp.h
__LIB__libodp_la_SOURCES = \
- ../linux-generic/odp_barrier.c \
odp_buffer.c \
odp_buffer_pool.c \
- ../linux-generic/odp_coremask.c \
odp_init.c \
- ../linux-generic/odp_linux.c \
odp_packet.c \
- ../linux-generic/odp_packet_flags.c \
odp_packet_io.c \
- ../linux-generic/odp_packet_socket.c \
odp_queue.c \
+ mcsdk/mcsdk_init.c \
+ mcsdk/mcsdk_navig.c \
+ mcsdk/mcsdk_rmclient.c \
+ mcsdk/sockutils.c \
+ ../linux-generic/odp_barrier.c \
+ ../linux-generic/odp_coremask.c \
+ ../linux-generic/odp_linux.c \
+ ../linux-generic/odp_packet_flags.c \
../linux-generic/odp_ring.c \
../linux-generic/odp_rwlock.c \
../linux-generic/odp_schedule.c \
- odp_shared_memory.c \
+ ../linux-generic/odp_shared_memory.c \
../linux-generic/odp_spinlock.c \
../linux-generic/odp_system_info.c \
../linux-generic/odp_thread.c \
@@ -0,0 +1,18 @@
+# Copyright (c) 2014, Linaro Limited
+# Copyright (c) 2014, Texas Instruments Incorporated
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# MCSDK_CFLAGS should match flags used to build McSDK libraries
+
+KS2_DEVICE ?= k2k
+CSL_DEVICE = DEVICE_K2K
+
+MCSDK_CFLAGS = -D$(CSL_DEVICE) -D_GNU_SOURCE -D_LITTLE_ENDIAN
+MCSDK_CFLAGS += -D__ARMv7 -D_VIRTUAL_ADDR_SUPPORT -DMAKEFILE_BUILD
+MCSDK_CFLAGS += -DNWAL_ENABLE_SA
+
+AM_CFLAGS += $(MCSDK_CFLAGS)
+AM_CFLAGS += -I$(SDK_INSTALL_PATH)/usr/include
+AM_LDFLAGS += -L$(SDK_INSTALL_PATH)/usr/lib
@@ -1,3 +1,9 @@
+Copyright (c) 2014, Linaro Limited
+Copyright (c) 2014, Texas Instruments Incorporated
+All rights reserved.
+
+SPDX-License-Identifier: BSD-3-Clause
+
1. Intro
OpenDataPlane implementation for TI Keystone2 SoC's. Current version supports
@@ -5,34 +11,62 @@ HW buffer, queues and Packet IO management. This drop does not target high
performance. It is rather proof of ODP API functionality. It still uses
linux-generic's SW scheduler.
-2. Prerequisites
+2. ODP build-time dependencies
+
+ 2.1 McSDK libraries
+
+Keystone ODP is based on slightly modified TI Multicore SDK libraries.
+Check the following README file for instructions:
+https://git.linaro.org/people/taras.kondratiuk/ks2-odp-build.git/blob/refs/heads/master:/README
+
+3. ODP build
+
+./bootstrap
+./configure --host=arm-linux-gnueabihf --with-platform=linux-keystone2 --with-sdk-install-path=<sdk-install-path> --enable-static --disable-shared
+# If McSDK is installed to the default path, then you don't need to specify
+# --with-sdk-install-path=<sdk-install-path> when you run configure.
+make
+
+4. ODP run-time dependencies
+
+ 4.1 U-boot
+
+Sources: git://git.ti.com/keystone-linux/u-boot.git
+Tag: K2_UBOOT_2013_01_14.07
+config: make <soc>_evm_config # <soc> is "k2hk", "k2l" or "k2e"
+Build instructions:
+http://processors.wiki.ti.com/index.php/MCSDK_UG_Chapter_Exploring#U-Boot_Build_instructions
+
+ 4.2 Boot monitor
- 2.1 Linux kernel
+Sources: git://git.ti.com/keystone-linux/boot-monitor.git
+Tag: K2_BM_14.07
+Build instructions:
+http://processors.wiki.ti.com/index.php/MCSDK_UG_Chapter_Exploring#Boot_Monitor_Build_instructions
-Sources: git://git.ti.com/keystone-linux/linux.git master
+ 4.3 Linux kernel
+
+Sources: git://git.ti.com/keystone-linux/linux.git
+Tag: K2_LINUX_03.10.10_14.07
config: keystone2_defconfig
DTB: k2hk-evm.dtb
Current implementation relies on kernel to enable and configure network
-interfaces. Implementation does not support Packet and Security accelerators
-now so they should be disable in kernel config:
-
-# CONFIG_TI_KEYSTONE_NET_SA is not set
-# CONFIG_TI_KEYSTONE_QOS is not set
-# CONFIG_TI_KEYSTONE_PA is not set
+interfaces.
- 2.2 OpenEM libs and kernel module
+ 4.4 McSDK parts
-Keystone ODP uses OpenEM libraries as low level drivers.
-Before any ODP application started an OpenEM kernel module (em_mod.ko)
-should be inserted.
+These parts are built along with the McSDK libraries (see section 2.1 above):
-Sources: git://git.linaro.org/people/taras.kondratiuk/keystone2-odp/openem.git for_odp
-Check README file in OpenEM root directory for build instructions.
+- HPLIB kernel module (hplibmod.ko)
+- Resource Manager server
-3. Keystone2 ODP build
+5. Limitations
-./bootstrap
-./configure --host=arm-linux-gnueabihf --with-platform=linux-keystone2 --with-sdk-install-path=<oem-install-path>
-# if openem is installed to the default path then you don't need to specify --with-sdk-install-path=<oem-install-path> when you run configure.
-make
+ODP does not provide an API to free resources on application exit. On the next
+application start there may be no resources available, or the HW resources may
+be left in an inconsistent state, which can lead to unpredictable behaviour.
+Hence, it is recommended to reboot the system after an ODP application exits.
+This limitation will be partially addressed by a 'free' API in the future, but
+that still does not cover the application crash case. Clean-up capabilities may
+need to be added to the Resource Manager server.
new file mode 100644
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MCSDK_TUNE_H_
+#define MCSDK_TUNE_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @ingroup tune_parameters
+ * @def NETAPI_ENABLE_SECURITY
+ * Define this to enable security.
+ * @note Libraries using netapi need to be built with SA enabled
+*/
+#ifdef NWAL_ENABLE_SA
+#define NETAPI_ENABLE_SECURITY
+#endif
+
+/**
+ * @ingroup tune_parameters
+ * @def NETAPI_USE_DDR
+ * Define this to enable use of cached DDR for buffers and descriptors.
+ * @note Do not define if NETAPI_USE_MSMC is defined below
+*/
+#define NETAPI_USE_DDR
+
+/**
+ * @ingroup tune_parameters
+ * @def NETAPI_USE_MSMC
+ * Define this to enable use of un-cached MSMC for buffers and descriptors.
+ * @note Do not define if NETAPI_USE_DDR is defined above
+*/
+
+#if defined(NETAPI_USE_MSMC) && defined(NETAPI_USE_DDR)
+#error "only define NETAPI_USE_MSMC or NETAPI_USE_DDR"
+#endif
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_NUM_CORES
+ * This defines the number of cores (threads)
+ */
+#define TUNE_NETAPI_NUM_CORES 5
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_PERM_MEM_SZ
+ * This defines how much contiguous memory to grab. This is used for
+ * descriptors and buffers in the uncached configuration only. It cannot be
+ * bigger than the MSMC size if MSMC memory is being used uncached.
+ */
+#define TUNE_NETAPI_PERM_MEM_SZ (2*1024*1024)
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_DEFAULT_BUFFER_SIZE
+ * This defines the size of the netapi default pktlib heap buffers. This
+ * can be set at @ref netapi_init.
+ */
+#define TUNE_NETAPI_DEFAULT_BUFFER_SIZE 1600
+
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_DEFAULT_NUM_BUFFERS
+ * This defines the number of netapi default pktlib heap buffers
+ * (and associated descriptors). This can be set at @ref netapi_init.
+ */
+#define TUNE_NETAPI_DEFAULT_NUM_BUFFERS 200
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM
+ * Defines the total number of QM descriptors.
+ * @note Must be a power of 2; 16384 is the absolute maximum.
+ */
+#define TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM 0x4000
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_NUM_GLOBAL_DESC
+ * This defines the number of global descriptors.
+ * @note Must be a power of 2
+*/
+#define TUNE_NETAPI_NUM_GLOBAL_DESC TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_DESC_SIZE
+ * This defines the descriptor size.
+ * @note This define should NOT be changed
+ */
+#define TUNE_NETAPI_DESC_SIZE 128
+
+#ifdef NETAPI_USE_DDR
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_QM_START_INDEX
+ * This defines the queue manager start index.
+ * @note This must reflect what the kernel is using for its region;
+ * see the device tree blob for details.
+ */
+#define TUNE_NETAPI_QM_START_INDEX 0
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_QM_GLOBAL_REGION
+ * This defines the queue manager global region.
+ * @note This must reflect what the kernel is using for its region;
+ * see the device tree blob for details.
+ */
+#define TUNE_NETAPI_QM_GLOBAL_REGION 18
+
+#else /* use msmc */
+#define TUNE_NETAPI_QM_START_INDEX 0
+#define TUNE_NETAPI_QM_GLOBAL_REGION 0
+#endif
+
+
+/* NWAL internal config. Should not have to change */
+#define TUNE_NETAPI_CONFIG_MAX_PA_TO_SA_DESC 32
+#define TUNE_NETAPI_CONFIG_MAX_SA_TO_PA_DESC 200
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_MAC
+ * This defines the number of logical mac addresses
+ */
+#define TUNE_NETAPI_MAX_NUM_MAC 64
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_IP
+ * This defines the number of ip addresses
+ */
+#define TUNE_NETAPI_MAX_NUM_IP 64
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_PORTS_PER_CORE
+ * This defines the number of ports per core
+ */
+#define TUNE_NETAPI_MAX_NUM_PORTS_PER_CORE 4
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_PORTS
+ * This defines the maximum number of ports
+ */
+#define TUNE_NETAPI_MAX_NUM_PORTS (TUNE_NETAPI_MAX_NUM_PORTS_PER_CORE * \
+ TUNE_NETAPI_NUM_CORES)
+
+#ifdef NETAPI_ENABLE_SECURITY
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS
+ * This defines the maximum number of IPsec channels
+ */
+#define TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS 128
+#else
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS
+ * This defines the maximum number of IPsec channels
+ */
+#define TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS 0
+#endif
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_L2_L3_HDRS
+ * This defines the maximum number of L2/L3 headers to reserve
+ * in the NWAL layer. This should be kept small, as the transport library
+ * does not expose this NWAL feature by default
+ */
+#define TUNE_NETAPI_MAX_NUM_L2_L3_HDRS 3
+
+/**
+ * @ingroup tune_parameters
+ * @def TUNE_NETAPI_MAX_NUM_TRANS
+ * This defines the maximum number of transactions with NETCP that
+ * can be outstanding at any one time
+ */
+#define TUNE_NETAPI_MAX_NUM_TRANS (TUNE_NETAPI_MAX_NUM_MAC + \
+ TUNE_NETAPI_MAX_NUM_IP + \
+ TUNE_NETAPI_MAX_NUM_PORTS + \
+ TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS)
+
+/* PA control buffer pool (internal) */
+#define TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE 520
+#define TUNE_NETAPI_CONFIG_NUM_CTL_RX_BUF 16
+#define TUNE_NETAPI_CONFIG_NUM_CTL_TX_BUF 16
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MCSDK_TUNE_H_ */
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -19,41 +20,86 @@ extern "C" {
#endif
#include <odp_std_types.h>
+#include <odp_ti_mcsdk.h>
/**
* ODP buffer
*/
-typedef uint32_t odp_buffer_t;
+typedef Ti_Pkt * odp_buffer_t;
-#define ODP_BUFFER_INVALID (0) /**< Invalid buffer */
+#define ODP_BUFFER_INVALID ((odp_buffer_t)0) /**< Invalid buffer */
+/**
+ * @internal Convert ODP buffer to PKTLIB packet handle
+ *
+ * @param buf Buffer handle
+ *
+ * @return PKTLIB packet handle
+ */
+static inline Ti_Pkt *_odp_buf_to_ti_pkt(odp_buffer_t buf)
+{
+ return (Ti_Pkt *)buf;
+}
/**
- * Buffer start address
+ * @internal Convert PKTLIB packet handle to ODP buffer
*
- * @param buf Buffer handle
+ * @param pkt PKTLIB packet handle
*
- * @return Buffer start address
+ * @return ODP buffer handle
*/
-void *odp_buffer_addr(odp_buffer_t buf);
+static inline odp_buffer_t _ti_pkt_to_odp_buf(Ti_Pkt *pkt)
+{
+ return (odp_buffer_t)pkt;
+}
/**
- * Buffer maximum data size
+ * @internal Convert ODP buffer to CPPI descriptor
+ *
+ * @param buf Buffer handle
+ *
+ * @return CPPI descriptor
+ */
+static inline Cppi_HostDesc *_odp_buf_to_cppi_desc(odp_buffer_t buf)
+{
+ return Pktlib_getDescFromPacket(_odp_buf_to_ti_pkt(buf));
+}
+
+/**
+ * @internal Convert CPPI descriptor to ODP buffer
+ *
+ * @param desc CPPI descriptor pointer
+ *
+ * @return ODP buffer handle
+ */
+static inline odp_buffer_t _cppi_desc_to_odp_buf(Cppi_HostDesc *desc)
+{
+ return _ti_pkt_to_odp_buf(Pktlib_getPacketFromDesc(desc));
+}
+
+/**
+ * Buffer start address
*
* @param buf Buffer handle
*
- * @return Buffer maximum data size
+ * @return Buffer start address
*/
-size_t odp_buffer_size(odp_buffer_t buf);
+static inline void *odp_buffer_addr(odp_buffer_t buf)
+{
+ return (void *)_odp_buf_to_cppi_desc(buf)->buffPtr;
+}
/**
- * Buffer type
+ * Buffer maximum data size
*
* @param buf Buffer handle
*
- * @return Buffer type
+ * @return Buffer maximum data size
*/
-int odp_buffer_type(odp_buffer_t buf);
+static inline size_t odp_buffer_size(odp_buffer_t buf)
+{
+ return _odp_buf_to_cppi_desc(buf)->buffLen;
+}
#define ODP_BUFFER_TYPE_INVALID (-1) /**< Buffer type invalid */
#define ODP_BUFFER_TYPE_ANY 0 /**< Buffer that can hold any other
@@ -61,15 +107,17 @@ int odp_buffer_type(odp_buffer_t buf);
#define ODP_BUFFER_TYPE_RAW 1 /**< Raw buffer, no additional metadata */
#define ODP_BUFFER_TYPE_PACKET 2 /**< Packet buffer */
#define ODP_BUFFER_TYPE_TIMEOUT 3 /**< Timeout buffer */
-
/**
- * Tests if buffer is part of a scatter/gather list
+ * Buffer type
*
* @param buf Buffer handle
*
- * @return 1 if belongs to a scatter list, otherwise 0
+ * @return Buffer type
*/
-int odp_buffer_is_scatter(odp_buffer_t buf);
+static inline int odp_buffer_type(odp_buffer_t buf)
+{
+ return Pktlib_getUsrFlags(_odp_buf_to_ti_pkt(buf));
+}
/**
* Tests if buffer is valid
@@ -78,16 +126,46 @@ int odp_buffer_is_scatter(odp_buffer_t buf);
*
* @return 1 if valid, otherwise 0
*/
-int odp_buffer_is_valid(odp_buffer_t buf);
+static inline int odp_buffer_is_valid(odp_buffer_t buf)
+{
+ return (buf != ODP_BUFFER_INVALID);
+}
/**
* Print buffer metadata to STDOUT
*
* @param buf Buffer handle
- *
*/
void odp_buffer_print(odp_buffer_t buf);
+/**
+ * @internal Set buffer user context
+ *
+ * @param buffer Buffer handle
+ * @param context User context
+ */
+static inline void odp_buffer_set_ctx(odp_buffer_t buffer, void *context)
+{
+ Cppi_setTimeStamp(Cppi_DescType_HOST,
+ (Cppi_Desc *)_odp_buf_to_cppi_desc(buffer),
+ (uint32_t) context);
+}
+
+/**
+ * @internal Get buffer user context
+ *
+ * @param buffer Buffer handle
+ *
+ * @return User context
+ */
+static inline void *odp_buffer_get_ctx(odp_buffer_t buffer)
+{
+ uint32_t app_ctx_id = 0;
+ Cppi_getTimeStamp(Cppi_DescType_HOST,
+ (Cppi_Desc *)_odp_buf_to_cppi_desc(buffer),
+ &app_ctx_id);
+ return (void *)app_ctx_id;
+}
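+
+/*
+ * Usage sketch for the context helpers above (illustrative only; 'flow' is
+ * a hypothetical application object, not part of this API):
+ *
+ * @code
+ * void *flow = app_lookup_flow(buf);     // hypothetical application call
+ * odp_buffer_set_ctx(buf, flow);         // stash the pointer in the descriptor
+ * ...
+ * void *ctx = odp_buffer_get_ctx(buf);   // retrieve it later
+ * @endcode
+ *
+ * Note that the context is stored in the 32-bit CPPI timestamp field, so it
+ * must fit into 32 bits.
+ */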
#ifdef __cplusplus
}
new file mode 100644
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP buffer pool
+ */
+
+#ifndef ODP_BUFFER_POOL_H_
+#define ODP_BUFFER_POOL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#include <odp_std_types.h>
+#include <odp_buffer.h>
+#include <ti/runtime/pktlib/pktlib.h>
+
+/** Maximum buffer pool name length in chars */
+/* #define ODP_BUFFER_POOL_NAME_LEN PKTLIB_MAX_HEAP_NAME */
+
+/** Invalid buffer pool */
+#define ODP_BUFFER_POOL_INVALID (NULL)
+
+/** ODP buffer pool */
+typedef Pktlib_HeapHandle odp_buffer_pool_t;
+
+
+/**
+ * Create a buffer pool
+ *
+ * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
+ * @param base_addr Pool base address
+ * @param size Pool size in bytes
+ * @param buf_size Buffer size in bytes
+ * @param buf_align Minimum buffer alignment
+ * @param buf_type Buffer type
+ *
+ * @return Buffer pool handle
+ */
+odp_buffer_pool_t odp_buffer_pool_create(const char *name,
+ void *base_addr, uint64_t size,
+ size_t buf_size, size_t buf_align,
+ int buf_type);
+
+
+/**
+ * Find a buffer pool by name
+ *
+ * @param name Name of the pool
+ *
+ * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found.
+ */
+odp_buffer_pool_t odp_buffer_pool_lookup(const char *name);
+
+
+/**
+ * Print buffer pool info
+ *
+ * @param pool Pool handle
+ *
+ */
+void odp_buffer_pool_print(odp_buffer_pool_t pool);
+
+
+
+/**
+ * Buffer alloc
+ *
+ * @param pool Pool handle
+ *
+ * @return Buffer handle or ODP_BUFFER_INVALID
+ */
+odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool);
+
+
+/**
+ * Buffer free
+ *
+ * @param buf Buffer handle
+ *
+ */
+void odp_buffer_free(odp_buffer_t buf);
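+
+/*
+ * Minimal pool usage sketch (illustrative only; the base address, size and
+ * buffer parameters below are placeholders, not recommended values):
+ *
+ * @code
+ * odp_buffer_pool_t pool;
+ * odp_buffer_t buf;
+ *
+ * pool = odp_buffer_pool_create("test_pool", base_addr, pool_size,
+ *                               1600, ODP_CACHE_LINE_SIZE,
+ *                               ODP_BUFFER_TYPE_PACKET);
+ * if (pool == ODP_BUFFER_POOL_INVALID)
+ *         return -1;
+ *
+ * buf = odp_buffer_alloc(pool);
+ * if (!odp_buffer_is_valid(buf))
+ *         return -1;
+ *
+ * odp_buffer_free(buf);
+ * @endcode
+ */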
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_PACKET_H_
+#define ODP_PACKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_buffer.h>
+
+/**
+ * ODP packet descriptor
+ */
+typedef odp_buffer_t odp_packet_t;
+
+/** Invalid packet */
+#define ODP_PACKET_INVALID ODP_BUFFER_INVALID
+
+/** Invalid offset */
+#define ODP_PACKET_OFFSET_INVALID ((size_t)-1)
+
+
+/**
+ * Initialize the packet
+ *
+ * Needs to be called if the user allocates a packet buffer, i.e. the packet
+ * has not been received from I/O through ODP.
+ *
+ * @param pkt Packet handle
+ */
+void odp_packet_init(odp_packet_t pkt);
+
+/**
+ * Convert from packet handle to buffer handle
+ *
+ * @param buf Buffer handle
+ *
+ * @return Packet handle
+ */
+static inline odp_packet_t odp_packet_from_buffer(odp_buffer_t buf)
+{
+ return buf;
+}
+
+/**
+ * Convert from buffer handle to packet handle
+ *
+ * @param pkt Packet handle
+ *
+ * @return Buffer handle
+ */
+static inline odp_buffer_t odp_buffer_from_packet(odp_packet_t pkt)
+{
+ return pkt;
+}
+
+/**
+ * Set the packet length
+ *
+ * @param pkt Packet handle
+ * @param len Length of packet in bytes
+ */
+void odp_packet_set_len(odp_packet_t pkt, size_t len);
+
+/**
+ * Get the packet length
+ *
+ * @param pkt Packet handle
+ *
+ * @return Packet length in bytes
+ */
+size_t odp_packet_get_len(odp_packet_t pkt);
+
+/**
+ * Get address to the start of the packet buffer
+ *
+ * The address of the packet buffer is not necessarily the same as the start
+ * address of the received frame, e.g. an eth frame may be offset by 2 or 6
+ * bytes to ensure 32 or 64-bit alignment of the IP header.
+ * Use odp_packet_l2(pkt) to get the start address of a received valid frame
+ * or odp_packet_start(pkt) to get the start address even if no valid L2 header
+ * could be found.
+ *
+ * @param pkt Packet handle
+ *
+ * @return Pointer to the start of the packet buffer
+ *
+ * @see odp_packet_l2(), odp_packet_start()
+ */
+uint8_t *odp_packet_buf_addr(odp_packet_t pkt);
+
+/**
+ * Get pointer to the start of the received frame
+ *
+ * The address of the packet buffer is not necessarily the same as the start
+ * address of the received frame, e.g. an eth frame may be offset by 2 or 6
+ * bytes to ensure 32 or 64-bit alignment of the IP header.
+ * Use odp_packet_l2(pkt) to get the start address of a received valid eth frame
+ *
+ * odp_packet_start() will always return a pointer to the start of the frame,
+ * even if the frame is unrecognized and no valid L2 header could be found.
+ *
+ * @param pkt Packet handle
+ *
+ * @return Pointer to the start of the received frame
+ *
+ * @see odp_packet_l2(), odp_packet_buf_addr()
+ */
+uint8_t *odp_packet_start(odp_packet_t pkt);
+
+/**
+ * Get pointer to the start of the L2 frame
+ *
+ * The L2 frame header address is not necessarily the same as the address of the
+ * packet buffer, see odp_packet_buf_addr()
+ *
+ * @param pkt Packet handle
+ *
+ * @return Pointer to L2 header or NULL if not found
+ *
+ * @see odp_packet_buf_addr(), odp_packet_start()
+ */
+uint8_t *odp_packet_l2(odp_packet_t pkt);
+
+/**
+ * Return the byte offset from the packet buffer to the L2 frame
+ *
+ * @param pkt Packet handle
+ *
+ * @return L2 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ */
+size_t odp_packet_l2_offset(odp_packet_t pkt);
+
+/**
+ * Set the byte offset to the L2 frame
+ *
+ * @param pkt Packet handle
+ * @param offset L2 byte offset
+ */
+void odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset);
+
+
+/**
+ * Get pointer to the start of the L3 packet
+ *
+ * @param pkt Packet handle
+ *
+ * @return Pointer to L3 packet or NULL if not found
+ *
+ */
+uint8_t *odp_packet_l3(odp_packet_t pkt);
+
+/**
+ * Return the byte offset from the packet buffer to the L3 packet
+ *
+ * @param pkt Packet handle
+ *
+ * @return L3 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ */
+size_t odp_packet_l3_offset(odp_packet_t pkt);
+
+/**
+ * Set the byte offset to the L3 packet
+ *
+ * @param pkt Packet handle
+ * @param offset L3 byte offset
+ */
+void odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset);
+
+
+/**
+ * Get pointer to the start of the L4 packet
+ *
+ * @param pkt Packet handle
+ *
+ * @return Pointer to L4 packet or NULL if not found
+ *
+ */
+uint8_t *odp_packet_l4(odp_packet_t pkt);
+
+/**
+ * Return the byte offset from the packet buffer to the L4 packet
+ *
+ * @param pkt Packet handle
+ *
+ * @return L4 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ */
+size_t odp_packet_l4_offset(odp_packet_t pkt);
+
+/**
+ * Set the byte offset to the L4 packet
+ *
+ * @param pkt Packet handle
+ * @param offset L4 byte offset
+ */
+void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset);
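+
+/*
+ * Header access sketch (illustrative only): the L2/L3/L4 accessors return
+ * NULL, and the offset accessors ODP_PACKET_OFFSET_INVALID, when the
+ * corresponding header was not found.
+ *
+ * @code
+ * uint8_t *l3 = odp_packet_l3(pkt);
+ * size_t l3_off = odp_packet_l3_offset(pkt);
+ *
+ * if (l3 == NULL || l3_off == ODP_PACKET_OFFSET_INVALID)
+ *         return;              // no L3 header recognized in this packet
+ *
+ * // l3 is equivalent to odp_packet_buf_addr(pkt) + l3_off
+ * @endcode
+ */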
+
+/**
+ * Print (debug) information about the packet
+ *
+ * @param pkt Packet handle
+ */
+void odp_packet_print(odp_packet_t pkt);
+
+/**
+ * Copy contents and metadata from pkt_src to pkt_dst
+ * Useful when creating copies of packets
+ *
+ * @param pkt_dst Destination packet
+ * @param pkt_src Source packet
+ *
+ * @return 0 if successful
+ */
+int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src);
+
+/**
+ * Set packet user context
+ *
+ * @param pkt Packet handle
+ * @param ctx User context
+ *
+ */
+static inline void odp_packet_set_ctx(odp_packet_t pkt, void *ctx)
+{
+ odp_buffer_set_ctx(odp_buffer_from_packet(pkt), ctx);
+}
+
+/**
+ * Get packet user context
+ *
+ * @param pkt Packet handle
+ *
+ * @return User context
+ */
+static inline void *odp_packet_get_ctx(odp_packet_t pkt)
+{
+ return odp_buffer_get_ctx(odp_buffer_from_packet(pkt));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Packet IO
+ */
+
+#ifndef ODP_PACKET_IO_H_
+#define ODP_PACKET_IO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_std_types.h>
+#include <odp_buffer_pool.h>
+#include <odp_packet.h>
+#include <odp_queue.h>
+
+#include <odp_pktio_types.h>
+
+/** ODP packet IO handle */
+typedef uint32_t odp_pktio_t;
+
+/** Invalid packet IO handle */
+#define ODP_PKTIO_INVALID ((odp_pktio_t)-1)
+
+/**
+ * Open an ODP packet IO instance
+ *
+ * @param dev Packet IO device
+ * @param pool Pool to use for packet IO
+ * @param params Set of parameters to pass to the arch dependent implementation
+ *
+ * @return ODP packet IO handle or ODP_PKTIO_INVALID on error
+ */
+odp_pktio_t odp_pktio_open(const char *dev, odp_buffer_pool_t pool,
+ odp_pktio_params_t *params);
+
+/**
+ * Close an ODP packet IO instance
+ *
+ * @param id ODP packet IO handle
+ *
+ * @return 0 on success or -1 on error
+ */
+int odp_pktio_close(odp_pktio_t id);
+
+/**
+ * Receive packets
+ *
+ * @param id ODP packet IO handle
+ * @param pkt_table[] Storage for received packets (filled by function)
+ * @param len Length of pkt_table[], i.e. max number of pkts to receive
+ *
+ * @return Number of packets received or -1 on error
+ */
+int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len);
+
+/**
+ * Send packets
+ *
+ * @param id ODP packet IO handle
+ * @param pkt_table[] Array of packets to send
+ * @param len length of pkt_table[]
+ *
+ * @return Number of packets sent or -1 on error
+ */
+int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len);
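+
+/*
+ * Basic I/O loop sketch (illustrative only; "eth0", 'pool', 'params' and the
+ * burst size are placeholders chosen for the example):
+ *
+ * @code
+ * odp_packet_t pkts[16];
+ * odp_pktio_t pktio;
+ * int n;
+ *
+ * pktio = odp_pktio_open("eth0", pool, &params);
+ * if (pktio == ODP_PKTIO_INVALID)
+ *         return -1;
+ *
+ * n = odp_pktio_recv(pktio, pkts, 16);
+ * if (n > 0)
+ *         odp_pktio_send(pktio, pkts, n);
+ * @endcode
+ */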
+
+/**
+ * Set the default input queue to be associated with a pktio handle
+ *
+ * @param id ODP packet IO handle
+ * @param queue Default input queue
+ *
+ * @return 0 on success or -1 on error
+ */
+int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue);
+
+/**
+ * Get default input queue associated with a pktio handle
+ *
+ * @param id ODP packet IO handle
+ *
+ * @return Default input queue set or ODP_QUEUE_INVALID on error
+ */
+odp_queue_t odp_pktio_inq_getdef(odp_pktio_t id);
+
+/**
+ * Remove default input queue (if set)
+ *
+ * @param id ODP packet IO handle
+ *
+ * @return 0 on success or -1 on error
+ */
+int odp_pktio_inq_remdef(odp_pktio_t id);
+
+/**
+ * Query default output queue
+ *
+ * @param id ODP packet IO handle
+ *
+ * @return Default out queue or ODP_QUEUE_INVALID on error
+ */
+odp_queue_t odp_pktio_outq_getdef(odp_pktio_t id);
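+
+/*
+ * Queue binding sketch (illustrative only; 'pktio' is a previously opened
+ * handle, and the sketch assumes the linux-generic odp_queue_create() API
+ * with the ODP_QUEUE_TYPE_PKTIN queue type):
+ *
+ * @code
+ * odp_queue_t inq;
+ *
+ * inq = odp_queue_create("pktio_inq", ODP_QUEUE_TYPE_PKTIN, NULL);
+ * odp_pktio_inq_setdef(pktio, inq);
+ * ...
+ * odp_pktio_inq_remdef(pktio);
+ * @endcode
+ */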
+
+/**
+ * Store packet input handle into packet
+ *
+ * @param pkt ODP packet buffer handle
+ * @param id ODP packet IO handle
+ */
+void odp_pktio_set_input(odp_packet_t pkt, odp_pktio_t id);
+
+/**
+ * Get stored packet input handle from packet
+ *
+ * @param pkt ODP packet buffer handle
+ *
+ * @return Packet IO handle
+ */
+odp_pktio_t odp_pktio_get_input(odp_packet_t pkt);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_STATE_H_
+#define ODP_STATE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ti_mcsdk.h>
+
+/**
+ * @internal Global ODP state
+ */
+struct odp_global_s {
+ struct mcsdk_cfg_s cfg; /**< McSDK configuration */
+ struct {
+ nwal_Inst handle; /**< NWAL handle */
+ Pktlib_HeapHandle sa2pa_heap; /**< Internal SA->PA heap */
+ Pktlib_HeapHandle pa2sa_heap; /**< Internal PA->SA heap */
+ } nwal; /**< Global NWAL state */
+};
+
+/** @internal Per process ODP state */
+struct odp_proc_s {
+ struct {
+ Pktlib_HeapHandle netcp_heap; /**< internal default heap */
+ Pktlib_HeapHandle netcp_control_rx_heap; /**< rx control messages */
+ Pktlib_HeapHandle netcp_control_tx_heap; /**< tx control messages */
+ } nwal; /**< Per process NWAL state */
+ Rm_ServiceHandle *rm_service; /**< Resource Manager service handle */
+};
+
+/** @internal Per thread ODP state */
+struct odp_local_s {
+ struct {
+ nwalLocCfg_t cfg; /**< Local NWAL configuration */
+ } nwal; /**< thread NWAL state */
+ int is_main_thread; /**< Marks the main thread which runs global init */
+};
+
+extern struct odp_global_s *odp_global;
+extern struct odp_proc_s odp_proc;
+extern __thread struct odp_local_s odp_local;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_STATE_H_ */
new file mode 100644
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_TI_MCSDK_H_
+#define ODP_TI_MCSDK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <ti/csl/cslr_device.h>
+#include <ti/runtime/hplib/hplib.h>
+#include <ti/runtime/pktlib/pktlib.h>
+#include <ti/drv/nwal/nwal.h>
+#include <ti/drv/nwal/nwal_osal.h>
+#include <mcsdk_tune.h>
+
+/** @internal McSDK initialization configuration */
+struct mcsdk_cfg_s {
+ int def_mem_size; /**< Bytes of CMA memory we have allocated */
+ int min_buf_headroom_size; /**< Minimal amount of headroom in a buffer */
+ int def_max_descriptors; /**< Number of descriptors in system (must be power of 2), 2^14 max */
+ int def_tot_descriptors_for_us; /**< Number of descriptors to create in our region (must be power of 2) */
+ int def_heap_n_descriptors; /**< Number of descriptors plus buffers in default heap */
+ int def_heap_n_zdescriptors; /**< Number of zero-length descriptors in default heap */
+ int def_heap_buf_size; /**< Size of buffers in default heap, max amount of area for packet data */
+ int def_heap_tailroom_size; /**< Size of tailroom in reserve */
+ int def_heap_extra_size; /**< Size of extra space at end of buffer */
+ int def_multi_process; /**< Flag to indicate if NETAPI init is for multi-process environment */
+};
+
+Rm_ServiceHandle *rm_client_init(void);
+int mcsdk_global_init(void);
+int mcsdk_local_init(int thread_id);
+int mcsdk_cppi_init(void);
+int mcsdk_qmss_init(int max_descriptors);
+int mcsdk_qmss_start(void);
+int mcsdk_cppi_start(void);
+int mcsdk_qmss_setup_memregion(uint32_t desc_num, uint32_t desc_size,
+ uint32_t *desc_mem_base, Qmss_MemRegion mem_region);
+int mcsdk_nwal_init(int region2use, Pktlib_HeapIfTable *p_table);
+int mcsdk_nwal_start(Pktlib_HeapHandle pkt_heap,
+ Pktlib_HeapHandle cmd_rx_heap,
+ Pktlib_HeapHandle cmd_tx_heap);
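+
+/*
+ * Expected call order (sketch only; it follows the global/per-thread state
+ * split in odp_state.h, and the exact sequence is defined by the
+ * implementation in mcsdk/mcsdk_init.c):
+ *
+ * @code
+ * mcsdk_global_init();           // once, from the thread doing global init
+ * ...
+ * mcsdk_local_init(thread_id);   // from every thread that uses the McSDK
+ * @endcode
+ */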
+
+extern Pktlib_HeapIfTable pktlib_if_table;
+extern hplib_virtualAddrInfo_T odp_vm_info;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_TI_MCSDK_H_ */
deleted file mode 100644
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2012, Texas Instruments Incorporated - http://www.ti.com/
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Texas Instruments Incorporated nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef ODP_CONFIG_PLATFORM_H_
-#define ODP_CONFIG_PLATFORM_H_
-
-/* #include <openem/event_machine.h> */
-#if defined(TI_EM_C6678)
-#include <configs/odp_config_platform_c6678.h>
-#elif defined(TI_EM_C6614)
-#include <configs/odp_config_platform_c6614.h>
-#elif defined(TI_EM_C6638)
-#include <configs/odp_config_platform_c6638.h>
-#else
-#error "platform not defined or unsupported!"
-#endif
-
-#define TI_ODP_PUBLIC_DESC_NUM (4096u)
-#define TI_ODP_REGION_NUM (2) /* local regions are not used on Linux */
-
-#define MY_EM_DEVICE_ID (0)
-#define MY_EM_PROCESS_ID (0)
-
-/*
- * Queue, pool and event definitions
- */
-#define MY_EM_PROC_QUEUE_NUM (32)
-#define MY_EM_PROC_QUEUE_TYPE (EM_QUEUE_TYPE_PARALLEL)
-#define MY_EM_PROC_EVENT_TYPE (TI_EM_EVENT_TYPE_PRELOAD_OFF)
-
-#endif /* ODP_CONFIG_PLATFORM_H_ */
deleted file mode 100644
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2012, Texas Instruments Incorporated - http://www.ti.com/
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Texas Instruments Incorporated nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-/*
- * This is the typical configuration for TCI6638 (KeyStone 2, Linux ARM A15)
- *
- * Descriptors and PDSP communications memory must reside in contiguous and coherent DDR
- * (using CMA).
- *
- * On KeyStone2 QMSS regions do not need to be ordered.
- */
-#ifndef ODP_CONFIG_PLATFORM_C6638_H_
-#define ODP_CONFIG_PLATFORM_C6638_H_
-
-/* Cores are here "EM cores" that are not necessarily tied to real "CPU cores" */
-#define MY_EM_CORE_NUM (4) /* number of cores used by OpenEM */
-#define MY_EM_INIT_CORE_IDX (0) /* core on which the init will be started */
-
-/* Index of the QMSS PDSP that will be used by OpenEM, Linux use QMSS PDSP0 for accumulator */
-#define MY_EM_SCHED_PDSP_IDX (2)
-
-/* Define if we are polling or waiting on event interrupts when dispatching events */
-#define MY_EM_DISPATCH_MODE (TI_EM_RH_POLL_MODE)
-
-/*
- * Coherent contiguous memory used for PDSP <-> CPU communication
- * We use one page per slot and CORE_NUM + 2 slots
- */
-#ifdef TI_EM_USE_MSM
-#define MY_EM_PDSP_COMM_MEM_BASE (0x0c000000) /* MSM */
-#else
-#define MY_EM_PDSP_COMM_MEM_BASE (0x0) /* use DDR from CMA (contiguous & coherent)*/
-#endif
-#define MY_EM_PDSP_COMM_MEM_VBASE (0x0) /* dynamic mapping */
-#define MY_EM_PDSP_COMM_MEM_SIZE (0x00010000) /* allowing 16 slots */
-#define MY_EM_PDSP_COMM_MEM_OFFSET (0x0) /* no offset */
-
-/*
- * Base physical address for event descriptors.
- * In the future in will be managed by Linux or platform resource manager.
- */
-#ifdef TI_EM_USE_MSM
-#define TI_ODP_PUBLIC_DESC_BASE (0x0c100000) /* MSM */
-#define TI_ODP_PUBLIC_DESC_VBASE (0x0) /* dynamic mapping */
-#define TI_ODP_PUBLIC_DESC_OFFSET (0x0) /* no offset, QMSS/PDSP mapping equal to CPU mapping */
-#else /* TI_EM_USE_MSM */
-#define TI_ODP_PUBLIC_DESC_BASE (0x0) /* use DDR from CMA (contiguous & coherent)*/
-#define TI_ODP_PUBLIC_DESC_VBASE (0x0) /* dynamic mapping */
-#define TI_ODP_PUBLIC_DESC_OFFSET (0x0) /* no offset, QMSS/PDSP mapping equal to CPU mapping */
-#endif /* TI_EM_USE_MSM */
-
-#define TI_ODP_PUBLIC_REGION_IDX (1) /* Linux uses 12 & 13 on ARM, set in DTS */
-#define TI_ODP_PRIVATE_REGION_IDX (2)
-#define TI_ODP_PUBLIC_START_DESC_IDX (0) /* start index for desc (Linux starts at 0x4000, set in DTS) */
-#define TI_ODP_PRIVATE_START_DESC_IDX (-1) /* Automatically computed */
-
-#define TI_ODP_PRIVATE_DESC_BASE (TI_EM_PDSPSH_DRAM) /* use PDSP data RAM */
-#define TI_ODP_PRIVATE_DESC_OFFSET (TI_EM_PDSP_DRAM_OFFSET) /* offset between CPU and QMSS/PDSP mapping */
-#define TI_ODP_PRIVATE_DESC_VBASE (0x0) /* dynamic mapping */
-
-/*
- * For the time being, free queues that can be used from user application are
- * harcoded here. In the future it will be provided by platform resource manager.
- */
-#define TI_ODP_PUBLIC_QUEUE_BASE_IDX (QMSS_GENERAL_PURPOSE_USER_QUEUE_BASE)
-#define TI_ODP_FREE_QUEUE_BASE_IDX (TI_ODP_PUBLIC_QUEUE_BASE_IDX + ODP_CONFIG_QUEUES)
-#define MY_EM_PRIVATE_FREE_QUEUE_IDX (TI_ODP_FREE_QUEUE_BASE_IDX + ODP_CONFIG_BUFFER_POOLS)
-#define MY_EM_SCHED_QUEUE_IDX (MY_EM_PRIVATE_FREE_QUEUE_IDX + 2)
-
-#endif /* ODP_CONFIG_PLATFORM_C6638_H_ */
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -23,60 +24,25 @@ extern "C" {
#include <odp_buffer_pool.h>
#include <odp_buffer.h>
#include <odp_queue.h>
-#include <odp_debug.h>
+#include <odp_debug_internal.h>
#include <odp_align.h>
-#include <event_machine_macros.h>
-#include <event_machine_types.h>
-#include <event_machine_group.h>
-#include <event_machine_hw_macros.h>
-#include <event_machine_hw_types.h>
-#include <event_machine_hw_ti_macros.h>
-#include <event_machine_hw_ti_types.h>
-#include <ti_em_osal_cppi.h>
-#include <src/event_machine_hwpform.h>
-
-/* TODO: move these to correct files */
-
-typedef uintptr_t odp_phys_addr_t;
-
-#define ODP_BUFFER_POOL_BITS 4
-#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS)
-#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS)
-#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS)
-
-typedef union odp_buffer_bits_t {
- uint32_t u32;
- odp_buffer_t handle;
-
- struct {
- uint32_t pool:ODP_BUFFER_POOL_BITS;
- uint32_t index:ODP_BUFFER_INDEX_BITS;
- };
-} odp_buffer_bits_t;
-
-typedef struct odp_buffer_hdr_t {
- Cppi_HostDesc desc;
- void *buf_vaddr;
- uint32_t free_queue;
+typedef struct odp_bufhdr {
int type;
} odp_buffer_hdr_t;
+ODP_STATIC_ASSERT(sizeof(Cppi_HostDesc) <= ODP_CACHE_LINE_SIZE,
+ "ODP_BUFFER_HDR_T__SIZE_ERROR");
-/*
- * Chunk of buffers (in single pool)
- */
-
-ODP_STATIC_ASSERT(sizeof(odp_buffer_hdr_t) <= ODP_CACHE_LINE_SIZE*2,
- "ODP_BUFFER_HDR_T__SIZE_ERROR");
-
-static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
+static inline struct odp_bufhdr *odp_buffer_hdr(odp_buffer_t buf)
{
- return (odp_buffer_hdr_t *)buf;
+ return (struct odp_bufhdr *)(_odp_buf_to_cppi_desc(buf)->origBuffPtr);
}
-static inline odp_buffer_t hdr_to_odp_buf(odp_buffer_hdr_t *hdr)
+
+/* Compatibility function for timer code reused from linux-generic */
+static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
{
- return (odp_buffer_t)hdr;
+ return (odp_buffer_hdr_t *)odp_buffer_hdr(buf);
}
extern odp_buffer_pool_t odp_buf_to_pool(odp_buffer_t buf);
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -20,58 +21,7 @@ extern "C" {
#include <odp_std_types.h>
#include <odp_buffer_pool.h>
-#include <odp_buffer_internal.h>
-#include <odp_align.h>
-#include <odp_hints.h>
-#include <odp_config.h>
-#include <odp_debug.h>
-
-/* Use ticketlock instead of spinlock */
-#define POOL_USE_TICKETLOCK
-
-/* Extra error checks */
-/* #define POOL_ERROR_CHECK */
-
-
-#ifdef POOL_USE_TICKETLOCK
-#include <odp_ticketlock.h>
-#else
-#include <odp_spinlock.h>
-#endif
-
-struct pool_entry_s {
-#ifdef POOL_USE_TICKETLOCK
- odp_ticketlock_t lock ODP_ALIGNED_CACHE;
-#else
- odp_spinlock_t lock ODP_ALIGNED_CACHE;
-#endif
-
- uint64_t free_bufs;
- char name[ODP_BUFFER_POOL_NAME_LEN];
-
- odp_buffer_pool_t pool ODP_ALIGNED_CACHE;
- uint64_t num_bufs;
- void *pool_base_addr;
- uintptr_t pool_base_paddr;
- uint64_t pool_size;
- size_t payload_size;
- size_t payload_align;
- int buf_type;
- uint32_t free_queue;
-
- uintptr_t buf_base;
- size_t buf_size;
- size_t buf_offset;
- size_t hdr_size;
-};
-extern void *pool_entry_ptr[];
-
-
-static inline void *get_pool_entry(odp_buffer_pool_t pool_id)
-{
- return pool_entry_ptr[pool_id];
-}
uint32_t _odp_pool_get_free_queue(odp_buffer_pool_t pool_id);
#ifdef __cplusplus
new file mode 100644
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_DEBUG_INTERNAL_H_
+#define ODP_DEBUG_INTERNAL_H_
+
+#include <stdio.h>
+#include <odp_debug.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ODP_PRINT_LEVEL_DISABLED 0
+#define ODP_PRINT_LEVEL_CRIT 1
+#define ODP_PRINT_LEVEL_ERR 2
+#define ODP_PRINT_LEVEL_WARN 3
+#define ODP_PRINT_LEVEL_INFO 4
+#define ODP_PRINT_LEVEL_DBG 5
+#define ODP_PRINT_LEVEL_VDBG 6
+#define ODP_PRINT_LEVEL_MAX 7
+
+#define ODP_PRINT_LEVEL ODP_PRINT_LEVEL_WARN
+
+/**
+ * Internal debug printing macro
+ */
+#ifndef ODP_NO_PRINT
+#define odp_print(level, fmt, ...) \
+ do { if (level <= ODP_PRINT_LEVEL) \
+ fprintf(stderr, "%s():%d: " fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+#else
+#define odp_print(level, fmt, ...)
+#endif
+
+#define odp_pr_err(fmt, ...) \
+ odp_print(ODP_PRINT_LEVEL_ERR, fmt, ##__VA_ARGS__)
+#define odp_pr_warn(fmt, ...) \
+ odp_print(ODP_PRINT_LEVEL_WARN, fmt, ##__VA_ARGS__)
+#define odp_pr_info(fmt, ...) \
+ odp_print(ODP_PRINT_LEVEL_INFO, fmt, ##__VA_ARGS__)
+#define odp_pr_dbg(fmt, ...) \
+ odp_print(ODP_PRINT_LEVEL_DBG, fmt, ##__VA_ARGS__)
+#define odp_pr_vdbg(fmt, ...) \
+ odp_print(ODP_PRINT_LEVEL_VDBG, fmt, ##__VA_ARGS__)
+
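+
+/*
+ * Usage sketch (illustrative only): a message is printed to stderr when its
+ * level is at or below the compile-time ODP_PRINT_LEVEL (WARN by default).
+ *
+ * odp_pr_err("pool create failed: %d\n", ret);  // printed at default level
+ * odp_pr_dbg("desc base %p\n", desc_base);      // silent at default level
+ */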
+void odp_print_mem(void *addr, size_t size, const char *desc);
+
+static inline void odp_pr_mem(int level, void *addr, size_t size,
+ const char *desc)
+{
+ if (level <= ODP_PRINT_LEVEL)
+ odp_print_mem(addr, size, desc);
+}
+
+#define odp_pr_err_mem(...) odp_pr_mem(ODP_PRINT_LEVEL_ERR, ##__VA_ARGS__)
+#define odp_pr_dbg_mem(...) odp_pr_mem(ODP_PRINT_LEVEL_DBG, ##__VA_ARGS__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP HW system information
+ */
+
+#ifndef ODP_INTERNAL_H_
+#define ODP_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_state.h>
+
+int odp_system_info_init(void);
+
+void odp_thread_init_global(void);
+void odp_thread_init_local(int thr_id);
+
+int odp_shm_init_global(void);
+int odp_shm_init_local(void);
+
+int odp_buffer_pool_init_global(void);
+
+int odp_pktio_init_global(void);
+
+int odp_queue_init_global(void);
+
+int odp_schedule_init_global(void);
+int odp_schedule_init_local(void);
+
+int odp_timer_init_global(void);
+int odp_timer_disarm_all(void);
+
+int mcsdk_global_init(void);
+int mcsdk_local_init(int thread_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -97,36 +98,48 @@ ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), "OUTPUT_FLAGS_SIZE
/**
* Internal Packet header
*/
-typedef struct {
+struct odp_pkthdr {
/* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+ struct odp_bufhdr buf_hdr;
input_flags_t input_flags;
error_flags_t error_flags;
output_flags_t output_flags;
- uint32_t frame_offset; /**< offset to start of frame, even on error */
- uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
- uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
- uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+ uint16_t frame_offset; /**< offset to start of frame, even on error */
+ uint16_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
+ uint16_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
+ uint16_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+
+ uint32_t frame_len;
odp_pktio_t input;
-} odp_packet_hdr_t;
+ struct {
+ int16_t saved_buf_offset;
+ uint32_t hash_offset;
+ union {
+ struct {
+ } enc;
+ struct {
+ uint32_t hash_tag[5];
+ } dec;
+ };
+
+ } crypto;
+
+};
-ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 128, "ODP_PACKET_HDR_T_SIZE_ERROR");
+ODP_STATIC_ASSERT(sizeof(struct odp_pkthdr) <= ODP_CACHE_LINE_SIZE,
+ "PACKET_HDR_T_SIZE_ERROR");
/**
* Return the packet header
*/
-static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
-{
- return (odp_packet_hdr_t *)odp_buf_to_hdr((odp_buffer_t)pkt);
-}
-
-static inline odp_packet_hdr_t *odp_bufhdr_to_pkthdr(odp_buffer_hdr_t *hdr)
+static inline struct odp_pkthdr *odp_packet_hdr(odp_packet_t pkt)
{
- return (odp_packet_hdr_t *)hdr;
+ odp_buffer_t buf = odp_buffer_from_packet(pkt);
+ return (struct odp_pkthdr *)odp_buffer_hdr(buf);
}
/**
@@ -134,6 +147,19 @@ static inline odp_packet_hdr_t *odp_bufhdr_to_pkthdr(odp_buffer_hdr_t *hdr)
*/
void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset);
+static inline void odp_pr_packet(int level, odp_packet_t pkt)
+{
+ if (level <= ODP_PRINT_LEVEL)
+ odp_packet_print(pkt);
+}
+
+#define odp_pr_err_packet(...) \
+ odp_pr_packet(ODP_PRINT_LEVEL_ERR, ##__VA_ARGS__)
+#define odp_pr_dbg_packet(...) \
+ odp_pr_packet(ODP_PRINT_LEVEL_DBG, ##__VA_ARGS__)
+#define odp_pr_vdbg_packet(...) \
+ odp_pr_packet(ODP_PRINT_LEVEL_VDBG, ##__VA_ARGS__)
+
#ifdef __cplusplus
}
#endif
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -20,26 +21,18 @@ extern "C" {
#include <odp_spinlock.h>
#include <odp_packet_socket.h>
-#ifdef ODP_HAVE_NETMAP
-#include <odp_packet_netmap.h>
-#endif
-
-#define PKTIO_DEV_MAX_NAME_LEN 10
-struct pktio_device {
- const char name[PKTIO_DEV_MAX_NAME_LEN];
- uint32_t tx_hw_queue;
- uint32_t rx_channel;
- uint32_t rx_flow;
- uint32_t port_id;
-};
+#include <ti/drv/nwal/nwal.h>
+#include <ti/drv/nwal/nwal_util.h>
struct pktio_entry {
- odp_spinlock_t lock; /**< entry spinlock */
- int taken; /**< is entry taken(1) or free(0) */
- odp_queue_t inq_default; /**< default input queue, if set */
- odp_queue_t outq_default; /**< default out queue */
- odp_buffer_pool_t in_pool;
- struct pktio_device *dev;
+ odp_spinlock_t lock; /**< entry spinlock */
+ int taken; /**< is entry taken(1) or free(0) */
+ odp_queue_t inq_default; /**< default input queue, if set */
+ odp_queue_t outq_default; /**< default out queue */
+ odp_buffer_pool_t in_pool; /**< pool for incoming packets */
+ odp_pktio_t id; /**< pktio handle */
+ nwalTxPSCmdInfo_t tx_ps_cmdinfo; /**< saved Command Label */
+ int port; /**< netcp port number */
};
typedef union {
new file mode 100644
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet IO - implementation internal
+ */
+
+#ifndef ODP_PACKET_IO_QUEUE_H_
+#define ODP_PACKET_IO_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_queue_internal.h>
+#include <odp_buffer_internal.h>
+
+odp_buffer_t pktin_dequeue(queue_entry_t *queue);
+int pktin_deq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num);
+int pktout_enqueue(queue_entry_t *queue, odp_buffer_t buf);
+int pktout_enq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
@@ -1,10 +1,11 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -21,8 +22,8 @@ extern "C" {
#include <odp_queue.h>
#include <odp_buffer_internal.h>
#include <odp_packet_io.h>
+#include <odp_packet_io_internal.h>
#include <odp_align.h>
-#include <configs/odp_config_platform.h>
#define USE_TICKETLOCK
@@ -43,13 +44,11 @@ extern "C" {
/* forward declaration */
union queue_entry_u;
-typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *);
-typedef odp_buffer_hdr_t *(*deq_func_t)(union queue_entry_u *);
+typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_t);
+typedef odp_buffer_t (*deq_func_t)(union queue_entry_u *);
-typedef int (*enq_multi_func_t)(union queue_entry_u *,
- odp_buffer_hdr_t **, int);
-typedef int (*deq_multi_func_t)(union queue_entry_u *,
- odp_buffer_hdr_t **, int);
+typedef int (*enq_multi_func_t)(union queue_entry_u *, odp_buffer_t *, int);
+typedef int (*deq_multi_func_t)(union queue_entry_u *, odp_buffer_t *, int);
struct queue_entry_s {
#ifdef USE_TICKETLOCK
@@ -58,23 +57,20 @@ struct queue_entry_s {
odp_spinlock_t lock ODP_ALIGNED_CACHE;
#endif
- odp_buffer_hdr_t *head;
- odp_buffer_hdr_t *tail;
int status;
- enq_func_t enqueue ODP_ALIGNED_CACHE;
- deq_func_t dequeue;
- enq_multi_func_t enqueue_multi;
- deq_multi_func_t dequeue_multi;
+ enq_func_t enqueue ODP_ALIGNED_CACHE;
+ deq_func_t dequeue;
+ enq_multi_func_t enqueue_multi;
+ deq_multi_func_t dequeue_multi;
odp_queue_t handle;
odp_buffer_t sched_buf;
odp_queue_type_t type;
odp_queue_param_t param;
odp_pktio_t pktin;
- odp_pktio_t pktout;
- uint32_t out_port_id;
- uint32_t hw_queue;
+ pktio_entry_t *pktout_entry;
+ Qmss_QueueHnd qmss_queue;
char name[ODP_QUEUE_NAME_LEN];
};
@@ -86,11 +82,11 @@ typedef union queue_entry_u {
queue_entry_t *get_qentry(uint32_t queue_id);
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
-odp_buffer_hdr_t *queue_deq(queue_entry_t *queue);
+int queue_enq(queue_entry_t *queue, odp_buffer_t buf);
+odp_buffer_t queue_deq(queue_entry_t *queue);
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num);
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num);
void queue_lock(queue_entry_t *queue);
void queue_unlock(queue_entry_t *queue);
@@ -116,23 +112,20 @@ static inline queue_entry_t *queue_to_qentry(odp_queue_t handle)
return get_qentry(queue_id);
}
-static inline void _ti_hw_queue_push_desc(uint32_t hw_queue,
- odp_buffer_hdr_t *buf_hdr)
+static inline const char *odp_queue_name(odp_queue_t handle)
{
- ti_em_osal_hw_queue_push_size(hw_queue,
- (void *)&buf_hdr->desc,
- sizeof(Cppi_HostDesc),
- TI_EM_MEM_PUBLIC_DESC);
+ return queue_to_qentry(handle)->s.name;
}
-static inline odp_buffer_hdr_t *_ti_hw_queue_pop_desc(uint32_t hw_queue)
+
+static inline Qmss_QueueHnd _odp_queue_to_qmss_queue(odp_queue_t queue)
{
- return ti_em_osal_hw_queue_pop(hw_queue,
- TI_EM_MEM_PUBLIC_DESC);
+ queue_entry_t *entry = queue_to_qentry(queue);
+ return entry->s.qmss_queue;
}
odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
- odp_queue_param_t *param, uint32_t hw_queue);
+ odp_queue_param_t *param, int32_t hw_queue);
#ifdef __cplusplus
}
deleted file mode 100644
@@ -1,29 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP shared memory internal
- */
-
-#ifndef ODP_SHARED_MEMORY_INTERNAL_H_
-#define ODP_SHARED_MEMORY_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void *_odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
- int type);
-uintptr_t _odp_shm_get_paddr(void *vaddr);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
new file mode 100644
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOCKRMMSG_H__
+#define __SOCKRMMSG_H__
+
+#include <stdint.h>
+
+#define RM_SERVER_SOCKET_NAME "/tmp/var/run/rm/rm_server"
+
+#define msg_alloc(p) \
+ do { \
+ p = calloc(1, sizeof(*p)); \
+ if (p) { \
+ p->length = sizeof(*p); \
+ } \
+ } while (0)
+
+#define msg_length(x) ((x) ? (sizeof(*x) + x->length) : 0)
+#define msg_data(x) ((x->length) ? ((char *)x + sizeof(*x)) : NULL)
+
+#endif /* __SOCKRMMSG_H__ */
new file mode 100644
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SOCKUTILS_H__
+#define __SOCKUTILS_H__
+
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#ifndef UNIX_PATH_MAX
+#define UNIX_PATH_MAX 108
+#endif
+
+
+typedef enum {
+ sock_name_e,
+ sock_addr_e
+} sock_name_type;
+
+typedef struct {
+ sock_name_type type;
+ union sock {
+ char *name;
+ struct sockaddr_un *addr;
+ } s;
+} sock_name_t;
+
+#define sock_h void *
+
+sock_h sock_open(sock_name_t *sock_name);
+
+int sock_close(sock_h handle);
+
+int sock_send(sock_h handle, const char *data, int length,
+ sock_name_t *to);
+
+int sock_wait(sock_h handle, int *size, struct timeval *timeout, int extern_fd);
+
+int sock_recv(sock_h handle, char *data, int length, sock_name_t *from);
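+
+/*
+ * Client-side sketch (illustrative only; the client socket path and the
+ * 'data'/'length' arguments are placeholders, and RM_SERVER_SOCKET_NAME
+ * comes from sockrmmsg.h):
+ *
+ * sock_name_t server = { .type = sock_name_e,
+ *                        .s.name = RM_SERVER_SOCKET_NAME };
+ * sock_name_t client = { .type = sock_name_e,
+ *                        .s.name = "/tmp/my_client_sock" };
+ * sock_h sock = sock_open(&client);
+ *
+ * sock_send(sock, data, length, &server);
+ * sock_close(sock);
+ */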
+
+#endif
new file mode 100644
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <odp_align.h>
+#include <odp_thread.h>
+#include <odp_internal.h>
+#include <odp_ti_mcsdk.h>
+#include <odp_debug_internal.h>
+
+/* Global variables to hold virtual address of various subsystems */
+hplib_virtualAddrInfo_T odp_vm_info;
+
+/*
+ * Global variables which need to be populated with memory pool attributes
+ * that are passed to HPLIB for memory pool initialization
+ */
+void *global_descriptor_mem_base;
+void *sa_context_mem_base;
+
+static uint8_t *cma_mem_alloc(uint32_t size);
+static void cma_mem_free(uint8_t *ptr, uint32_t size);
+
+Pktlib_HeapIfTable pktlib_if_table = {
+ .data_malloc = cma_mem_alloc,
+ .data_free = cma_mem_free,
+};
+
+struct mcsdk_cfg_s default_mcsdk_cfg = {
+ .def_mem_size = TUNE_NETAPI_PERM_MEM_SZ,
+ .def_max_descriptors = TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM,
+ .def_tot_descriptors_for_us = TUNE_NETAPI_NUM_GLOBAL_DESC,
+ .def_heap_n_descriptors = TUNE_NETAPI_DEFAULT_NUM_BUFFERS,
+ .def_heap_n_zdescriptors = 0,
+ .def_heap_buf_size = TUNE_NETAPI_DEFAULT_BUFFER_SIZE,
+ .def_heap_tailroom_size = 0,
+ .def_heap_extra_size = 0,
+ .min_buf_headroom_size = ODP_CACHE_LINE_SIZE,
+};
+
+/**
+ * NWAL Memory Buffer Configuration
+ * @todo: Buffers for NWAL can be allocated dynamically
+ */
+#define NWAL_CONFIG_SEC_CONTEXT_SZ 384
+
+#define NWAL_CONFIG_BUFSIZE_NWAL_HANDLE 3400
+
+#define NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC 256
+#define NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN 256
+#define NWAL_CONFIG_BUFSIZE_NWAL_PER_IP 128
+#define NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT 128
+#define NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR 128
+#define NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT 384
+#define NWAL_CHAN_HANDLE_SIZE \
+ ((NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC * TUNE_NETAPI_MAX_NUM_MAC) + \
+ (NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN * \
+ TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2) + \
+ (NWAL_CONFIG_BUFSIZE_NWAL_PER_IP * TUNE_NETAPI_MAX_NUM_IP) + \
+ (NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT * TUNE_NETAPI_MAX_NUM_PORTS) + \
+ (NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT * TUNE_NETAPI_NUM_CORES) + \
+ (NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR * \
+ TUNE_NETAPI_MAX_NUM_L2_L3_HDRS))
+
+uint8_t nwal_inst_mem[NWAL_CONFIG_BUFSIZE_NWAL_HANDLE] ODP_ALIGNED_CACHE;
+uint8_t nwal_handle_mem[NWAL_CHAN_HANDLE_SIZE] ODP_ALIGNED_CACHE;
+
+/**
+ * @todo: Check if the size information below can be made available
+ * from the PA interface file
+ */
+#define NWAL_CONFIG_BUFSIZE_PA_BUF0 256
+#define NWAL_CONFIG_BUFSIZE_PA_BUF1 (128 * TUNE_NETAPI_MAX_NUM_MAC)
+#define NWAL_CONFIG_BUFSIZE_PA_BUF2 13824
+
+struct pa_global {
+ /* Memory used for the PA Instance.*/
+ uint8_t pa_buf0[NWAL_CONFIG_BUFSIZE_PA_BUF0] ODP_ALIGNED_CACHE;
+ /* Memory used for PA handles */
+ uint8_t pa_buf1[NWAL_CONFIG_BUFSIZE_PA_BUF1] ODP_ALIGNED_CACHE;
+ uint8_t pa_buf2[NWAL_CONFIG_BUFSIZE_PA_BUF2] ODP_ALIGNED_CACHE;
+};
+
+
+#define NWAL_CONFIG_BUFSIZE_SA_HANDLE 512
+#define NWAL_CONFIG_BUFSIZE_SA_HANDLE_PER_CHAN 512
+
+struct sa_global {
+ /* Memory used for SA LLD global Handle */
+ uint8_t salld_handle[NWAL_CONFIG_BUFSIZE_SA_HANDLE] ODP_ALIGNED_CACHE;
+ /* Memory used by SA LLD per Channel */
+ uint8_t salld_chan_handle[NWAL_CONFIG_BUFSIZE_SA_HANDLE_PER_CHAN *
+ TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2]
+ ODP_ALIGNED_CACHE;
+};
+
+static uint8_t *cma_mem_alloc(uint32_t size)
+{
+ return (uint8_t *)hplib_vmMemAlloc(
+ size + odp_global->cfg.def_heap_extra_size, 128, 0);
+}
+
+static void cma_mem_free(uint8_t *ptr ODP_UNUSED, uint32_t size ODP_UNUSED)
+{
+ /* Do nothing: buffer memory is never returned to hplib. */
+ odp_pr_err("cma_mem_free() is not implemented\n");
+ return;
+}
+
+/********************************************************************
+ * FUNCTION PURPOSE: Internal NETAPI function to initialize NWAL subsystem
+ ********************************************************************
+ * DESCRIPTION: Internal NETAPI function to initialize NWAL subsystem
+ ********************************************************************/
+int mcsdk_nwal_init(int region2use, Pktlib_HeapIfTable *p_table)
+{
+ nwalSizeInfo_t nwal_size_info;
+ nwal_RetValue nwal_ret;
+ nwalGlobCfg_t nwal_global_cfg;
+ uint8_t count;
+ int sizes[nwal_N_BUFS];
+ int aligns[nwal_N_BUFS];
+ void *bases[nwal_N_BUFS];
+ Pktlib_HeapCfg heap_cfg;
+ int32_t pktlib_err;
+ void *base = NULL;
+ struct pa_global *pa_entry = NULL;
+ struct sa_global *sa_entry = NULL;
+
+ memset(&odp_global->nwal, 0, sizeof(odp_global->nwal));
+ memset(&nwal_global_cfg, 0, sizeof(nwal_global_cfg));
+
+ nwal_global_cfg.rmHandle = odp_proc.rm_service;
+
+ base = hplib_shmOpen();
+ if (base) {
+ if (hplib_shmAddEntry(base, sizeof(struct pa_global), PA_ENTRY)
+ == hplib_OK) {
+ pa_entry = (struct pa_global *)hplib_shmGetEntry(
+ base, PA_ENTRY);
+ nwal_global_cfg.instPoolBaseAddr = (void *)pa_entry;
+ } else {
+ odp_pr_err("Unable to Add shared memory segment for PASS\n");
+ return -1;
+ }
+ if (hplib_shmAddEntry(base, sizeof(struct sa_global), SA_ENTRY)
+ == hplib_OK) {
+ sa_entry = (struct sa_global *)hplib_shmGetEntry(
+ base, SA_ENTRY);
+ nwal_global_cfg.instPoolSaBaseAddr = (void *)sa_entry;
+ } else {
+ odp_pr_err("Unable to Add shared memory segment for SASS\n");
+ return -1;
+ }
+ }
+ /* Initialize Buffer Pool for NetCP PA to SA packets */
+ nwal_global_cfg.pa2SaBufPool.numBufPools = 1;
+ nwal_global_cfg.pa2SaBufPool.bufPool[0].descSize =
+ TUNE_NETAPI_DESC_SIZE;
+ nwal_global_cfg.pa2SaBufPool.bufPool[0].bufSize =
+ odp_global->cfg.def_heap_buf_size;
+
+ /* Initialize the heap configuration. */
+ memset((void *)&heap_cfg, 0, sizeof(Pktlib_HeapCfg));
+ /* Populate the heap configuration */
+ heap_cfg.name = "nwal PA2SA";
+ heap_cfg.memRegion = region2use;
+ heap_cfg.sharedHeap = 0;
+ heap_cfg.useStarvationQueue = 0;
+ heap_cfg.dataBufferSize = odp_global->cfg.def_heap_buf_size;
+ heap_cfg.numPkts = TUNE_NETAPI_CONFIG_MAX_PA_TO_SA_DESC;
+ heap_cfg.numZeroBufferPackets = 0;
+ heap_cfg.heapInterfaceTable.data_malloc = p_table->data_malloc;
+ heap_cfg.heapInterfaceTable.data_free = p_table->data_free;
+ heap_cfg.dataBufferPktThreshold = 0;
+ heap_cfg.zeroBufferPktThreshold = 0;
+
+ nwal_global_cfg.pa2SaBufPool.bufPool[0].heapHandle =
+ Pktlib_createHeap(&heap_cfg, &pktlib_err);
+ if (nwal_global_cfg.pa2SaBufPool.bufPool[0].heapHandle == NULL) {
+ odp_pr_err("Heap Creation Failed for PA to SA Buffer Pool, Error Code: %d\n",
+ pktlib_err);
+ return -1;
+ }
+ odp_global->nwal.pa2sa_heap =
+ nwal_global_cfg.pa2SaBufPool.bufPool[0].heapHandle;
+ /* Initialize Buffer Pool for NetCP SA to PA packets */
+ nwal_global_cfg.sa2PaBufPool.numBufPools = 1;
+ nwal_global_cfg.sa2PaBufPool.bufPool[0].descSize =
+ TUNE_NETAPI_DESC_SIZE;
+ nwal_global_cfg.sa2PaBufPool.bufPool[0].bufSize =
+ odp_global->cfg.def_heap_buf_size;
+
+ /* Populate the heap configuration */
+ heap_cfg.name = "nwal SA2PA";
+ heap_cfg.numPkts = TUNE_NETAPI_CONFIG_MAX_SA_TO_PA_DESC;
+
+ nwal_global_cfg.sa2PaBufPool.bufPool[0].heapHandle =
+ Pktlib_createHeap(&heap_cfg, &pktlib_err);
+ if (nwal_global_cfg.sa2PaBufPool.bufPool[0].heapHandle == NULL) {
+ odp_pr_err("Heap Creation Failed for SA to PA Buffer Pool, Error Code: %d\n",
+ pktlib_err);
+ return -1;
+ }
+ odp_global->nwal.sa2pa_heap =
+ nwal_global_cfg.sa2PaBufPool.bufPool[0].heapHandle;
+ nwal_global_cfg.hopLimit = 5;/* Default TTL / Hop Limit */
+ nwal_global_cfg.paPowerOn = nwal_TRUE;
+ nwal_global_cfg.saPowerOn = nwal_TRUE;
+ nwal_global_cfg.paFwActive = nwal_TRUE;
+ nwal_global_cfg.saFwActive = nwal_FALSE;
+
+ /* Pick Default Physical Address */
+ nwal_global_cfg.paVirtBaseAddr = (uint32_t)odp_vm_info.passCfgVaddr;
+ nwal_global_cfg.saVirtBaseAddr = (uint32_t)odp_vm_info.passCfgVaddr +
+ CSL_NETCP_CFG_SA_CFG_REGS -
+ CSL_NETCP_CFG_REGS;
+
+ nwal_global_cfg.rxDefPktQ = QMSS_PARAM_NOT_SPECIFIED;
+
+ /* Get the Buffer Requirement from NWAL */
+ memset(&nwal_size_info, 0, sizeof(nwal_size_info));
+ nwal_size_info.nMaxMacAddress = TUNE_NETAPI_MAX_NUM_MAC;
+ nwal_size_info.nMaxIpAddress = TUNE_NETAPI_MAX_NUM_IP;
+ nwal_size_info.nMaxL4Ports = TUNE_NETAPI_MAX_NUM_PORTS;
+ nwal_size_info.nMaxIpSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;
+ nwal_size_info.nMaxDmSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;
+ nwal_size_info.nMaxL2L3Hdr = TUNE_NETAPI_MAX_NUM_L2_L3_HDRS;
+ /**
+ * @todo: nProc increased by 1, because nwal_getLocContext()
+ * checks for >=. Better to fix nwal_getLocContext()
+ */
+ nwal_size_info.nProc = TUNE_NETAPI_NUM_CORES + 1;
+ nwal_ret = nwal_getBufferReq(&nwal_size_info, sizes, aligns);
+ if (nwal_ret != nwal_OK) {
+ odp_pr_err("nwal_getBufferReq Failed %d\n",
+ nwal_ret);
+ return nwal_FALSE;
+ }
+
+ /* Check for memory size requirement and update the base */
+ count = 0;
+ bases[nwal_BUF_INDEX_INST] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)nwal_inst_mem);
+ if (NWAL_CONFIG_BUFSIZE_NWAL_HANDLE < sizes[nwal_BUF_INDEX_INST]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_INT_HANDLES] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)nwal_handle_mem);
+ if (NWAL_CHAN_HANDLE_SIZE < sizes[nwal_BUF_INDEX_INT_HANDLES]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+ bases[nwal_BUF_INDEX_PA_LLD_BUF0] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)pa_entry->pa_buf0);
+ if ((NWAL_CONFIG_BUFSIZE_PA_BUF0) < sizes[nwal_BUF_INDEX_PA_LLD_BUF0]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_PA_LLD_BUF1] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)pa_entry->pa_buf1);
+ if ((NWAL_CONFIG_BUFSIZE_PA_BUF1) < sizes[nwal_BUF_INDEX_PA_LLD_BUF1]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_PA_LLD_BUF2] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)pa_entry->pa_buf2);
+ if ((NWAL_CONFIG_BUFSIZE_PA_BUF2) < sizes[nwal_BUF_INDEX_PA_LLD_BUF2]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+#ifdef NETAPI_ENABLE_SECURITY
+ bases[nwal_BUF_INDEX_SA_LLD_HANDLE] =
+ (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)sa_entry->salld_handle);
+ if ((NWAL_CONFIG_BUFSIZE_SA_HANDLE)
+ < sizes[nwal_BUF_INDEX_SA_LLD_HANDLE]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_SA_CONTEXT] = (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)sa_context_mem_base);
+ /* also save this here for easy access to sa_start */
+ nwal_global_cfg.scPoolBaseAddr = bases[nwal_BUF_INDEX_SA_CONTEXT];
+ count++;
+
+ bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] =
+ (uint32_t *)Osal_nwalLocToGlobAddr(
+ (uint32_t)sa_entry->salld_chan_handle);
+ if ((NWAL_CONFIG_BUFSIZE_SA_HANDLE_PER_CHAN
+ * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS * 2)
+ < sizes[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE]) {
+ /* Resize Memory */
+ while (1)
+ ;
+ }
+ count++;
+#else
+ bases[nwal_BUF_INDEX_SA_LLD_HANDLE] = 0;
+ bases[nwal_BUF_INDEX_SA_CONTEXT] = 0;
+ bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] = 0;
+ count = count+3;
+#endif
+ if (count != nwal_N_BUFS) {
+ while (1)
+ ;
+ }
+
+ /* Initialize NWAL module */
+ nwal_ret = nwal_create(&nwal_global_cfg, &nwal_size_info, sizes, bases,
+ &odp_global->nwal.handle);
+ if (nwal_ret != nwal_OK) {
+ odp_pr_err("nwal_create Failed %d\n",
+ nwal_ret);
+ return -1;
+ }
+
+ odp_pr_dbg("Global and Local Network initialization Successful\n");
+ return 1;
+}
+
+/********************************************************************
+ * FUNCTION PURPOSE: Internal NETAPI function to start NWAL
+ ********************************************************************
+ * DESCRIPTION: Internal NETAPI function to start NWAL, per thread/core
+ ********************************************************************/
+int mcsdk_nwal_start(Pktlib_HeapHandle pkt_heap, Pktlib_HeapHandle cmd_rx_heap,
+ Pktlib_HeapHandle cmd_tx_heap)
+{
+ nwalLocCfg_t nwal_local_cfg;
+ nwal_RetValue nwal_ret;
+
+ memset(&nwal_local_cfg, 0, sizeof(nwal_local_cfg));
+
+ /*
+ * Update the Start of Packet Offset for the default flows created
+ * by NWAL
+ */
+ nwal_local_cfg.rxSopPktOffset = odp_global->cfg.min_buf_headroom_size;
+ nwal_local_cfg.rxPktTailRoomSz = odp_global->cfg.def_heap_tailroom_size;
+
+ /* Call back registration for the core */
+ nwal_local_cfg.pRxPktCallBack = NULL;
+ nwal_local_cfg.pCmdCallBack = NULL;
+ nwal_local_cfg.pPaStatsCallBack = NULL;
+ nwal_local_cfg.pRxDmCallBack = NULL;
+
+ /* Initialize Buffer Pool for Control packets from NetCP to Host */
+ nwal_local_cfg.rxCtlPool.numBufPools = 1;
+ nwal_local_cfg.rxCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwal_local_cfg.rxCtlPool.bufPool[0].bufSize =
+ TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ nwal_local_cfg.rxCtlPool.bufPool[0].heapHandle = cmd_rx_heap;
+
+ /* Initialize Buffer Pool for Control packets from Host to NetCP */
+ nwal_local_cfg.txCtlPool.numBufPools = 1;
+ nwal_local_cfg.txCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwal_local_cfg.txCtlPool.bufPool[0].bufSize =
+ TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ nwal_local_cfg.txCtlPool.bufPool[0].heapHandle = cmd_tx_heap;
+
+ /* Initialize Buffer Pool for Packets from NetCP to Host */
+ nwal_local_cfg.rxPktPool.numBufPools = 1;
+ nwal_local_cfg.rxPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwal_local_cfg.rxPktPool.bufPool[0].bufSize =
+ odp_global->cfg.def_heap_buf_size;
+ nwal_local_cfg.rxPktPool.bufPool[0].heapHandle = pkt_heap;
+
+ /* Initialize Buffer Pool for Packets from Host to NetCP */
+ nwal_local_cfg.txPktPool.numBufPools = 1;
+ nwal_local_cfg.txPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwal_local_cfg.txPktPool.bufPool[0].bufSize =
+ odp_global->cfg.def_heap_buf_size;
+ nwal_local_cfg.txPktPool.bufPool[0].heapHandle = pkt_heap;
+
+ memcpy(&odp_local.nwal.cfg, &nwal_local_cfg, sizeof(nwalLocCfg_t));
+ while (1) {
+ nwal_ret = nwal_start(odp_global->nwal.handle, &nwal_local_cfg);
+ if (nwal_ret == nwal_ERR_INVALID_STATE)
+ continue;
+ break;
+ }
+
+ if (nwal_ret != nwal_OK) {
+ odp_pr_err(">nwal_start:Failed ->err %d !!!\n", nwal_ret);
+ return -1;
+ }
+ return 1;
+}
+
+int mcsdk_global_init(void)
+{
+ int32_t result;
+ Pktlib_HeapHandle shared_heap;
+ Pktlib_HeapHandle control_rx_heap, control_tx_heap;
+ Pktlib_HeapCfg heap_cfg;
+ int32_t pktlib_err;
+ void *base;
+ hplib_memPoolAttr_T mem_pool_attr[HPLIB_MAX_MEM_POOLS];
+ int thread_id;
+
+ thread_id = odp_thread_create(0);
+ odp_thread_init_local(thread_id);
+ hplib_utilSetupThread(thread_id, NULL, hplib_spinLock_Type_LOL);
+
+ odp_local.is_main_thread = 1; /*Prevent local_init on this thread */
+
+ base = hplib_shmCreate(HPLIB_SHM_SIZE);
+ if (base == NULL) {
+ odp_pr_err("hplib_shmCreate failure\n");
+ return -1;
+ } else {
+ odp_pr_dbg("hplib_shmCreate success\n");
+ }
+
+ if (hplib_shmAddEntry(base, sizeof(struct odp_global_s), NETAPI_ENTRY)
+ != hplib_OK) {
+ odp_pr_err("hplib_shmAddEntry failed for NETAPI_ENTRY\n");
+ return -1;
+ } else {
+ odp_pr_dbg("hplib_shmAddEntry success for NETAPI_ENTRY\n");
+ odp_global = hplib_shmGetEntry(base, NETAPI_ENTRY);
+ odp_global->cfg = default_mcsdk_cfg;
+ }
+
+ hplib_utilModOpen();
+ hplib_utilOsalCreate();
+
+ odp_proc.rm_service = rm_client_init();
+
+#ifdef NETAPI_USE_DDR
+ /* Init attributes for DDR */
+ mem_pool_attr[0].attr = HPLIB_ATTR_KM_CACHED0;
+ mem_pool_attr[0].phys_addr = 0;
+ mem_pool_attr[0].size = 0;
+
+ /* Init attributes for un-cached MSMC */
+ mem_pool_attr[1].attr = HPLIB_ATTR_UN_CACHED;
+ mem_pool_attr[1].phys_addr = CSL_MSMC_SRAM_REGS;
+ mem_pool_attr[1].size = TUNE_NETAPI_PERM_MEM_SZ;
+#else
+ mem_pool_attr[1].attr = HPLIB_ATTR_KM_CACHED0;
+ mem_pool_attr[1].phys_addr = 0;
+ mem_pool_attr[1].size = 0;
+
+ /* Init attributes for un-cached MSMC */
+ mem_pool_attr[0].attr = HPLIB_ATTR_UN_CACHED;
+ mem_pool_attr[0].phys_addr = CSL_MSMC_SRAM_REGS;
+ mem_pool_attr[0].size = TUNE_NETAPI_PERM_MEM_SZ;
+#endif
+ /*
+ * Initialize all the memory we are going to use:
+ * - chunk for buffers and descriptors
+ * - memory-mapped peripherals we use, such as QMSS, PA, etc.
+ */
+ result = hplib_vmInit(&odp_vm_info, 2, &mem_pool_attr[0]);
+
+ hplib_initMallocArea(0);
+ hplib_initMallocArea(1);
+
+#ifdef NETAPI_ENABLE_SECURITY
+ /*
+ * allocate 2x number of tunnels since we need one for inflow and
+ * one for data mode
+ */
+ sa_context_mem_base = hplib_vmMemAlloc(
+ (TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS * 2 *
+ NWAL_CONFIG_SEC_CONTEXT_SZ),
+ 128, 0);
+ if (!sa_context_mem_base) {
+ odp_pr_err("Failed to map SA context memory region\n");
+ return -1;
+ }
+ odp_pr_dbg("SA Memory mapped/allocated at address %p.\n",
+ sa_context_mem_base);
+
+#else
+ sa_context_mem_base = NULL;
+#endif
+
+ /* Allocate QM region from contiguous chunk above */
+ global_descriptor_mem_base = hplib_vmMemAlloc(
+ (odp_global->cfg.def_tot_descriptors_for_us
+ * TUNE_NETAPI_DESC_SIZE),
+ 128, 0);
+
+ odp_pr_dbg("global desc region=%p\n", global_descriptor_mem_base);
+
+ /* Initialize Queue Manager Sub System */
+ result = mcsdk_qmss_init(odp_global->cfg.def_max_descriptors);
+
+ if (result != 1) {
+ odp_pr_err("returned from netapip_initQm with failure\n");
+ return -1;
+ }
+
+ /* Start the QMSS. */
+ if (mcsdk_qmss_start() != 1) {
+ odp_pr_err("returned from netapip_startQm with failure\n");
+ return -1;
+ }
+
+ /* Initialize the global descriptor memory region. */
+ result = mcsdk_qmss_setup_memregion(
+ odp_global->cfg.def_tot_descriptors_for_us,
+ TUNE_NETAPI_DESC_SIZE,
+ global_descriptor_mem_base,
+ TUNE_NETAPI_QM_GLOBAL_REGION);
+
+ if (result < 0) {
+ odp_pr_err("can't setup QM shared region\n");
+ return -1;
+ }
+
+ odp_pr_dbg("returned from netapip_qmSetupMemRegion\n");
+ /* Initialize CPPI CPDMA */
+
+ result = mcsdk_cppi_init();
+ odp_pr_dbg("returned from netapip_initCppi\n");
+ if (result != 1) {
+ odp_pr_err("Error initializing CPPI SubSystem error code : %d\n",
+ result);
+ return -1;
+ }
+ mcsdk_cppi_start();
+
+ /* CPPI and Queue Manager are initialized. */
+ odp_pr_dbg("Queue Manager and CPPI are initialized.\n");
+
+ /* create main pkt heap */
+ /* Initialize the Shared Heaps. */
+ Pktlib_sharedHeapInit();
+ odp_pr_dbg("returned from Pktlib_sharedHeapInit\n");
+
+ /* Initialize the heap configuration. */
+ memset((void *)&heap_cfg, 0, sizeof(Pktlib_HeapCfg));
+ /* Populate the heap configuration */
+ heap_cfg.name = "nwal_packet";
+ heap_cfg.memRegion = TUNE_NETAPI_QM_GLOBAL_REGION;
+ heap_cfg.sharedHeap = 1;
+ heap_cfg.useStarvationQueue = 0;
+ heap_cfg.dataBufferSize = odp_global->cfg.def_heap_buf_size;
+ heap_cfg.numPkts = odp_global->cfg.def_heap_n_descriptors;
+ heap_cfg.numZeroBufferPackets = odp_global->cfg.def_heap_n_zdescriptors;
+ heap_cfg.heapInterfaceTable.data_malloc =
+ pktlib_if_table.data_malloc;
+ heap_cfg.heapInterfaceTable.data_free = pktlib_if_table.data_free;
+ heap_cfg.dataBufferPktThreshold = 0;
+ heap_cfg.zeroBufferPktThreshold = 0;
+
+ /* Create Shared Heap with specified configuration. */
+ shared_heap = Pktlib_createHeap(&heap_cfg, &pktlib_err);
+ odp_pr_dbg("returned from Pktlib_createHeap1\n");
+ if (!shared_heap) {
+ /** @todo: cleanup on failure */
+ odp_pr_err("heap create failed, Error Code: %d\n",
+ pktlib_err);
+ return -1;
+ }
+ odp_proc.nwal.netcp_heap = shared_heap;
+
+ /* Update for Control */
+ heap_cfg.name = "nwal_control_rx";
+ heap_cfg.sharedHeap = 1;
+ heap_cfg.dataBufferSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ heap_cfg.numPkts = TUNE_NETAPI_CONFIG_NUM_CTL_RX_BUF;
+ heap_cfg.numZeroBufferPackets = 0;
+
+ control_rx_heap = Pktlib_createHeap(&heap_cfg, &pktlib_err);
+ odp_pr_dbg("returned from Pktlib_createHeap2\n");
+ if (!control_rx_heap) {
+ /** @todo: cleanup on failure */
+ odp_pr_err("control rx heap create failed, Error Code: %d\n",
+ pktlib_err);
+ return -1;
+ }
+ odp_proc.nwal.netcp_control_rx_heap = control_rx_heap;
+
+ heap_cfg.name = "nwal_control_tx";
+ heap_cfg.numPkts = TUNE_NETAPI_CONFIG_NUM_CTL_TX_BUF;
+
+ control_tx_heap = Pktlib_createHeap(&heap_cfg, &pktlib_err);
+ odp_pr_dbg("returned from Pktlib_createHeap3\n");
+ if (!control_tx_heap) {
+ /** @todo: cleanup on failure */
+ odp_pr_err("control tx heap create failed, Error Code: %d\n",
+ pktlib_err);
+ return -1;
+ }
+ odp_proc.nwal.netcp_control_tx_heap = control_tx_heap;
+
+ /* Init NWAL */
+ result = mcsdk_nwal_init(TUNE_NETAPI_QM_GLOBAL_REGION,
+ &pktlib_if_table);
+ if (result < 0) {
+ odp_pr_err("netapi init_nwal() failed\n");
+ return -1;
+ }
+ odp_pr_dbg("returned from netapip_initNwal\n");
+
+ /* start NWAL */
+ result = mcsdk_nwal_start(shared_heap, control_rx_heap,
+ control_tx_heap);
+ if (result < 0) {
+ odp_pr_err("netapi start_nwal() failed\n");
+ return -1;
+ }
+ odp_pr_dbg("returned from netapip_startNwal\n");
+ return 0;
+}
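+
+/*
+ * Summary of the initialization sequence implemented by mcsdk_global_init()
+ * above (for reference when reading the per-thread mcsdk_local_init() below):
+ *   odp_thread_create/odp_thread_init_local -> hplib shared memory segment ->
+ *   rm_client_init -> hplib_vmInit/hplib_initMallocArea ->
+ *   mcsdk_qmss_init + mcsdk_qmss_start -> mcsdk_qmss_setup_memregion ->
+ *   mcsdk_cppi_init + mcsdk_cppi_start -> Pktlib packet/control heaps ->
+ *   mcsdk_nwal_init -> mcsdk_nwal_start
+ */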
+
+int mcsdk_local_init(int thread_id)
+{
+ int ret;
+ /* Main thread already finished initialization */
+ if (odp_local.is_main_thread) {
+ odp_pr_dbg("Skip odp_local_init() for the main thread\n");
+ return 1;
+ }
+ odp_pr_dbg("thread_id: %d\n", thread_id);
+
+ hplib_utilSetupThread(thread_id, NULL, hplib_spinLock_Type_LOL);
+ /* Start the QMSS. */
+ if (mcsdk_qmss_start() != 1)
+ return -1;
+
+ mcsdk_cppi_start();
+
+ ret = mcsdk_nwal_start(odp_proc.nwal.netcp_heap,
+ odp_proc.nwal.netcp_control_rx_heap,
+ odp_proc.nwal.netcp_control_tx_heap);
+
+ if (ret < 0) {
+ odp_pr_err("mcsdk_nwal_start() failed\n");
+ return -1;
+ }
+ odp_pr_dbg("thread_id: %d\n", thread_id);
+ return 0;
+}
+
+void odp_print_mem(void *addr, size_t size, const char *desc)
+{
+ uint8_t *start_ptr, *end_ptr, *ptr;
+ int i;
+
+ if (!size)
+ return;
+
+ if (desc)
+ printf("\n%s (%u bytes)\n", desc, size);
+ else
+ printf("Dumping %u bytes at address 0x%08x\n",
+ size, (unsigned int)addr);
+
+ start_ptr = addr;
+ end_ptr = start_ptr + size;
+ ptr = (typeof(ptr))(((uintptr_t)start_ptr) & ~0xF);
+
+ while (ptr < end_ptr) {
+ printf("0x%08x: ", (unsigned int)ptr);
+ for (i = 0; i < 16; i++) {
+ if (start_ptr <= ptr && ptr < end_ptr)
+ printf("%02x ", *ptr);
+ else
+ printf("__ ");
+ ptr++;
+ }
+ printf("\n");
+ }
+}
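+
+/*
+ * Example (illustrative):
+ *
+ *   uint8_t hdr[20];
+ *   odp_print_mem(hdr, sizeof(hdr), "RX header");
+ *
+ * prints a hex dump, 16 bytes per line, starting at the enclosing 16-byte
+ * aligned address; positions outside [addr, addr + size) are shown as "__".
+ */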
new file mode 100644
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <unistd.h>
+#include <odp_state.h>
+
+#include <odp_ti_mcsdk.h>
+#include <odp_debug_internal.h>
+
+extern Qmss_GlobalConfigParams qmssGblCfgParams;
+extern Cppi_GlobalConfigParams cppiGblCfgParams;
+
+/**
+ * Internal NETAPI macro to convert to IP Register Virtual Address
+ * from a mapped base Virtual Address.
+ *
+ * @param virt_base_addr Virtual base address mapped using mmap for IP
+ * @param phys_base_addr Physical base address for the IP
+ * @param phys_reg_addr Physical register address
+ *
+ * @return virtual address
+ */
+static inline void *reg_phys2virt(void *virt_base_addr,
+ uint32_t phys_base_addr,
+ uint32_t phys_reg_addr)
+{
+ return (void *)((uint8_t *)virt_base_addr +
+ (phys_reg_addr - phys_base_addr));
+}
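+
+/*
+ * Example (illustrative, made-up addresses): if the NETCP config space at
+ * physical address 0x02000000 is mmap()ed at virtual address 0x40000000,
+ * a register at physical 0x02000100 is accessed through
+ *
+ *   reg_phys2virt((void *)0x40000000, 0x02000000, 0x02000100)
+ *
+ * which evaluates to (void *)0x40000100.
+ */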
+
+/*****************************************************************************
+ * FUNCTION PURPOSE: Global Initialization of CPPI. Once Per System
+ *****************************************************************************
+ * DESCRIPTION: The function will initialize the CPPI
+ *****************************************************************************/
+int mcsdk_cppi_init(void)
+{
+ int32_t result;
+ Cppi_GlobalConfigParams config_params;
+ Cppi_GlobalCPDMAConfigParams *dma_cfgs;
+
+ config_params = cppiGblCfgParams;
+ /* Convert Physical address to Virtual address for LLD access */
+ /* PASS CPDMA regs */
+ dma_cfgs = &config_params.cpDmaCfgs[Cppi_CpDma_PASS_CPDMA];
+ dma_cfgs->gblCfgRegs = reg_phys2virt(odp_vm_info.passCfgVaddr,
+ CSL_NETCP_CFG_REGS, (uint32_t)dma_cfgs->gblCfgRegs);
+
+ dma_cfgs->txChRegs = reg_phys2virt(odp_vm_info.passCfgVaddr,
+ CSL_NETCP_CFG_REGS, (uint32_t)dma_cfgs->txChRegs);
+
+ dma_cfgs->rxChRegs = reg_phys2virt(odp_vm_info.passCfgVaddr,
+ CSL_NETCP_CFG_REGS, (uint32_t)dma_cfgs->rxChRegs);
+
+ dma_cfgs->txSchedRegs = reg_phys2virt(odp_vm_info.passCfgVaddr,
+ CSL_NETCP_CFG_REGS, (uint32_t)dma_cfgs->txSchedRegs);
+
+ dma_cfgs->rxFlowRegs = reg_phys2virt(odp_vm_info.passCfgVaddr,
+ CSL_NETCP_CFG_REGS, (uint32_t)dma_cfgs->rxFlowRegs);
+
+ /* QMSS CPDMA regs */
+ dma_cfgs = &config_params.cpDmaCfgs[Cppi_CpDma_QMSS_CPDMA];
+ dma_cfgs->gblCfgRegs = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)dma_cfgs->gblCfgRegs);
+
+ dma_cfgs->txChRegs = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)dma_cfgs->txChRegs);
+
+ dma_cfgs->rxChRegs = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)dma_cfgs->rxChRegs);
+
+ dma_cfgs->txSchedRegs = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)dma_cfgs->txSchedRegs);
+
+ dma_cfgs->rxFlowRegs = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)dma_cfgs->rxFlowRegs);
+
+ result = Cppi_init(&config_params);
+ if (result != CPPI_SOK) {
+ odp_pr_err("Cppi_init failed with error code %d\n", result);
+ return -1;
+ }
+ return 1;
+}
+
+/*****************************************************************************
+ * FUNCTION PURPOSE: Global Initialization of Queue Manager. Once Per System
+ *****************************************************************************
+ * DESCRIPTION: The function will initialize the Queue Manager
+ *****************************************************************************/
+int mcsdk_qmss_init(int max_descriptors)
+{
+ Qmss_InitCfg init_config;
+ int32_t result;
+ Qmss_GlobalConfigParams config_params;
+ Qmss_GlobalConfigRegs *regs;
+ uint32_t count;
+
+ memset(&init_config, 0, sizeof(Qmss_InitCfg));
+
+ /* Use Internal Linking RAM for optimal performance */
+ init_config.linkingRAM0Base = 0;
+ init_config.linkingRAM0Size = 0;
+ init_config.linkingRAM1Base = 0;
+ init_config.maxDescNum = max_descriptors;
+ init_config.qmssHwStatus = QMSS_HW_INIT_COMPLETE;
+
+ config_params = qmssGblCfgParams;
+ config_params.qmRmServiceHandle = odp_proc.rm_service;
+ regs = &config_params.regs;
+
+ /* Convert address to Virtual address */
+ for (count = 0; count < config_params.maxQueMgrGroups; count++) {
+ Qmss_GlobalConfigGroupRegs *group_regs;
+ group_regs = &config_params.groupRegs[count];
+ group_regs->qmConfigReg = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmConfigReg);
+
+ group_regs->qmDescReg = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmDescReg);
+
+ group_regs->qmQueMgmtReg = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmQueMgmtReg);
+
+ group_regs->qmQueMgmtProxyReg = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmQueMgmtProxyReg);
+
+ group_regs->qmQueStatReg = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmQueStatReg);
+
+ group_regs->qmStatusRAM = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)group_regs->qmStatusRAM);
+
+ group_regs->qmQueMgmtDataReg = reg_phys2virt(
+ odp_vm_info.qmssDataVaddr,
+ CSL_QMSS_DATA_BASE,
+ (uint32_t)group_regs->qmQueMgmtDataReg);
+
+ group_regs->qmQueMgmtProxyDataReg =
+ NULL;
+ }
+
+ for (count = 0; count < QMSS_MAX_INTD; count++) {
+ regs->qmQueIntdReg[count] = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)regs->qmQueIntdReg[count]);
+ }
+
+ for (count = 0; count < QMSS_MAX_PDSP; count++) {
+ regs->qmPdspCmdReg[count] = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)regs->qmPdspCmdReg[count]);
+
+ regs->qmPdspCtrlReg[count] = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)regs->qmPdspCtrlReg[count]);
+
+ regs->qmPdspIRamReg[count] = reg_phys2virt(
+ odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE,
+ (uint32_t)regs->qmPdspIRamReg[count]);
+ }
+
+ regs->qmLinkingRAMReg = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)regs->qmLinkingRAMReg);
+
+ regs->qmBaseAddr = reg_phys2virt(odp_vm_info.qmssCfgVaddr,
+ CSL_QMSS_CFG_BASE, (uint32_t)regs->qmBaseAddr);
+
+ result = Qmss_init(&init_config, &config_params);
+ if (result != QMSS_SOK) {
+ odp_pr_err("%s: qmss_Init failed with error code %d\n",
+ __func__, result);
+ return nwal_FALSE;
+ }
+ return 1;
+}
+
+/********************************************************************
+ * FUNCTION PURPOSE: Internal NETAPI function to start QM
+ ********************************************************************
+ * DESCRIPTION: Internal NETAPI function to start QM
+ * once per thread/core
+ ********************************************************************/
+int mcsdk_qmss_start(void)
+{
+ int32_t result;
+ Qmss_StartCfg start_cfg;
+
+ start_cfg.rmServiceHandle = odp_proc.rm_service;
+
+ result = Qmss_startCfg(&start_cfg);
+ if (result != QMSS_SOK) {
+ odp_pr_err("Qmss_start failed with error code %d\n", result);
+ return -1;
+ }
+ return 1;
+}
+
+int mcsdk_cppi_start(void)
+{
+ Cppi_StartCfg start_cfg;
+
+ start_cfg.rmServiceHandle = odp_proc.rm_service;
+
+ Cppi_startCfg(&start_cfg);
+
+ return 1;
+}
+
+/********************************************************************
+ * FUNCTION PURPOSE: Internal NETAPI function to setup the QM memory region
+ ********************************************************************
+ * DESCRIPTION: Internal NETAPI function to setup the QM memory region,
+ * once per SOC
+ ********************************************************************/
+int mcsdk_qmss_setup_memregion(uint32_t desc_num, uint32_t desc_size,
+ uint32_t *desc_mem_base, Qmss_MemRegion mem_region)
+{
+ Qmss_MemRegInfo mem_info;
+ Int32 result;
+
+ memset(&mem_info, 0, sizeof(Qmss_MemRegInfo));
+ mem_info.descBase = desc_mem_base;
+ mem_info.descSize = desc_size;
+ mem_info.descNum = desc_num;
+ mem_info.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
+ mem_info.memRegion = mem_region;
+ mem_info.startIndex = TUNE_NETAPI_QM_START_INDEX;
+
+ memset(desc_mem_base, 0, (desc_size * desc_num));
+
+ result = Qmss_insertMemoryRegion(&mem_info);
+ if (result < QMSS_SOK) {
+ odp_pr_err("Qmss_insertMemoryRegion returned error code %d\n",
+ result);
+ return -1;
+ }
+
+ return 1;
+}
new file mode 100644
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Based on TI McSDK NETAPI library
+ */
+
+/* Standard includes */
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <odp_ti_mcsdk.h>
+#include <odp_debug_internal.h>
+
+/* Socket Includes */
+#include "sockutils.h"
+#include "sockrmmsg.h"
+
+/* RM Includes */
+#include <ti/drv/rm/rm.h>
+#include <ti/drv/rm/rm_transport.h>
+#include <ti/drv/rm/rm_services.h>
+
+/* Test FALSE */
+#define RM_TEST_FALSE 0
+/* Test TRUE */
+#define RM_TEST_TRUE 1
+
+/* Socket timeout */
+#define CLIENT_SOCK_TIMEOUT_USEC 500
+
+/* Application's registered RM transport indices */
+#define SERVER_TO_CLIENT 0
+/* Maximum number of registered RM transports */
+#define MAX_MAPPING_ENTRIES 1
+
+/* RM registered transport mapping structure */
+struct trans_map_entry_s {
+ /* Registered RM transport handle */
+ Rm_TransportHandle transportHandle;
+ /* Remote socket tied to the transport handle */
+ sock_name_t *remote_sock;
+};
+
+/* Client instance name */
+char rm_client_name[RM_NAME_MAX_CHARS] = "RM_Client0";
+
+/* Client socket name */
+char rm_client_sock_name[] = "/tmp/var/run/rm/rm_client";
+
+/* Client socket handle */
+sock_h rm_client_socket;
+
+/* Client instance handles */
+Rm_Handle rm_client_handle;
+
+/* Transport map stores the RM transport handle to IPC MessageQ mapping */
+struct trans_map_entry_s rm_transport_map[MAX_MAPPING_ENTRIES];
+
+hplib_spinLock_T net_rm_lock;
+
+
+static Rm_Packet *transport_alloc(Rm_AppTransportHandle transport ODP_UNUSED,
+ uint32_t pkt_size,
+ Rm_PacketHandle *pkt_handle)
+{
+ Rm_Packet *rm_pkt = NULL;
+
+ rm_pkt = calloc(1, sizeof(*rm_pkt));
+ if (!rm_pkt) {
+ odp_pr_err("can't malloc for RM send message (err: %s)\n",
+ strerror(errno));
+ return NULL;
+ }
+ rm_pkt->pktLenBytes = pkt_size;
+ *pkt_handle = rm_pkt;
+
+ return rm_pkt;
+}
+
+static void transport_free(Rm_Packet *rm_pkt)
+{
+ if (rm_pkt)
+ free(rm_pkt);
+}
+
+static void transport_receive(void)
+{
+ int32_t rm_result;
+ int retval;
+ int length = 0;
+ sock_name_t server_sock_addr;
+ Rm_Packet *rm_pkt = NULL;
+ struct sockaddr_un server_addr;
+
+ retval = sock_wait(rm_client_socket, &length, NULL, -1);
+ if (retval == -2) {
+ /* Timeout */
+ return;
+ } else if (retval < 0) {
+ odp_pr_err("Error in reading from socket, error %d\n", retval);
+ return;
+ }
+
+ if (length < (int)sizeof(*rm_pkt)) {
+ odp_pr_err("invalid RM message length %d\n", length);
+ return;
+ }
+ rm_pkt = calloc(1, length);
+ if (!rm_pkt) {
+ odp_pr_err("can't malloc for recv'd RM message (err: %s)\n",
+ strerror(errno));
+ return;
+ }
+
+ server_sock_addr.type = sock_addr_e;
+ server_sock_addr.s.addr = &server_addr;
+ retval = sock_recv(rm_client_socket, (char *)rm_pkt, length,
+ &server_sock_addr);
+ if (retval != length) {
+ odp_pr_err("recv RM pkt failed from socket, received = %d, expected = %d\n",
+ retval, length);
+ return;
+ }
+
+ odp_pr_vdbg("received RM pkt of size %d bytes from %s\n", length,
+ server_sock_addr.s.addr->sun_path);
+
+ /* Provide packet to RM Server for processing */
+ rm_result = Rm_receivePacket(
+ rm_transport_map[SERVER_TO_CLIENT].transportHandle,
+ rm_pkt);
+ if (rm_result != RM_OK)
+ odp_pr_err("RM failed to process received packet: %d\n",
+ rm_result);
+
+ transport_free(rm_pkt);
+}
+
+static int32_t transport_send_rcv(Rm_AppTransportHandle app_transport,
+ Rm_PacketHandle pkt_handle)
+{
+ sock_name_t *server_sock_name = (sock_name_t *)app_transport;
+ Rm_Packet *rm_pkt = (Rm_Packet *)pkt_handle;
+
+ hplib_mSpinLockLock(&net_rm_lock);
+ if (sock_send(rm_client_socket, (char *)rm_pkt,
+ (int)rm_pkt->pktLenBytes, server_sock_name)) {
+ odp_pr_err("send data failed\n");
+ hplib_mSpinLockUnlock(&net_rm_lock);
+ return -1;
+ }
+
+ /* Wait for response from Server */
+ transport_receive();
+ hplib_mSpinLockUnlock(&net_rm_lock);
+
+ return 0;
+}
+
+static int connection_setup(void)
+{
+ Rm_TransportCfg transport_cfg;
+ int i;
+ sock_name_t sock_name;
+ int32_t result = 0;
+ char server_sock_name[] = RM_SERVER_SOCKET_NAME;
+
+ /* Initialize the transport map */
+ for (i = 0; i < MAX_MAPPING_ENTRIES; i++)
+ rm_transport_map[i].transportHandle = NULL;
+
+ sock_name.type = sock_name_e;
+ sock_name.s.name = rm_client_sock_name;
+
+ rm_client_socket = sock_open(&sock_name);
+ if (!rm_client_socket) {
+ odp_pr_err("Client socket open failed\n");
+ return -1;
+ }
+
+ rm_transport_map[SERVER_TO_CLIENT].remote_sock =
+ calloc(1, sizeof(sock_name_t));
+ rm_transport_map[SERVER_TO_CLIENT].remote_sock->type =
+ sock_name_e;
+ rm_transport_map[SERVER_TO_CLIENT].remote_sock->s.name =
+ calloc(1, strlen(server_sock_name) + 1);
+ strncpy(rm_transport_map[SERVER_TO_CLIENT].remote_sock->s.name,
+ server_sock_name, strlen(server_sock_name) + 1);
+
+ /* Register the Server with the Client instance */
+ transport_cfg.rmHandle = rm_client_handle;
+ transport_cfg.appTransportHandle = (Rm_AppTransportHandle)
+ rm_transport_map[SERVER_TO_CLIENT].remote_sock;
+ transport_cfg.remoteInstType = Rm_instType_SERVER;
+ transport_cfg.transportCallouts.rmAllocPkt = transport_alloc;
+ transport_cfg.transportCallouts.rmSendPkt = transport_send_rcv;
+ rm_transport_map[SERVER_TO_CLIENT].transportHandle =
+ Rm_transportRegister(&transport_cfg, &result);
+
+ return 0;
+}
+
+static int free_all_resources(Rm_ServiceHandle *rm_service)
+{
+ Rm_ServiceReqInfo request;
+ Rm_ServiceRespInfo response;
+
+ /* @todo: freeing all resources at init is currently disabled */
+ return 0;
+ memset((void *)&request, 0, sizeof(request));
+ memset((void *)&response, 0, sizeof(response));
+
+ request.type = Rm_service_RESOURCE_FREE;
+ request.resourceName = "ALL";
+ request.resourceBase = RM_RESOURCE_BASE_UNSPECIFIED;
+ request.resourceLength = 0;
+ request.resourceAlignment = 0;
+ /* RM will block until resource is returned since callback is NULL */
+ request.callback.serviceCallback = NULL;
+ odp_pr_dbg("resourceName: %s\n", request.resourceName);
+ rm_service->Rm_serviceHandler(rm_service->rmHandle, &request,
+ &response);
+ odp_pr_dbg("serviceState: %d\n", response.serviceState);
+
+ return (response.serviceState == RM_SERVICE_APPROVED) ? 0 : 1;
+}
+
+Rm_ServiceHandle *rm_client_init(void)
+{
+ Rm_InitCfg init_cfg;
+ int32_t result;
+ Rm_ServiceHandle *service_handle = NULL;
+
+ hplib_mSpinLockInit(&net_rm_lock);
+
+ odp_pr_dbg("RM Version : 0x%08x\nVersion String: %s\n", Rm_getVersion(),
+ Rm_getVersionStr());
+
+ /* Initialize the RM Client */
+ memset(&init_cfg, 0, sizeof(init_cfg));
+ init_cfg.instName = rm_client_name;
+ init_cfg.instType = Rm_instType_CLIENT;
+ init_cfg.instCfg.clientCfg.staticPolicy = NULL;
+
+ rm_client_handle = Rm_init(&init_cfg, &result);
+ if (result != RM_OK) {
+ odp_pr_err("%s: Initialization failed\n", rm_client_name);
+ return NULL;
+ }
+
+ odp_pr_dbg("Initialized %s\n", rm_client_name);
+
+ /* Open Client service handle */
+ service_handle = Rm_serviceOpenHandle(rm_client_handle, &result);
+ if (result != RM_OK) {
+ odp_pr_err("%s: Service handle open failed\n", rm_client_name);
+ return NULL;
+ }
+
+ if (connection_setup())
+ return NULL;
+
+ free_all_resources(service_handle);
+
+ return service_handle;
+}
new file mode 100644
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Based on TI McSDK NETAPI library
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <malloc.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+
+#include "sockutils.h"
+#include "odp_debug_internal.h"
+
+typedef struct sock_data {
+ struct sockaddr_un addr;
+ fd_set readfds;
+ int fd;
+} sock_data_t;
+
+static int check_and_create_path(char *path)
+{
+ char *d = path;
+ if (!d)
+ return -1;
+
+ while ((d = strchr(d + 1, '/'))) {
+ *d = 0;
+ if (mkdir(path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) < 0) {
+ if (errno != EEXIST) {
+ *d = '/';
+ odp_pr_err("can't create path %s (error: %s)",
+ path, strerror(errno));
+ return -1;
+ }
+ }
+ *d = '/';
+ }
+ return 0;
+}
+
+sock_h sock_open(sock_name_t *sock_name)
+{
+ sock_data_t *sd = 0;
+ int retval = 0;
+
+ if (!sock_name)
+ return 0;
+
+ sd = calloc(1, sizeof(sock_data_t));
+ if (!sd)
+ return 0;
+
+ if (sock_name->type == sock_addr_e) {
+ memcpy(&sd->addr, sock_name->s.addr,
+ sizeof(struct sockaddr_un));
+ } else {
+ if (check_and_create_path(sock_name->s.name) < 0)
+ goto check_n_return;
+ sd->addr.sun_family = AF_UNIX;
+ strncpy(sd->addr.sun_path, sock_name->s.name, UNIX_PATH_MAX);
+ }
+
+ sd->fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (sd->fd < 0) {
+ odp_pr_err("can't open socket %s (error: %s)",
+ sd->addr.sun_path, strerror(errno));
+ goto check_n_return;
+ }
+
+ unlink(sd->addr.sun_path);
+ if (bind(sd->fd, (struct sockaddr *)&sd->addr,
+ sizeof(struct sockaddr_un)) < 0) {
+ odp_pr_err("can't bind socket %s (error: %s)",
+ sd->addr.sun_path, strerror(errno));
+ goto check_n_return;
+ }
+
+ FD_ZERO(&sd->readfds);
+ FD_SET(sd->fd, &sd->readfds);
+
+ retval = (int) sd;
+
+check_n_return:
+ if (!retval)
+ sock_close((sock_h)sd);
+
+ return (sock_h)retval;
+}
+
+int sock_close(sock_h handle)
+{
+ sock_data_t *sd = (sock_data_t *)handle;
+
+ if (!sd)
+ return -1;
+
+ if (sd->fd)
+ close(sd->fd);
+ free(sd);
+
+ return 0;
+}
+
+int sock_send(sock_h handle, const char *data, int length,
+ sock_name_t *to)
+{
+ int fd;
+ sock_data_t *sd = (sock_data_t *)handle;
+ struct sockaddr_un to_addr;
+
+ if (!to)
+ return -1;
+
+ if (to->type == sock_addr_e) {
+ memcpy(&to_addr, to->s.addr, sizeof(struct sockaddr_un));
+ } else {
+ to_addr.sun_family = AF_UNIX;
+ strncpy(to_addr.sun_path, to->s.name, UNIX_PATH_MAX);
+ }
+
+ if (sd) {
+ fd = sd->fd;
+ } else {
+ fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (fd < 0) {
+ odp_pr_err("can't open socket %s (error: %s)",
+ to_addr.sun_path, strerror(errno));
+ return -1;
+ }
+ }
+
+ if (sendto(fd, data, length, 0, (struct sockaddr *)&to_addr,
+ sizeof(struct sockaddr_un)) < 0) {
+ odp_pr_err("can't send data to %s (error: %s)",
+ to_addr.sun_path, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+int sock_wait(sock_h handle, int *size, struct timeval *timeout, int extern_fd)
+{
+ sock_data_t *sd = (sock_data_t *)handle;
+ int retval;
+ fd_set fds;
+
+ if (!sd) {
+ odp_pr_err("invalid hanlde");
+ return -1;
+ }
+
+ fds = sd->readfds;
+
+ if (extern_fd != -1)
+ FD_SET(extern_fd, &fds);
+
+ retval = select(FD_SETSIZE, &fds, NULL, NULL, timeout);
+ if (retval == -1) {
+ odp_pr_err("select failed for %s (error: %s)",
+ sd->addr.sun_path, strerror(errno));
+ return -1;
+ }
+
+ if ((extern_fd != -1) && (FD_ISSET(extern_fd, &fds)))
+ return 1;
+
+ if (!FD_ISSET(sd->fd, &fds))
+ return -2; /* Wait timed out */
+
+ if (!retval)
+ return 0;
+
+ if (size != 0) {
+ retval = ioctl(sd->fd, FIONREAD, size);
+ if (retval == -1) {
+ odp_pr_err("can't read datagram size for %s (error: %s)",
+ sd->addr.sun_path, strerror(errno));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int sock_recv(sock_h handle, char *data, int length, sock_name_t *from)
+{
+ int size;
+ sock_data_t *sd = (sock_data_t *)handle;
+ socklen_t from_length = 0;
+ struct sockaddr *sock_addr;
+
+ if (!sd) {
+ odp_pr_err("invalid handle");
+ return -1;
+ }
+
+ if (from) {
+ from->type = sock_addr_e;
+ if (from->type && from->s.addr) {
+ from_length = sizeof(struct sockaddr_un);
+ } else {
+ odp_pr_err("invalid from parameter");
+ return -1;
+ }
+ }
+
+ sock_addr = (struct sockaddr *)((from_length) ? from->s.addr : NULL);
+ size = recvfrom(sd->fd, data, length, 0, sock_addr, &from_length);
+ if (size < 1) {
+ odp_pr_err("can't read datagram from socket for %s (error: %s), size %d",
+ sd->addr.sun_path, strerror(errno), size);
+ return -1;
+ }
+
+ return size;
+}
@@ -1,4 +1,6 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,38 +9,10 @@
#include <odp_buffer.h>
#include <odp_buffer_internal.h>
#include <odp_buffer_pool_internal.h>
-#include <ti_em_rh.h>
-
-void *odp_buffer_addr(odp_buffer_t buf)
-{
- return odp_buf_to_hdr(buf)->buf_vaddr;
-}
-
-size_t odp_buffer_size(odp_buffer_t buf)
-{
- return (size_t)odp_buf_to_hdr(buf)->desc.origBufferLen;
-}
-
-int odp_buffer_type(odp_buffer_t buf)
-{
- return odp_buf_to_hdr(buf)->type;
-}
-
-int odp_buffer_is_scatter(odp_buffer_t buf)
-{
- return (odp_buf_to_hdr(buf)->desc.nextBDPtr) ? 1 : 0;
-}
-
-
-int odp_buffer_is_valid(odp_buffer_t buf)
-{
- return (buf != ODP_BUFFER_INVALID);
-}
-
int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
{
- odp_buffer_hdr_t *desc;
+ Cppi_HostDesc *desc;
int len = 0;
if (!odp_buffer_is_valid(buf)) {
@@ -46,39 +20,34 @@ int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
return len;
}
- desc = odp_buf_to_hdr(buf);
+ desc = _odp_buf_to_cppi_desc(buf);
len += snprintf(&str[len], n-len,
"Buffer\n");
len += snprintf(&str[len], n-len,
" desc_vaddr %p\n", desc);
len += snprintf(&str[len], n-len,
- " buf_vaddr %p\n", desc->buf_vaddr);
- len += snprintf(&str[len], n-len,
- " buf_paddr_o 0x%x\n", desc->desc.origBuffPtr);
+ " buf_paddr_o 0x%x\n", desc->origBuffPtr);
len += snprintf(&str[len], n-len,
- " buf_paddr 0x%x\n", desc->desc.buffPtr);
+ " buf_paddr 0x%x\n", desc->buffPtr);
len += snprintf(&str[len], n-len,
- " buf_len_o 0x%x\n", desc->desc.origBufferLen);
+ " buf_len_o 0x%x\n", desc->origBufferLen);
len += snprintf(&str[len], n-len,
- " buf_len 0x%x\n", desc->desc.buffLen);
+ " buf_len 0x%x\n", desc->buffLen);
len += snprintf(&str[len], n-len,
- " pool %i\n", odp_buf_to_pool(buf));
- len += snprintf(&str[len], n-len,
- " free_queue %u\n", desc->free_queue);
+ " pool %p\n", odp_buf_to_pool(buf));
len += snprintf(&str[len], n-len, "\n");
return len;
}
-
void odp_buffer_print(odp_buffer_t buf)
{
int max_len = 512;
char str[max_len];
int len;
- odp_buffer_hdr_t *desc;
+ Cppi_HostDesc *desc;
len = odp_buffer_snprint(str, max_len-1, buf);
if (!len)
@@ -87,9 +56,11 @@ void odp_buffer_print(odp_buffer_t buf)
printf("\n%s\n", str);
- desc = odp_buf_to_hdr(buf);
- ti_em_rh_dump_mem(desc, sizeof(*desc), "Descriptor dump");
- ti_em_rh_dump_mem(desc->buf_vaddr, 64, "Buffer start");
+ desc = _odp_buf_to_cppi_desc(buf);
+ odp_print_mem(desc, sizeof(*desc), "Descriptor dump");
+ odp_print_mem((void *)desc->origBuffPtr,
+ desc->buffPtr - desc->origBuffPtr + 128,
+ "Buffer start");
}
void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src)
@@ -1,4 +1,6 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,300 +10,110 @@
#include <odp_buffer_pool.h>
#include <odp_buffer_pool_internal.h>
#include <odp_buffer_internal.h>
-#include <odp_packet_internal.h>
-#include <odp_shared_memory.h>
-#include <odp_shared_memory_internal.h>
#include <odp_align.h>
#include <odp_internal.h>
#include <odp_config.h>
-#include <configs/odp_config_platform.h>
#include <odp_hints.h>
#include <odp_debug.h>
-#include <odp_sync.h>
-#include <odp_queue_internal.h>
#include <string.h>
#include <stdlib.h>
-#include <ti_em_rh.h>
-#ifdef POOL_USE_TICKETLOCK
-#include <odp_ticketlock.h>
-#define LOCK(a) odp_ticketlock_lock(a)
-#define UNLOCK(a) odp_ticketlock_unlock(a)
-#define LOCK_INIT(a) odp_ticketlock_init(a)
-#else
-#include <odp_spinlock.h>
-#define LOCK(a) odp_spinlock_lock(a)
-#define UNLOCK(a) odp_spinlock_unlock(a)
-#define LOCK_INIT(a) odp_spinlock_init(a)
-#endif
-
-
-#define NULL_INDEX ((uint32_t)-1)
-
-
-
-
-typedef union pool_entry_u {
- struct pool_entry_s s;
-
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
-
-} pool_entry_t;
-
-
-typedef struct pool_table_t {
- pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS];
-
-} pool_table_t;
-
-typedef struct {
- uintptr_t p;
- uintptr_t v;
-} pvaddr_t;
-
-/* The pool table */
-static pool_table_t *pool_tbl;
-
-/* Pool entry pointers (for inlining) */
-void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS];
-
-static uint32_t ti_odp_alloc_public_desc(uint32_t num)
-{
- static uint32_t free_desc_id;
- uint32_t tmp;
-
- if (free_desc_id + num > TI_ODP_PUBLIC_DESC_NUM)
- return -1;
-
- tmp = __sync_fetch_and_add(&free_desc_id, num);
-
- if (tmp + num > TI_ODP_PUBLIC_DESC_NUM) {
- __sync_fetch_and_sub(&free_desc_id, num);
- return -1;
- }
- return tmp;
-}
-
-odp_buffer_pool_t odp_buf_to_pool(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
- pool_entry_t *pool = get_pool_entry(0);
- return hdr->free_queue - pool->s.free_queue;
-}
+/**
+ * @todo: The number of HW descriptors is currently limited, so temporarily
+ * limit the maximum number of buffers per pool to be able to run the ODP
+ * example apps. Descriptor management has to be made more intelligent
+ * to remove this limitation.
+ */
+#define MAX_BUFS_PER_POOL 1024
int odp_buffer_pool_init_global(void)
{
- odp_buffer_pool_t i;
-
- pool_tbl = odp_shm_reserve("odp_buffer_pools",
- sizeof(pool_table_t),
- sizeof(pool_entry_t));
-
- if (pool_tbl == NULL)
- return -1;
-
- memset(pool_tbl, 0, sizeof(pool_table_t));
-
- for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
- /* init locks */
- pool_entry_t *pool = &pool_tbl->pool[i];
- LOCK_INIT(&pool->s.lock);
- pool->s.pool = i;
- pool_entry_ptr[i] = pool;
- pool->s.free_queue = TI_ODP_FREE_QUEUE_BASE_IDX + i;
- }
-
- ODP_DBG("\nBuffer pool init global\n");
- ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
- ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
- ODP_DBG("\n");
+ /* Pktlib initialized in mcsdk_global_init() */
return 0;
}
-#define MAX_BUFS_PER_POOL 2000
-
-static int link_bufs(pool_entry_t *pool)
+odp_buffer_pool_t odp_buffer_pool_create(const char *name,
+ void *base_addr ODP_UNUSED, uint64_t size,
+ size_t buf_size, size_t buf_align,
+ int buf_type ODP_UNUSED)
{
- size_t buf_size, buf_align;
- uint64_t pool_size;
- uintptr_t pool_base;
- pvaddr_t buf_addr, desc_addr;
- uint32_t desc_index;
- uint32_t num_bufs, i;
-
- buf_align = pool->s.payload_align;
- buf_size = ODP_ALIGN_ROUNDUP(pool->s.payload_size, buf_align);
- pool_size = pool->s.pool_size;
- pool_base = (uintptr_t) pool->s.pool_base_addr;
- /* First buffer */
- buf_addr.v = ODP_ALIGN_ROUNDUP(pool_base, buf_align);
- buf_addr.p = _odp_shm_get_paddr((void *)buf_addr.v);
- pool->s.buf_base = buf_addr.v;
+ Pktlib_HeapCfg heap_cfg;
+ Pktlib_HeapHandle heap_handle;
+ int num_bufs;
+ int err_code;
- num_bufs = (pool_size - (buf_addr.v - pool_base)) / buf_size;
+ buf_size = ODP_ALIGN_ROUNDUP(buf_size, buf_align);
/*
- * FIXME: Currently a number of HW descriptors is limited,
- * so temporary limit max number of buffers per pool
- * to be albe to run ODP example apps.
- * Descriptor management have to be made more intelligent
- * To remove this limitation.
+ * XXX: size is used only to get number of buffers.
+ * Memory is allocated for each buffer separately
*/
- if (num_bufs > MAX_BUFS_PER_POOL) {
- ODP_DBG("Limiting number of buffer in %s from %d to %d\n",
- pool->s.name, num_bufs, MAX_BUFS_PER_POOL);
- num_bufs = MAX_BUFS_PER_POOL;
- }
-
- desc_index = ti_odp_alloc_public_desc(num_bufs);
-
- ODP_DBG("%s: buf_size: %zu, buf_align: %zu\n", __func__,
- buf_size, buf_align);
- ODP_DBG("%s: pool_size: %llu, pool_base: 0x%p\n", __func__,
- pool_size, (void *)pool_base);
- ODP_DBG("%s: buf_addr.v: 0x%p, buf_addr.p: 0x%p\n", __func__,
- (void *)buf_addr.v, (void *)buf_addr.p);
- ODP_DBG("%s: num_bufs: %u, desc_index: %u\n", __func__,
- num_bufs, desc_index);
- ODP_DBG("%s: free_queue: %u\n", __func__, pool->s.free_queue);
-
- /* FIXME: Need to define error codes somewhere */
- if (desc_index == (uint32_t)-1) {
- ODP_ERR("Failed to allocate %u descriptors for pool %s\n",
- num_bufs, pool->s.name);
- return -1;
- }
-
- if (ti_em_osal_hw_queue_open(pool->s.free_queue) != EM_OK) {
- ODP_ERR("Failed to open HW queue %u\n", pool->s.free_queue);
- return -1;
- }
-
- for (i = 0; i < num_bufs; i++) {
- Cppi_DescTag tag;
- odp_buffer_hdr_t *hdr;
-
- /*
- * TODO: Need to get descriptor size here and shift
- * descriptor address, but not query it on every iteration.
- */
- desc_addr.v = (uintptr_t)ti_em_rh_public_desc_addr(desc_index,
- &desc_addr.p);
- hdr = (odp_buffer_hdr_t *)desc_addr.v;
- memset((void *)hdr, 0, sizeof(*hdr));
-
- hdr->free_queue = pool->s.free_queue;
- hdr->buf_vaddr = (void *)buf_addr.v;
+ num_bufs = size / buf_size;
+ buf_size += odp_global->cfg.min_buf_headroom_size;
+ buf_size = ODP_CACHE_LINE_SIZE_ROUNDUP(buf_size);
- /* Set defaults in descriptor */
- hdr->desc.descInfo = (Cppi_DescType_HOST << 30) |
- (Cppi_PSLoc_PS_IN_DESC << 22) |
- (pool->s.payload_size & 0xFFFF);
- hdr->desc.packetInfo =
- (((uint32_t) Cppi_EPIB_EPIB_PRESENT) << 31) |
- (((uint32_t) Cppi_ReturnPolicy_RETURN_BUFFER) << 15) |
- (pool->s.free_queue & 0x3FFF);
- hdr->desc.origBuffPtr = buf_addr.p;
- hdr->desc.buffPtr = buf_addr.p;
- hdr->desc.origBufferLen = buf_size;
- hdr->desc.buffLen = buf_size;
- /* TODO: pslen is set to 0, but should be configurable */
- ti_em_cppi_set_pslen(Cppi_DescType_HOST,
- (Cppi_Desc *)(hdr), 0);
-
- tag.srcTagHi = 0x00;
- tag.srcTagLo = 0xFF;
- tag.destTagHi = 0x00;
- tag.destTagLo = 0x00;
- ti_em_cppi_set_tag(Cppi_DescType_HOST,
- (Cppi_Desc *)(hdr),
- &tag);
-
- odp_sync_stores();
- _ti_hw_queue_push_desc(pool->s.free_queue, hdr);
- buf_addr.v += buf_size;
- buf_addr.p += buf_size;
- desc_index++;
+ if (num_bufs > MAX_BUFS_PER_POOL) {
+ odp_pr_dbg("Limiting number of buffer in %s from %d to %d\n",
+ name, num_bufs, MAX_BUFS_PER_POOL);
+ num_bufs = MAX_BUFS_PER_POOL;
}
- return 0;
+ /* Initialize the heap configuration. */
+ memset((void *)&heap_cfg, 0, sizeof(Pktlib_HeapCfg));
+ /* Populate the heap configuration */
+ heap_cfg.name = name;
+ heap_cfg.memRegion = TUNE_NETAPI_QM_GLOBAL_REGION;
+ heap_cfg.sharedHeap = 1;
+ heap_cfg.useStarvationQueue = 0;
+ heap_cfg.dataBufferSize = buf_size;
+ heap_cfg.numPkts = num_bufs;
+ heap_cfg.numZeroBufferPackets = 0;
+ heap_cfg.heapInterfaceTable.data_malloc =
+ pktlib_if_table.data_malloc;
+ heap_cfg.heapInterfaceTable.data_free =
+ pktlib_if_table.data_free;
+ heap_cfg.dataBufferPktThreshold = 0;
+ heap_cfg.zeroBufferPktThreshold = 0;
+ odp_pr_dbg("name: %s, buf_size: %u, num_bufs: %u\n", name, buf_size,
+ num_bufs);
+ /* Create Shared Heap with specified configuration. */
+ heap_handle = Pktlib_createHeap(&heap_cfg, &err_code);
+ odp_pr_dbg("heap_handle: %p, err_code: %d\n", heap_handle, err_code);
+ return heap_handle;
}
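+
+/*
+ * Usage sketch (illustrative): base_addr is ignored by this implementation
+ * and buf_type is unused; size and buf_size only determine how many Pktlib
+ * buffers back the pool:
+ *
+ *   odp_buffer_pool_t pool;
+ *   pool = odp_buffer_pool_create("packet_pool", NULL,
+ *                                 1024 * 2048,         // total size hint
+ *                                 2048,                // buffer size
+ *                                 ODP_CACHE_LINE_SIZE, // buffer alignment
+ *                                 ODP_BUFFER_TYPE_PACKET);
+ *   if (!pool)
+ *       ;   // heap creation failed
+ */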
-odp_buffer_pool_t odp_buffer_pool_create(const char *name,
- void *base_addr, uint64_t size,
- size_t buf_size, size_t buf_align,
- int buf_type)
+odp_buffer_pool_t odp_buffer_pool_lookup(const char *name)
{
- odp_buffer_pool_t i;
- pool_entry_t *pool;
- odp_buffer_pool_t pool_id = ODP_BUFFER_POOL_INVALID;
-
- for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
- pool = get_pool_entry(i);
-
- LOCK(&pool->s.lock);
-
- if (pool->s.buf_base == 0) {
- /* found free pool */
- ODP_DBG("%s: found free pool id: %u for %s\n", __func__,
- i, name);
- strncpy(pool->s.name, name,
- ODP_BUFFER_POOL_NAME_LEN - 1);
- pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0;
- pool->s.pool_base_addr = base_addr;
- pool->s.pool_size = size;
- pool->s.payload_size = buf_size;
- pool->s.payload_align = buf_align;
- pool->s.buf_type = buf_type;
- pool->s.buf_base = (uintptr_t)ODP_ALIGN_ROUNDUP_PTR(
- base_addr, buf_align);
-
- if (link_bufs(pool) != -1)
- pool_id = i;
- UNLOCK(&pool->s.lock);
- break;
- }
-
- UNLOCK(&pool->s.lock);
- }
-
- return pool_id;
+ return Pktlib_findHeapByName(name);
}
-odp_buffer_pool_t odp_buffer_pool_lookup(const char *name)
+odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_id)
{
- odp_buffer_pool_t i;
- pool_entry_t *pool;
-
- for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
- pool = get_pool_entry(i);
+ Ti_Pkt *pkt;
+ odp_buffer_t buf;
+ Cppi_HostDesc *desc;
- LOCK(&pool->s.lock);
- if (strcmp(name, pool->s.name) == 0) {
- /* found it */
- UNLOCK(&pool->s.lock);
- return i;
- }
- UNLOCK(&pool->s.lock);
- }
+ pkt = Pktlib_allocPacket(pool_id, -1);
+ if (!pkt)
+ return ODP_BUFFER_INVALID;
- return ODP_BUFFER_POOL_INVALID;
-}
+ buf = _ti_pkt_to_odp_buf(pkt);
+ desc = _odp_buf_to_cppi_desc(buf);
+ /* Leave space for buffer metadata. There must be enough space. */
+ desc->buffPtr = desc->origBuffPtr +
+ odp_global->cfg.min_buf_headroom_size;
-odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_id)
-{
- pool_entry_t *pool = get_pool_entry(pool_id);
- return (odp_buffer_t)ti_em_osal_hw_queue_pop(pool->s.free_queue,
- TI_EM_MEM_PUBLIC_DESC);
+ odp_pr_vdbg("pool_id: %p, pkt: %p, buf: %p\n", pool_id, pkt, buf);
+ return buf;
}
-
void odp_buffer_free(odp_buffer_t buf)
{
- odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
- _ti_hw_queue_push_desc(hdr->free_queue, hdr);
+ odp_pr_vdbg("buf: %p\n", buf);
+ Pktlib_freePacket(_odp_buf_to_ti_pkt(buf));
}
void odp_buffer_pool_print(odp_buffer_pool_t pool_id)
@@ -309,8 +121,12 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool_id)
(void)pool_id;
}
+odp_buffer_pool_t odp_buf_to_pool(odp_buffer_t buf)
+{
+ return Pktlib_getPktHeap(_odp_buf_to_ti_pkt(buf));
+}
+
uint32_t _odp_pool_get_free_queue(odp_buffer_pool_t pool_id)
{
- pool_entry_t *pool = get_pool_entry(pool_id);
- return pool->s.free_queue;
+ return Pktlib_getInternalHeapQueue(pool_id);
}
@@ -1,4 +1,6 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,88 +9,12 @@
#include <odp_init.h>
#include <odp_internal.h>
#include <odp_debug.h>
-#include <configs/odp_config_platform.h>
-#include <ti_em_osal_core.h>
-#include <ti_em_osal_queue.h>
-#include <ti_em_rh.h>
#include <odp_config.h>
#include <odp_packet_internal.h>
-/*
- * Make region_configs[] global, because hw_config is saved in
- * ti_em_rh_init_global() and it references region_configs[].
- */
-static ti_em_osal_hw_region_config_t region_configs[TI_ODP_REGION_NUM];
-
-static int ti_init_hw_config(void)
-{
- ti_em_rh_hw_config_t hw_config;
- ti_em_osal_hw_region_config_t *reg_config;
- memset(&hw_config, 0, sizeof(ti_em_rh_hw_config_t));
-
- /* Set ODP initialization parameters */
- hw_config.private_free_queue_idx = MY_EM_PRIVATE_FREE_QUEUE_IDX;
- hw_config.hw_queue_base_idx = MY_EM_SCHED_QUEUE_IDX;
- hw_config.dma_idx = -1; /* not used */
- hw_config.dma_queue_base_idx = 0; /* not used */
- hw_config.device_id = MY_EM_DEVICE_ID;
- hw_config.process_id = MY_EM_PROCESS_ID;
- hw_config.chain_config_ptr = NULL;
- hw_config.dispatch_mode = MY_EM_DISPATCH_MODE;
-
- /* The location of the PDSP communication memory (physical address) */
- hw_config.pdsp_comm_mem_config.paddr = MY_EM_PDSP_COMM_MEM_BASE;
- hw_config.pdsp_comm_mem_config.vaddr = MY_EM_PDSP_COMM_MEM_VBASE;
- hw_config.pdsp_comm_mem_config.size = MY_EM_PDSP_COMM_MEM_SIZE;
- hw_config.pdsp_comm_mem_config.offset = MY_EM_PDSP_COMM_MEM_OFFSET;
-
- TI_EM_OSAL_TRACE(2, "physical address of the PDSP communication memory is 0x%x\n",
- hw_config.pdsp_comm_mem_config.paddr);
-
- /* Define descriptor regions */
- reg_config = ®ion_configs[TI_EM_RH_PUBLIC];
- reg_config->region_idx = TI_ODP_PUBLIC_REGION_IDX;
- reg_config->desc_size =
- ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t));
- reg_config->desc_num = TI_ODP_PUBLIC_DESC_NUM;
- reg_config->desc_base = TI_ODP_PUBLIC_DESC_BASE;
- reg_config->desc_vbase = TI_ODP_PUBLIC_DESC_VBASE;
- reg_config->desc_offset = TI_ODP_PUBLIC_DESC_OFFSET;
- reg_config->desc_flag = TI_EM_RH_UNMANAGED_DESCRIPTOR;
- reg_config->start_idx = TI_ODP_PUBLIC_START_DESC_IDX;
-
-	reg_config = &region_configs[TI_EM_RH_PRIVATE];
- reg_config->region_idx = TI_ODP_PRIVATE_REGION_IDX;
- reg_config->desc_size = TI_EM_PRIVATE_EVENT_DSC_SIZE;
- reg_config->desc_num = TI_EM_RH_PRIVATE_EVENT_NUM;
- reg_config->desc_base = TI_ODP_PRIVATE_DESC_BASE;
- reg_config->desc_vbase = TI_ODP_PRIVATE_DESC_VBASE;
- reg_config->desc_offset = TI_ODP_PRIVATE_DESC_OFFSET;
- reg_config->desc_flag = TI_EM_RH_UNMANAGED_DESCRIPTOR;
- reg_config->start_idx = TI_ODP_PRIVATE_START_DESC_IDX;
-
- hw_config.region_num = TI_ODP_REGION_NUM;
-	hw_config.region_configs = &region_configs[0];
-
- /* Define PDSP configuration */
- hw_config.pdsp_num = 0;
- /* do not use router (no chaining) */
- hw_config.pdsp_router.pdsp_id = -1;
-
- TI_EM_OSAL_TRACE(1, "calling EM global initialization\n");
-
- /* call OpenEM global initialization */
- if (ti_em_rh_init_global(0,
- NULL,
- MY_EM_CORE_NUM,
- &hw_config) != EM_OK) {
- TI_EM_OSAL_ERROR("EM global initialization failed!\n");
- return -1;
- }
-
- return 0;
-}
-
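+/*
+ * Platform state used by the McSDK-based implementation: odp_global is a
+ * pointer set up during mcsdk_global_init() (presumably to shared memory),
+ * odp_proc is per-process state and odp_local is per-thread (TLS).
+ */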
+struct odp_global_s *odp_global;
+struct odp_proc_s odp_proc;
+__thread struct odp_local_s odp_local;
int odp_init_global(void)
{
@@ -96,36 +22,38 @@ int odp_init_global(void)
odp_system_info_init();
- ti_em_osal_core_init_global();
- ti_init_hw_config();
+ if (mcsdk_global_init()) {
+ odp_pr_err("ODP McSDK init failed.\n");
+ return -1;
+ }
if (odp_shm_init_global()) {
- ODP_ERR("ODP shm init failed.\n");
+ odp_pr_err("ODP shm init failed.\n");
return -1;
}
if (odp_buffer_pool_init_global()) {
- ODP_ERR("ODP buffer pool init failed.\n");
+ odp_pr_err("ODP buffer pool init failed.\n");
return -1;
}
if (odp_queue_init_global()) {
- ODP_ERR("ODP queue init failed.\n");
+ odp_pr_err("ODP queue init failed.\n");
return -1;
}
if (odp_schedule_init_global()) {
- ODP_ERR("ODP schedule init failed.\n");
+ odp_pr_err("ODP schedule init failed.\n");
return -1;
}
if (odp_pktio_init_global()) {
- ODP_ERR("ODP packet io init failed.\n");
+ odp_pr_err("ODP packet io init failed.\n");
return -1;
}
if (odp_timer_init_global()) {
- ODP_ERR("ODP timer init failed.\n");
+ odp_pr_err("ODP timer init failed.\n");
return -1;
}
@@ -135,19 +63,21 @@ int odp_init_global(void)
int odp_init_local(int thr_id)
{
+ int ret = 0;
odp_thread_init_local(thr_id);
- ti_em_rh_init_local();
-
- if (odp_pktio_init_local()) {
- ODP_ERR("ODP packet io local init failed.\n");
+ ret = mcsdk_local_init(thr_id);
+ if (ret < 0) {
+		odp_pr_err("McSDK local init failed\n");
return -1;
+ } else if (ret > 0) {
+		odp_pr_dbg("Skipping McSDK local init\n");
+ return 0;
}
if (odp_schedule_init_local()) {
- ODP_ERR("ODP schedule local init failed.\n");
+ odp_pr_err("ODP schedule local init failed.\n");
return -1;
}
-
return 0;
}
@@ -1,4 +1,6 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,7 +10,6 @@
#include <odp_packet_internal.h>
#include <odp_hints.h>
#include <odp_byteorder.h>
-#include <ti_em_rh.h>
#include <odph_eth.h>
#include <odph_ip.h>
@@ -16,38 +17,39 @@
#include <string.h>
#include <stdio.h>
-static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
- odph_ipv4hdr_t *ipv4, size_t *offset_out);
-static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr,
- odph_ipv6hdr_t *ipv6, size_t *offset_out);
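+/*
+ * The packet header stores layer offsets as 16-bit fields, so an all-ones
+ * value marks an offset that has not been set yet.
+ */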
+#define ODP_PACKET_HDR_OFFSET_INVALID ((uint16_t)-1)
-void odp_packet_init(odp_packet_t pkt)
-{
- odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
-
- pkt_hdr->l2_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->l3_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->l4_offset = ODP_PACKET_OFFSET_INVALID;
-}
+static inline uint8_t parse_ipv4(struct odp_pkthdr *pkt_hdr,
+ odph_ipv4hdr_t *ipv4,
+ size_t *offset_out);
+static inline uint8_t parse_ipv6(struct odp_pkthdr *pkt_hdr,
+ odph_ipv6hdr_t *ipv6,
+ size_t *offset_out);
-odp_packet_t odp_packet_from_buffer(odp_buffer_t buf)
+void odp_packet_init(odp_packet_t pkt)
{
- return (odp_packet_t)buf;
-}
+ struct odp_pkthdr *const pkt_hdr = odp_packet_hdr(pkt);
-odp_buffer_t odp_buffer_from_packet(odp_packet_t pkt)
-{
- return (odp_buffer_t)pkt;
+ pkt_hdr->l2_offset = ODP_PACKET_HDR_OFFSET_INVALID;
+ pkt_hdr->l3_offset = ODP_PACKET_HDR_OFFSET_INVALID;
+ pkt_hdr->l4_offset = ODP_PACKET_HDR_OFFSET_INVALID;
}
void odp_packet_set_len(odp_packet_t pkt, size_t len)
{
- ti_em_cppi_set_pktlen(&odp_packet_hdr(pkt)->buf_hdr.desc, len);
+ odp_buffer_t buf = odp_buffer_from_packet(pkt);
+ Pktlib_setPacketLen(_odp_buf_to_ti_pkt(buf), len);
+ /**
+	 * @todo: Buffer length should be modified by buffer API when it
+	 * becomes available
+ */
+ _odp_buf_to_cppi_desc(buf)->buffLen = len;
}
size_t odp_packet_get_len(odp_packet_t pkt)
{
- return ti_em_cppi_get_pktlen(&odp_packet_hdr(pkt)->buf_hdr.desc);
+ odp_buffer_t buf = odp_buffer_from_packet(pkt);
+ return Pktlib_getPacketLen(_odp_buf_to_ti_pkt(buf));
}
uint8_t *odp_packet_buf_addr(odp_packet_t pkt)
@@ -65,7 +67,7 @@ uint8_t *odp_packet_l2(odp_packet_t pkt)
{
const size_t offset = odp_packet_l2_offset(pkt);
- if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ if (odp_unlikely(offset == ODP_PACKET_HDR_OFFSET_INVALID))
return NULL;
return odp_packet_buf_addr(pkt) + offset;
@@ -85,7 +87,7 @@ uint8_t *odp_packet_l3(odp_packet_t pkt)
{
const size_t offset = odp_packet_l3_offset(pkt);
- if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ if (odp_unlikely(offset == ODP_PACKET_HDR_OFFSET_INVALID))
return NULL;
return odp_packet_buf_addr(pkt) + offset;
@@ -105,7 +107,7 @@ uint8_t *odp_packet_l4(odp_packet_t pkt)
{
const size_t offset = odp_packet_l4_offset(pkt);
- if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ if (odp_unlikely(offset == ODP_PACKET_HDR_OFFSET_INVALID))
return NULL;
return odp_packet_buf_addr(pkt) + offset;
@@ -134,7 +136,7 @@ void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset)
*/
void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
{
- odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+ struct odp_pkthdr *const pkt_hdr = odp_packet_hdr(pkt);
odph_ethhdr_t *eth;
odph_vlanhdr_t *vlan;
odph_ipv4hdr_t *ipv4;
@@ -154,7 +156,7 @@ void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
}
len -= 4; /* Crop L2 CRC */
- ti_em_cppi_set_pktlen(&pkt_hdr->buf_hdr.desc, len);
+ odp_packet_set_len(pkt, len);
/* Assume valid L2 header, no CRC/FCS check in SW */
pkt_hdr->input_flags.l2 = 1;
@@ -232,8 +234,9 @@ void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
}
}
-static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
- odph_ipv4hdr_t *ipv4, size_t *offset_out)
+static inline uint8_t parse_ipv4(struct odp_pkthdr *pkt_hdr,
+ odph_ipv4hdr_t *ipv4,
+ size_t *offset_out)
{
uint8_t ihl;
uint16_t frag_offset;
@@ -272,8 +275,9 @@ static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
return ipv4->proto;
}
-static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr,
- odph_ipv6hdr_t *ipv6, size_t *offset_out)
+static inline uint8_t parse_ipv6(struct odp_pkthdr *pkt_hdr,
+ odph_ipv6hdr_t *ipv6,
+ size_t *offset_out)
{
if (ipv6->next_hdr == ODPH_IPPROTO_ESP ||
ipv6->next_hdr == ODPH_IPPROTO_AH) {
@@ -299,7 +303,9 @@ void odp_packet_print(odp_packet_t pkt)
char str[max_len];
int len = 0;
int n = max_len-1;
- odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+ Cppi_HostDesc *desc;
+ struct odp_pkthdr *hdr = odp_packet_hdr(pkt);
+ odp_buffer_t buf = odp_buffer_from_packet(pkt);
len += snprintf(&str[len], n-len, "Packet ");
len += odp_buffer_snprint(&str[len], n-len, (odp_buffer_t) pkt);
@@ -324,8 +330,11 @@ void odp_packet_print(odp_packet_t pkt)
str[len] = '\0';
printf("\n%s\n", str);
- ti_em_rh_dump_mem(hdr, sizeof(*hdr), "Descriptor dump");
- ti_em_rh_dump_mem(hdr->buf_hdr.buf_vaddr, 64, "Buffer start");
+ desc = _odp_buf_to_cppi_desc(buf);
+ odp_print_mem(desc, sizeof(*desc), "Descriptor dump");
+ odp_print_mem((void *)desc->origBuffPtr,
+ desc->buffPtr - desc->origBuffPtr + 128,
+ "Buffer start");
}
int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src)
@@ -1,4 +1,6 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -22,6 +24,7 @@
#include <odp_schedule_internal.h>
#include <odp_debug.h>
#include <odp_buffer_pool_internal.h>
+#include <odp_sync.h>
#include <odp_pktio_socket.h>
#ifdef ODP_HAVE_NETMAP
@@ -30,28 +33,26 @@
#include <string.h>
+#define DUMMY_PKTIO
+
typedef struct {
pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
} pktio_table_t;
static pktio_table_t *pktio_tbl;
-struct pktio_device pktio_devs[] = {
- /* eth0 is used by Linux kernel */
- /* {.name = "eth0", .tx_hw_queue = 648, .rx_channel = 22, .rx_flow = 22, .port_id = 1}, */
- {.name = "eth1", .tx_hw_queue = 648, .rx_channel = 23, .rx_flow = 23, .port_id = 2},
- {.name = "eth2", .tx_hw_queue = 648, .rx_channel = 24, .rx_flow = 24, .port_id = 3},
- {.name = "eth3", .tx_hw_queue = 648, .rx_channel = 25, .rx_flow = 25, .port_id = 4},
-};
-
-static struct pktio_device *_odp_pktio_dev_lookup(const char *name)
+#define MAX_PORT_INDEX 4
+static int port_index(const char *interface)
{
- int i;
- int num = sizeof(pktio_devs)/sizeof(pktio_devs[0]);
- for (i = 0; i < num; i++)
- if (!strncmp(pktio_devs[i].name, name, PKTIO_DEV_MAX_NAME_LEN))
- return &pktio_devs[i];
- return NULL;
+ int ret, port;
+
+ ret = sscanf(interface, "eth%d", &port);
+ if (1 != ret)
+ return -1;
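+	/*
+	 * Map "ethN" to pktio ID N + 1 so that a valid ID is never zero
+	 * (assuming ODP_PKTIO_INVALID is 0); get_entry() indexes the pktio
+	 * table directly with this ID.
+	 */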
+ port++;
+ if (port > MAX_PORT_INDEX)
+ return -1;
+ return port;
}
static pktio_entry_t *get_entry(odp_pktio_t id)
@@ -60,14 +61,13 @@ static pktio_entry_t *get_entry(odp_pktio_t id)
id > ODP_CONFIG_PKTIO_ENTRIES))
return NULL;
- return &pktio_tbl->entries[id - 1];
+ return &pktio_tbl->entries[id];
}
int odp_pktio_init_global(void)
{
pktio_entry_t *pktio_entry;
- int id, i;
- int dev_num = sizeof(pktio_devs)/sizeof(pktio_devs[0]);
+ int id;
pktio_tbl = odp_shm_reserve("odp_pktio_entries",
sizeof(pktio_table_t),
@@ -82,17 +82,6 @@ int odp_pktio_init_global(void)
odp_spinlock_init(&pktio_entry->s.lock);
}
-
- /* Close all used RX channels */
- for (i = 0; i < dev_num; i++)
- ti_em_osal_cppi_rx_channel_close(Cppi_CpDma_PASS_CPDMA,
- pktio_devs[i].rx_channel);
-
- return 0;
-}
-
-int odp_pktio_init_local(void)
-{
return 0;
}
@@ -121,30 +110,6 @@ static void unlock_entry(pktio_entry_t *entry)
odp_spinlock_unlock(&entry->s.lock);
}
-static odp_pktio_t alloc_lock_pktio_entry(odp_pktio_params_t *params)
-{
- odp_pktio_t id;
- pktio_entry_t *entry;
- int i;
- (void)params;
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
- entry = &pktio_tbl->entries[i];
- if (is_free(entry)) {
- lock_entry(entry);
- if (is_free(entry)) {
- set_taken(entry);
- entry->s.inq_default = ODP_QUEUE_INVALID;
- entry->s.outq_default = ODP_QUEUE_INVALID;
- id = i + 1;
- return id; /* return with entry locked! */
- }
- unlock_entry(entry);
- }
- }
-
- return ODP_PKTIO_INVALID;
-}
-
static int free_pktio_entry(odp_pktio_t id)
{
pktio_entry_t *entry = get_entry(id);
@@ -157,57 +122,92 @@ static int free_pktio_entry(odp_pktio_t id)
return 0;
}
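+/*
+ * Template TX metadata used to pre-compute the NWAL protocol-specific
+ * command info at odp_pktio_open() time; the actual egress port is set
+ * per packet in pktio_buffer_send().
+ */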
+static nwalTxPktInfo_t tx_pkt_info = {
+ .pPkt = NULL,
+ .txFlag1 = NWAL_TX_FLAG1_META_DATA_VALID,
+ .lpbackPass = 0,
+ .enetPort = 0,
+ .mtuSize = 0,
+ .startOffset = 0,
+ .saOffBytes = 0,
+ .saPayloadLen = 0,
+ .saAhIcvOffBytes = 0,
+ .saAhMacSize = 0,
+ .etherLenOffBytes = 0,
+ .ipOffBytes = 0,
+ .l4OffBytes = 0,
+ .l4HdrLen = 0,
+ .pseudoHdrChecksum = 0,
+ .ploadLen = 0,
+};
+
odp_pktio_t odp_pktio_open(const char *dev, odp_buffer_pool_t pool,
- odp_pktio_params_t *params)
+ odp_pktio_params_t *params ODP_UNUSED)
{
odp_pktio_t id;
pktio_entry_t *pktio_entry;
char name[ODP_QUEUE_NAME_LEN];
queue_entry_t *queue_entry;
odp_queue_t qid = ODP_QUEUE_INVALID;
+ nwal_RetValue ret_nwal;
+ int port;
+
+ odp_pr_dbg("Allocating HW pktio\n");
- if (params == NULL) {
- ODP_ERR("Invalid pktio params\n");
+ /* Create a default output queue for each pktio resource */
+ port = port_index(dev);
+ if (port < 0) {
+ odp_pr_err("Wrong pktio name: %s\n", dev);
return ODP_PKTIO_INVALID;
}
- ODP_DBG("Allocating HW pktio\n");
+ /**
+ * Until classification API is in place there is no criteria to
+ * differentiate pktio except a port number. So map port directly
+ * to pktio entry.
+ */
+ id = port;
- id = alloc_lock_pktio_entry(params);
- if (id == ODP_PKTIO_INVALID) {
- ODP_ERR("No resources available.\n");
- return ODP_PKTIO_INVALID;
+ pktio_entry = get_entry(id);
+ lock_entry(pktio_entry);
+ if (!is_free(pktio_entry)) {
+ /* Entry already initialized */
+ odp_pr_dbg("PktIO %d is already initialized\n", id);
+ goto unlock;
}
- /* if successful, alloc_pktio_entry() returns with the entry locked */
- pktio_entry = get_entry(id);
+ set_taken(pktio_entry);
+ pktio_entry->s.inq_default = ODP_QUEUE_INVALID;
+ pktio_entry->s.outq_default = ODP_QUEUE_INVALID;
+ pktio_entry->s.port = port;
- /* Create a default output queue for each pktio resource */
snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id);
name[ODP_QUEUE_NAME_LEN-1] = '\0';
- pktio_entry->s.dev = _odp_pktio_dev_lookup(dev);
- if (!pktio_entry->s.dev) {
+ qid = odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL);
+ odp_pr_dbg("Created queue %u\n", (uint32_t)qid);
+ if (qid == ODP_QUEUE_INVALID) {
free_pktio_entry(id);
id = ODP_PKTIO_INVALID;
+ odp_pr_err("Couldn't create queue: %s\n", name);
goto unlock;
}
- qid = _odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL,
- pktio_entry->s.dev->tx_hw_queue);
- ODP_DBG("Created queue %u for hw queue %d\n", (uint32_t)qid,
- pktio_entry->s.dev->tx_hw_queue);
- if (qid == ODP_QUEUE_INVALID) {
- free_pktio_entry(id);
- id = ODP_PKTIO_INVALID;
+ ret_nwal = nwal_initPSCmdInfo(odp_global->nwal.handle,
+ &tx_pkt_info,
+ &pktio_entry->s.tx_ps_cmdinfo);
+
+ if (ret_nwal != nwal_OK) {
+ odp_pr_err("Couldn't create PSCmdInfo\n");
goto unlock;
}
+
pktio_entry->s.in_pool = pool;
pktio_entry->s.outq_default = qid;
+ pktio_entry->s.id = id;
queue_entry = queue_to_qentry(qid);
- queue_entry->s.pktout = id;
- queue_entry->s.out_port_id = pktio_entry->s.dev->port_id;
+ queue_entry->s.pktout_entry = pktio_entry;
unlock:
unlock_entry(pktio_entry);
return id;
@@ -216,22 +216,12 @@ unlock:
int odp_pktio_close(odp_pktio_t id)
{
pktio_entry_t *entry;
- int res = -1;
entry = get_entry(id);
if (entry == NULL)
return -1;
- lock_entry(entry);
- if (!is_free(entry)) {
- /* FIXME: Here rx/tx channels should be closed */
- res |= free_pktio_entry(id);
- }
-
- unlock_entry(entry);
-
- if (res != 0)
- return -1;
+ /* Only one entry per port exists, so no need to delete it */
return 0;
}
@@ -246,42 +236,115 @@ odp_pktio_t odp_pktio_get_input(odp_packet_t pkt)
return odp_packet_hdr(pkt)->input;
}
-int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
+static int pktio_inq_setdef_locked(odp_pktio_t id, odp_queue_t queue)
{
+ nwal_RetValue nwal_ret;
+ nwal_Handle handle;
pktio_entry_t *pktio_entry = get_entry(id);
- unsigned pkts = 0;
- odp_buffer_t buf;
+ queue_entry_t *queue_entry = queue_to_qentry(queue);
+ nwalMacParam_t mac_info = {
+ .validParams = NWAL_SET_MAC_VALID_PARAM_IFNUM,
+ .ifNum = 0,
+ .vlanId = 0,
+ .macAddr = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .remMacAddr = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .matchAction = NWAL_MATCH_ACTION_HOST,
+ .failAction = NWAL_NEXT_ROUTE_FAIL_ACTION_HOST,
+ .appRxPktFlowId = CPPI_PARAM_NOT_SPECIFIED,
+ .appRxPktQueue = QMSS_PARAM_NOT_SPECIFIED,
+ .routeType = 0,
+ };
+
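+	/*
+	 * Install a catch-all NWAL MAC classification rule for this port so
+	 * that received frames land in the given PKTIN queue, then register
+	 * the queue with the scheduler.
+	 */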
+	ODP_ASSERT(pktio_entry && queue_entry, "Invalid pktio or queue entry");
+	ODP_ASSERT(queue_entry->s.type == ODP_QUEUE_TYPE_PKTIN,
+		   "Not a PKTIN queue");
- if (pktio_entry == NULL)
+ pktio_entry->s.inq_default = queue;
+ odp_sync_stores();
+ mac_info.appRxPktQueue = _odp_queue_to_qmss_queue(queue);
+ /** @todo: Specify flow corresponding to the pool */
+ mac_info.appRxPktFlowId = QMSS_PARAM_NOT_SPECIFIED;
+ mac_info.ifNum = pktio_entry->s.port;
+
+ nwal_ret = nwal_setMacIface(odp_global->nwal.handle,
+ NWAL_TRANSID_SPIN_WAIT,
+ (nwal_AppId) (0x12345678),
+ &mac_info,
+ &handle);
+ if (nwal_ret != nwal_OK) {
+ odp_pr_err("nwal_setMacIface returned Error Code %d\n",
+ nwal_ret);
return -1;
+ }
+
+ odp_pr_info("MAC i/f added\n");
+
+ queue_lock(queue_entry);
+ queue_entry->s.pktin = id;
+ queue_entry->s.status = QUEUE_STATUS_SCHED;
+ queue_unlock(queue_entry);
+
+ odp_schedule_queue(queue, queue_entry->s.param.sched.prio);
+ return 0;
+}
+
+static int pktio_inq_create_setdef(odp_pktio_t id)
+{
+ char name[ODP_QUEUE_NAME_LEN];
+ odp_queue_param_t qparam;
+ odp_queue_t inq_def;
+ pktio_entry_t *pktio_entry = get_entry(id);
+ int ret = 0;
+
+ ODP_ASSERT(pktio_entry, "Not valid entry");
lock_entry(pktio_entry);
+ if (pktio_entry->s.inq_default != ODP_QUEUE_INVALID) {
+ ret = 0;
+ odp_pr_dbg("default input queue is already set: %u\n",
+ pktio_entry->s.inq_default);
+ goto unlock;
+ }
+
+ odp_pr_dbg("Creating default input queue\n");
+ qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
+ snprintf(name, sizeof(name), "%i-pktio_inq_default", (int)id);
+ name[ODP_QUEUE_NAME_LEN-1] = '\0';
+ inq_def = odp_queue_create(name, ODP_QUEUE_TYPE_PKTIN, &qparam);
+ if (inq_def == ODP_QUEUE_INVALID) {
+ odp_pr_err("pktio input queue creation failed\n");
+ ret = -1;
+ goto unlock;
+ }
+
+ if (pktio_inq_setdef_locked(id, inq_def)) {
+		odp_pr_err("default input queue setup failed\n");
+ ret = -1;
+ goto unlock;
+ }
+unlock:
+ unlock_entry(pktio_entry);
+ return ret;
+}
+
+int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+ unsigned pkts = 0;
+ odp_buffer_t buf;
+
+ ODP_ASSERT(pktio_entry, "Not valid entry");
if (pktio_entry->s.inq_default == ODP_QUEUE_INVALID) {
- char name[ODP_QUEUE_NAME_LEN];
- odp_queue_param_t qparam;
- odp_queue_t inq_def;
- /*
+ /**
* Create a default input queue.
- * FIXME: IT is a kind of WA for current ODP API usage.
+	 * @todo: This is a workaround for the current ODP API usage.
* It should be revised.
*/
- ODP_DBG("Creating default input queue\n");
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
- qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
- snprintf(name, sizeof(name), "%i-pktio_inq_default", (int)id);
- name[ODP_QUEUE_NAME_LEN-1] = '\0';
- inq_def = odp_queue_create(name, ODP_QUEUE_TYPE_PKTIN, &qparam);
- if (inq_def == ODP_QUEUE_INVALID) {
- ODP_ERR("pktio queue creation failed\n");
- goto unlock;
- }
-
- if (odp_pktio_inq_setdef(id, inq_def)) {
- ODP_ERR("default input-Q setup\n");
- goto unlock;
- }
+ if (pktio_inq_create_setdef(id))
+ return -1;
}
for (pkts = 0; pkts < len; pkts++) {
@@ -291,71 +354,54 @@ int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
pkt_table[pkts] = odp_packet_from_buffer(buf);
}
-unlock:
- unlock_entry(pktio_entry);
return pkts;
}
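+/*
+ * Stamp the egress port into the descriptor's protocol-specific command
+ * words and push the descriptor straight onto the hardware TX queue,
+ * bypassing the software enqueue path.
+ */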
+static inline void pktio_buffer_send(pktio_entry_t *pktio, odp_buffer_t buf)
+{
+ nwal_mCmdSetPort(_odp_buf_to_ti_pkt(buf),
+ &(pktio->s.tx_ps_cmdinfo),
+ pktio->s.port);
+
+ Qmss_queuePushDescSize(pktio->s.tx_ps_cmdinfo.txQueue,
+ _odp_buf_to_cppi_desc(buf),
+ NWAL_DESC_SIZE);
+}
+
int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
{
pktio_entry_t *pktio_entry = get_entry(id);
unsigned pkts;
- int ret;
if (pktio_entry == NULL)
return -1;
- lock_entry(pktio_entry);
-
for (pkts = 0; pkts < len; pkts++) {
- ret = odp_queue_enq(pktio_entry->s.outq_default,
- odp_buffer_from_packet(pkt_table[pkts]));
- if (ret)
- break;
+ pktio_buffer_send(pktio_entry,
+ odp_buffer_from_packet(pkt_table[pkts]));
}
- unlock_entry(pktio_entry);
return pkts;
}
int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
{
pktio_entry_t *pktio_entry = get_entry(id);
- queue_entry_t *qentry = queue_to_qentry(queue);
+ int ret = 0;
- if (pktio_entry == NULL || qentry == NULL)
- return -1;
-
- if (qentry->s.type != ODP_QUEUE_TYPE_PKTIN)
- return -1;
+ ODP_ASSERT(pktio_entry, "Not valid entry");
- pktio_entry->s.inq_default = queue;
- {
- uint32_t free_queue =
- _odp_pool_get_free_queue(pktio_entry->s.in_pool);
- ti_em_osal_cppi_rx_channel_close(Cppi_CpDma_PASS_CPDMA,
- pktio_entry->s.dev->rx_channel);
- ti_em_osal_cppi_rx_flow_open(Cppi_CpDma_PASS_CPDMA,
- pktio_entry->s.dev->rx_flow,
- qentry->s.hw_queue,
- free_queue,
- 0);
- ti_em_osal_cppi_rx_channel_open(Cppi_CpDma_PASS_CPDMA,
- pktio_entry->s.dev->rx_channel);
- ODP_DBG("%s: Opened rx flow %u with dest queue: %u and free queue: %u\n",
- __func__,
- pktio_entry->s.dev->rx_flow,
- qentry->s.hw_queue,
- free_queue);
+ lock_entry(pktio_entry);
+ if (pktio_entry->s.inq_default == ODP_QUEUE_INVALID) {
+ ret = pktio_inq_setdef_locked(id, queue);
+ } else {
+ /* Default queue can be assigned only once */
+ odp_pr_err("pktio %u: default input queue %s is already set\n",
+ id,
+ odp_queue_name(pktio_entry->s.inq_default));
+ ret = -1;
}
-
- queue_lock(qentry);
- qentry->s.pktin = id;
- qentry->s.status = QUEUE_STATUS_SCHED;
- queue_unlock(qentry);
-
- odp_schedule_queue(queue, qentry->s.param.sched.prio);
-
- return 0;
+ unlock_entry(pktio_entry);
+ return ret;
}
int odp_pktio_inq_remdef(odp_pktio_t id)
@@ -383,54 +429,52 @@ odp_queue_t odp_pktio_outq_getdef(odp_pktio_t id)
return pktio_entry->s.outq_default;
}
-int pktout_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+int pktout_enqueue(queue_entry_t *queue, odp_buffer_t buf)
{
- /*
- * Set port number directly in a descriptor.
- * TODO: Remove it when PA will be used.
- */
- ti_em_cppi_set_psflags(&buf_hdr->desc, queue->s.out_port_id);
- return queue_enq(queue, buf_hdr);
+ pktio_entry_t *pktio = queue->s.pktout_entry;
+ odp_pr_vdbg("sending packet\n");
+ odp_pr_vdbg_packet(odp_packet_from_buffer(buf));
+ pktio_buffer_send(pktio, buf);
+ return 0;
}
-int pktout_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int pktout_enq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num)
{
int i;
- uint32_t port_id = queue->s.out_port_id;
+ pktio_entry_t *pktio = queue->s.pktout_entry;
for (i = 0; i < num; i++)
- ti_em_cppi_set_psflags(&buf_hdr[i]->desc, port_id);
- return queue_enq_multi(queue, buf_hdr, num);
+ pktio_buffer_send(pktio, buf[i]);
+ return 0;
}
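+/*
+ * Freshly received buffers carry no parsed metadata: record the input
+ * pktio and run the SW parser before handing the packet to the caller.
+ */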
-static inline void update_in_packet(odp_buffer_hdr_t *buf_hdr,
+static inline void update_in_packet(odp_buffer_t buf,
odp_pktio_t pktin)
{
- if (!buf_hdr)
+ if (!odp_buffer_is_valid(buf))
return;
- odp_buffer_t buf = hdr_to_odp_buf(buf_hdr);
odp_packet_t pkt = odp_packet_from_buffer(buf);
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ struct odp_pkthdr *pkt_hdr = odp_packet_hdr(pkt);
size_t len = odp_packet_get_len(pkt);
pkt_hdr->input = pktin;
odp_packet_parse(pkt, len, 0);
}
-odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *queue)
+odp_buffer_t pktin_dequeue(queue_entry_t *queue)
{
- odp_buffer_hdr_t *buf_hdr;
- buf_hdr = queue_deq(queue);
+ odp_buffer_t buf;
+ buf = queue_deq(queue);
- update_in_packet(buf_hdr, queue->s.pktin);
- return buf_hdr;
+ update_in_packet(buf, queue->s.pktin);
+ return buf;
}
-int pktin_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int pktin_deq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num)
{
int i;
- num = queue_deq_multi(queue, buf_hdr, num);
+ num = queue_deq_multi(queue, buf, num);
for (i = 0; i < num; i++)
- update_in_packet(buf_hdr[i], queue->s.pktin);
+ update_in_packet(buf[i], queue->s.pktin);
return num;
}
@@ -1,9 +1,12 @@
-/* Copyright (c) 2013, Linaro Limited
+/*
+ * Copyright (c) 2014, Linaro Limited
+ * Copyright (c) 2014, Texas Instruments Incorporated
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp_ti_mcsdk.h>
#include <odp_queue.h>
#include <odp_queue_internal.h>
#include <odp_std_types.h>
@@ -15,7 +18,6 @@
#include <odp_shared_memory.h>
#include <odp_schedule_internal.h>
#include <odp_config.h>
-#include <configs/odp_config_platform.h>
#include <odp_packet_io_internal.h>
#include <odp_packet_io_queue.h>
#include <odp_debug.h>
@@ -48,12 +50,26 @@ queue_entry_t *get_qentry(uint32_t queue_id)
return &queue_tbl->queue[queue_id];
}
-static void queue_init(queue_entry_t *queue, const char *name,
+static int queue_init(queue_entry_t *queue, const char *name,
odp_queue_type_t type, odp_queue_param_t *param)
{
strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
queue->s.type = type;
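+	/*
+	 * Every queue type except PKTOUT is backed by a QMSS general purpose
+	 * hardware queue; PKTOUT queues push descriptors directly to the
+	 * pktio's TX queue instead (see pktout_enqueue()).
+	 */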
+ if (type != ODP_QUEUE_TYPE_PKTOUT) {
+ uint8_t allocated = 0;
+ queue->s.qmss_queue = Qmss_queueOpen(
+ Qmss_QueueType_GENERAL_PURPOSE_QUEUE,
+ QMSS_PARAM_NOT_SPECIFIED,
+ &allocated);
+ if (allocated)
+ Qmss_queueEmpty(queue->s.qmss_queue);
+ odp_pr_vdbg(">>>>>> queue_s: %p, qmss_queue: %d\n",
+ queue, queue->s.qmss_queue);
+ if (queue->s.qmss_queue < 0)
+ return -1;
+ }
+
if (param) {
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
} else {
@@ -66,16 +82,16 @@ static void queue_init(queue_entry_t *queue, const char *name,
switch (type) {
case ODP_QUEUE_TYPE_PKTIN:
- queue->s.enqueue = queue_enq;
+ queue->s.enqueue = NULL;
queue->s.dequeue = pktin_dequeue;
- queue->s.enqueue_multi = queue_enq_multi;
+ queue->s.enqueue_multi = NULL;
queue->s.dequeue_multi = pktin_deq_multi;
break;
case ODP_QUEUE_TYPE_PKTOUT:
queue->s.enqueue = pktout_enqueue;
- queue->s.dequeue = queue_deq;
+ queue->s.dequeue = NULL;
queue->s.enqueue_multi = pktout_enq_multi;
- queue->s.dequeue_multi = queue_deq_multi;
+ queue->s.dequeue_multi = NULL;
break;
default:
queue->s.enqueue = queue_enq;
@@ -85,9 +101,8 @@ static void queue_init(queue_entry_t *queue, const char *name,
break;
}
- queue->s.head = NULL;
- queue->s.tail = NULL;
queue->s.sched_buf = ODP_BUFFER_INVALID;
+ return 0;
}
@@ -95,7 +110,7 @@ int odp_queue_init_global(void)
{
uint32_t i;
- ODP_DBG("Queue init ... ");
+ odp_pr_dbg("Queue init ... ");
queue_tbl = odp_shm_reserve("odp_queues",
sizeof(queue_table_t),
@@ -112,20 +127,15 @@ int odp_queue_init_global(void)
LOCK_INIT(&queue->s.lock);
queue->s.handle = queue_from_id(i);
queue->s.status = QUEUE_STATUS_FREE;
- /*
- * TODO: HW queue is mapped dirrectly to queue_entry_t
- * instance. It may worth to allocate HW queue on open.
- */
- queue->s.hw_queue = TI_ODP_PUBLIC_QUEUE_BASE_IDX + i;
}
- ODP_DBG("done\n");
- ODP_DBG("Queue init global\n");
- ODP_DBG(" struct queue_entry_s size %zu\n",
- sizeof(struct queue_entry_s));
- ODP_DBG(" queue_entry_t size %zu\n",
- sizeof(queue_entry_t));
- ODP_DBG("\n");
+ odp_pr_dbg("done\n");
+ odp_pr_dbg("Queue init global\n");
+ odp_pr_dbg(" struct queue_entry_s size %zu\n",
+ sizeof(struct queue_entry_s));
+ odp_pr_dbg(" queue_entry_t size %zu\n",
+ sizeof(queue_entry_t));
+ odp_pr_dbg("\n");
return 0;
}
@@ -148,13 +158,15 @@ odp_schedule_sync_t odp_queue_sched_type(odp_queue_t handle)
return queue->s.param.sched.sync;
}
-odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
- odp_queue_param_t *param, uint32_t hw_queue)
+odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
+ odp_queue_param_t *param)
{
uint32_t i;
queue_entry_t *queue;
odp_queue_t handle = ODP_QUEUE_INVALID;
+ odp_pr_vdbg(">>>>>> name: %s, type: %d\n", name, type);
+
for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
queue = &queue_tbl->queue[i];
@@ -162,32 +174,26 @@ odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
continue;
LOCK(&queue->s.lock);
- if (queue->s.status == QUEUE_STATUS_FREE) {
- if (hw_queue)
- queue->s.hw_queue = hw_queue;
- /*
- * Don't open hw queue if its number is specified
- * as it is most probably opened by Linux kernel
- */
- else if (ti_em_osal_hw_queue_open(queue->s.hw_queue)
- != EM_OK) {
- UNLOCK(&queue->s.lock);
- continue;
- }
-
- queue_init(queue, name, type, param);
-
- if (type == ODP_QUEUE_TYPE_SCHED ||
- type == ODP_QUEUE_TYPE_PKTIN)
- queue->s.status = QUEUE_STATUS_NOTSCHED;
- else
- queue->s.status = QUEUE_STATUS_READY;
+ if (queue->s.status != QUEUE_STATUS_FREE) {
+ UNLOCK(&queue->s.lock);
+ continue;
+ }
- handle = queue->s.handle;
+ if (queue_init(queue, name, type, param)) {
UNLOCK(&queue->s.lock);
break;
}
+
+ if (type == ODP_QUEUE_TYPE_SCHED ||
+ type == ODP_QUEUE_TYPE_PKTIN)
+ queue->s.status = QUEUE_STATUS_NOTSCHED;
+ else
+ queue->s.status = QUEUE_STATUS_READY;
+
+ handle = queue->s.handle;
+ odp_pr_vdbg(">>>>>> handle: %u\n", handle);
UNLOCK(&queue->s.lock);
+ break;
}
if (handle != ODP_QUEUE_INVALID &&
@@ -196,7 +202,7 @@ odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
buf = odp_schedule_buffer_alloc(handle);
if (buf == ODP_BUFFER_INVALID) {
- ODP_ERR("queue_init: sched buf alloc failed\n");
+ odp_pr_err("queue_init: sched buf alloc failed\n");
return ODP_QUEUE_INVALID;
}
@@ -207,13 +213,6 @@ odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
return handle;
}
-odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
- odp_queue_param_t *param)
-{
- return _odp_queue_create(name, type, param, 0);
-}
-
-
odp_buffer_t queue_sched_buf(odp_queue_t handle)
{
queue_entry_t *queue;
@@ -255,10 +254,14 @@ odp_queue_t odp_queue_lookup(const char *name)
}
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+int queue_enq(queue_entry_t *queue, odp_buffer_t buf)
{
- _ti_hw_queue_push_desc(queue->s.hw_queue, buf_hdr);
-
+ odp_pr_vdbg("queue: %s, buf: %p, qmss_queue: %d\n",
+ queue->s.name, buf, queue->s.qmss_queue);
+ Qmss_queuePushDescSize(queue->s.qmss_queue,
+ _odp_buf_to_cppi_desc(buf),
+ NWAL_DESC_SIZE);
+#if 1
if (queue->s.type == ODP_QUEUE_TYPE_SCHED) {
int sched = 0;
LOCK(&queue->s.lock);
@@ -266,27 +269,33 @@ int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
queue->s.status = QUEUE_STATUS_SCHED;
sched = 1;
}
+ odp_pr_vdbg("status: %d, sched: %d\n", queue->s.status, sched);
UNLOCK(&queue->s.lock);
/* Add queue to scheduling */
if (sched)
odp_schedule_queue(queue->s.handle,
queue->s.param.sched.prio);
}
+#endif
return 0;
}
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num)
{
int i;
- /*
- * TODO: Should this series of buffers be enqueued atomically?
+ /**
+ * @todo: Should this series of buffers be enqueued atomically?
* Can another buffer be pushed in this queue in the middle?
*/
for (i = 0; i < num; i++) {
- /* TODO: Implement multi dequeue a lower level */
- _ti_hw_queue_push_desc(queue->s.hw_queue, buf_hdr[i]);
+		/** @todo: Implement multi enqueue at a lower level */
+ odp_pr_vdbg("queue: %s, buf: %p, qmss_queue: %d\n",
+ queue->s.name, buf[i], queue->s.qmss_queue);
+ Qmss_queuePushDescSize(queue->s.qmss_queue,
+ _odp_buf_to_cppi_desc(buf[i]),
+ NWAL_DESC_SIZE);
}
if (queue->s.type == ODP_QUEUE_TYPE_SCHED) {
@@ -296,6 +305,7 @@ int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
queue->s.status = QUEUE_STATUS_SCHED;
sched = 1;
}
+ odp_pr_vdbg("status: %d, sched: %d\n", queue->s.status, sched);
UNLOCK(&queue->s.lock);
/* Add queue to scheduling */
if (sched)
@@ -308,66 +318,65 @@ int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
int odp_queue_enq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
{
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
queue_entry_t *queue;
- int i;
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
queue = queue_to_qentry(handle);
- for (i = 0; i < num; i++)
- buf_hdr[i] = odp_buf_to_hdr(buf[i]);
-
- return queue->s.enqueue_multi(queue, buf_hdr, num);
+ ODP_ASSERT(queue->s.enqueue_multi, "No multi enqueue function");
+ return queue->s.enqueue_multi(queue, buf, num);
}
int odp_queue_enq(odp_queue_t handle, odp_buffer_t buf)
{
- odp_buffer_hdr_t *buf_hdr;
queue_entry_t *queue;
queue = queue_to_qentry(handle);
- buf_hdr = odp_buf_to_hdr(buf);
- return queue->s.enqueue(queue, buf_hdr);
+ odp_pr_vdbg(">>>>>> handle: %u, buf: %p\n", handle, buf);
+ ODP_ASSERT(queue->s.enqueue, "No enqueue function");
+ return queue->s.enqueue(queue, buf);
}
-
-odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
+odp_buffer_t queue_deq(queue_entry_t *queue)
{
- odp_buffer_hdr_t *buf_hdr;
+ Cppi_HostDesc *desc;
- buf_hdr = (odp_buffer_hdr_t *)ti_em_osal_hw_queue_pop(queue->s.hw_queue,
- TI_EM_MEM_PUBLIC_DESC);
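+	/*
+	 * Qmss_queuePop() encodes a descriptor size hint in the low address
+	 * bits; QMSS_DESC_PTR() masks it off to recover the pointer.
+	 */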
+ desc = (void *)QMSS_DESC_PTR(Qmss_queuePop(queue->s.qmss_queue));
+ odp_pr_vdbg("queue: %s, buf: %p, qmss_queue: %d\n",
+ queue->s.name, desc, queue->s.qmss_queue);
- if (!buf_hdr && queue->s.type == ODP_QUEUE_TYPE_SCHED) {
+ if (!desc && queue->s.type == ODP_QUEUE_TYPE_SCHED) {
LOCK(&queue->s.lock);
- if (!buf_hdr && queue->s.status == QUEUE_STATUS_SCHED)
+ if (!desc && queue->s.status == QUEUE_STATUS_SCHED)
queue->s.status = QUEUE_STATUS_NOTSCHED;
+ odp_pr_vdbg("status: %d\n", queue->s.status);
UNLOCK(&queue->s.lock);
}
- return buf_hdr;
+ return _cppi_desc_to_odp_buf(desc);
}
-int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_t buf[], int num)
{
int i;
for (i = 0; i < num; i++) {
- /* TODO: Implement multi dequeue a lower level */
- buf_hdr[i] = (odp_buffer_hdr_t *)ti_em_osal_hw_queue_pop(
- queue->s.hw_queue,
- TI_EM_MEM_PUBLIC_DESC);
- if (!buf_hdr[i]) {
+ Cppi_HostDesc *desc;
+		/** @todo: Implement multi dequeue at a lower level */
+ desc = Qmss_queuePop(queue->s.qmss_queue);
+ desc = (void *)QMSS_DESC_PTR(desc);
+ buf[i] = _cppi_desc_to_odp_buf(desc);
+ if (!buf[i]) {
if (queue->s.type != ODP_QUEUE_TYPE_SCHED)
break;
LOCK(&queue->s.lock);
if (queue->s.status == QUEUE_STATUS_SCHED)
queue->s.status = QUEUE_STATUS_NOTSCHED;
+ odp_pr_vdbg("status: %d\n", queue->s.status);
UNLOCK(&queue->s.lock);
break;
}
@@ -380,35 +389,24 @@ int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
int odp_queue_deq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
{
queue_entry_t *queue;
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- int i, ret;
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
queue = queue_to_qentry(handle);
- ret = queue->s.dequeue_multi(queue, buf_hdr, num);
-
- for (i = 0; i < ret; i++)
- buf[i] = hdr_to_odp_buf(buf_hdr[i]);
-
- return ret;
+ ODP_ASSERT(queue->s.dequeue_multi, "No multi dequeue function");
+ return queue->s.dequeue_multi(queue, buf, num);
}
odp_buffer_t odp_queue_deq(odp_queue_t handle)
{
queue_entry_t *queue;
- odp_buffer_hdr_t *buf_hdr;
queue = queue_to_qentry(handle);
- buf_hdr = queue->s.dequeue(queue);
-
- if (buf_hdr)
- return hdr_to_odp_buf(buf_hdr);
-
- return ODP_BUFFER_INVALID;
+ ODP_ASSERT(queue->s.dequeue, "No dequeue function");
+ return queue->s.dequeue(queue);
}
deleted file mode 100644
@@ -1,284 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_shared_memory.h>
-#include <odp_shared_memory_internal.h>
-#include <odp_internal.h>
-#include <odp_spinlock.h>
-#include <odp_align.h>
-#include <odp_system_info.h>
-#include <odp_debug.h>
-
-#include <sys/mman.h>
-#ifdef __powerpc__
-#include <asm/mman.h>
-#endif
-#include <fcntl.h>
-
-#include <stdio.h>
-#include <string.h>
-
-#include <ti_em_rh.h>
-
-#define ODP_SHM_NUM_BLOCKS 32
-
-
-typedef struct {
- char name[ODP_SHM_NAME_LEN];
- uint64_t size;
- uint64_t align;
- void *addr;
- int huge;
- ti_em_rh_mem_config_t mem_config;
-} odp_shm_block_t;
-
-
-typedef struct {
- odp_shm_block_t block[ODP_SHM_NUM_BLOCKS];
- odp_spinlock_t lock;
-
-} odp_shm_table_t;
-
-
-#define SHM_FLAGS (MAP_SHARED | MAP_ANONYMOUS)
-
-
-/* Global shared memory table */
-static odp_shm_table_t *odp_shm_tbl;
-
-
-int odp_shm_init_global(void)
-{
- void *addr;
-
-#ifndef MAP_HUGETLB
- ODP_DBG("NOTE: mmap does not support huge pages\n");
-#endif
-
- addr = mmap(NULL, sizeof(odp_shm_table_t),
- PROT_READ | PROT_WRITE, SHM_FLAGS, -1, 0);
-
- if (addr == MAP_FAILED)
- return -1;
-
- odp_shm_tbl = addr;
-
- memset(odp_shm_tbl, 0, sizeof(odp_shm_table_t));
- odp_spinlock_init(&odp_shm_tbl->lock);
-
- return 0;
-}
-
-
-int odp_shm_init_local(void)
-{
- return 0;
-}
-
-
-static int find_block(const char *name)
-{
- int i;
-
- for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
- if (strcmp(name, odp_shm_tbl->block[i].name) == 0) {
- /* found it */
- return i;
- }
- }
-
- return -1;
-}
-
-enum {
- ODP_SHM_MMAP,
- ODP_SHM_CMA
-};
-
-void *_odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
- int type)
-{
- int i;
- odp_shm_block_t *block;
-#ifdef MAP_HUGETLB
- uint64_t huge_sz, page_sz;
- ti_em_rh_mem_config_t mem_config = {0};
-
- huge_sz = odp_sys_huge_page_size();
- page_sz = odp_sys_page_size();
-#endif
-
- odp_spinlock_lock(&odp_shm_tbl->lock);
-
- if (find_block(name) >= 0) {
- /* Found a block with the same name */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return NULL;
- }
-
- for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
- if (odp_shm_tbl->block[i].addr == NULL) {
- /* Found free block */
- break;
- }
- }
-
- if (i > ODP_SHM_NUM_BLOCKS - 1) {
- /* Table full */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return NULL;
- }
-
- block = &odp_shm_tbl->block[i];
-
- /* Allocate memory */
- mem_config.size = size + align;
- mem_config.flags = TI_EM_OSAL_MEM_CACHED;
- /*
- * alloc free mapping id.
- * FIXME: mapping_id is uint32_t.
- */
- mem_config.mapping_id = -1;
-
- if (type == ODP_SHM_CMA) {
- ti_em_rh_alloc_map_cma(&mem_config);
-
- if (!mem_config.vaddr) {
- /* Alloc failed */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_ERR("%s: failed to allocate block: %-24s %4"PRIu64" %4"PRIu64"\n",
- __func__,
- name,
- size,
- align);
- return NULL;
- }
-
- } else if (type == ODP_SHM_MMAP) {
- void *addr = MAP_FAILED;
- block->huge = 0;
-
-#ifdef MAP_HUGETLB
- /* Try first huge pages */
- if (huge_sz && (size + align) > page_sz) {
- addr = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
- SHM_FLAGS | MAP_HUGETLB, -1, 0);
- }
-#endif
-
- /* Use normal pages for small or failed huge page allocations */
- if (addr == MAP_FAILED) {
- addr = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
- SHM_FLAGS, -1, 0);
- } else {
- block->huge = 1;
- }
-
- if (addr == MAP_FAILED) {
- /* Alloc failed */
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return NULL;
- }
- mem_config.vaddr = (uintptr_t)addr;
- } else {
- ODP_ERR("Unknown shared memory type: %d\n", type);
- }
-
- block->mem_config = mem_config;
-
- /* move to correct alignment */
- block->addr = ODP_ALIGN_ROUNDUP_PTR(mem_config.vaddr, align);
-
- strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
- block->name[ODP_SHM_NAME_LEN - 1] = 0;
- block->size = size;
- block->align = align;
-
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- ODP_DBG("%s: reserved block: %-24s %4"PRIu64" %4"PRIu64" %p\n",
- __func__,
- block->name,
- block->size,
- block->align,
- block->addr);
-
- return block->addr;
-}
-
-void *odp_shm_reserve(const char *name, uint64_t size, uint64_t align)
-{
- return _odp_shm_reserve(name, size, align, ODP_SHM_CMA);
-}
-
-uintptr_t _odp_shm_get_paddr(void *vaddr)
-{
- int i;
- uintptr_t addr = (uintptr_t)vaddr;
- for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
- ti_em_rh_mem_config_t *mem = &odp_shm_tbl->block[i].mem_config;
- if (mem->vaddr == 0)
- continue;
- if ((mem->vaddr <= addr) && (addr < mem->vaddr + mem->size)) {
- addr = (uintptr_t)odp_shm_tbl->block[i].addr;
- return (addr - mem->vaddr) + mem->paddr;
- }
- }
- return 0;
-}
-
-void *odp_shm_lookup(const char *name)
-{
- int i;
- void *addr;
-
- odp_spinlock_lock(&odp_shm_tbl->lock);
-
- i = find_block(name);
-
- if (i < 0) {
- odp_spinlock_unlock(&odp_shm_tbl->lock);
- return NULL;
- }
-
- addr = odp_shm_tbl->block[i].addr;
- odp_spinlock_unlock(&odp_shm_tbl->lock);
-
- return addr;
-}
-
-
-void odp_shm_print_all(void)
-{
- int i;
-
- printf("\nShared memory\n");
- printf("--------------\n");
- printf(" page size: %"PRIu64" kB\n", odp_sys_page_size() / 1024);
- printf(" huge page size: %"PRIu64" kB\n",
- odp_sys_huge_page_size() / 1024);
- printf("\n");
-
- printf(" id name kB align huge addr paddr\n");
-
- for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
- odp_shm_block_t *block;
-
- block = &odp_shm_tbl->block[i];
-
- if (block->addr) {
- printf(" %2i %-24s %4"PRIu64" %4"PRIu64" %2c %p 0x%08x\n",
- i,
- block->name,
- block->size/1024,
- block->align,
- (block->huge ? '*' : ' '),
- block->addr,
- block->mem_config.paddr);
- }
- }
-
- printf("\n");
-}