@@ -44,6 +44,8 @@
/* Maximum buffers cached in cached buffer list */
#define FASTRPC_MAX_CACHED_BUFS (32)
#define FASTRPC_MAX_CACHE_BUF_SIZE (8*1024*1024)
+/* Max no. of persistent headers pre-allocated per user process */
+#define FASTRPC_MAX_PERSISTENT_HEADERS (25)
/* Add memory to static PD pool, protection thru XPU */
#define ADSP_MMAP_HEAP_ADDR 4
@@ -54,7 +56,9 @@
/* Add memory to userPD pool, for user heap */
#define ADSP_MMAP_ADD_PAGES 0x1000
/* Add memory to userPD pool, for LLC heap */
-#define ADSP_MMAP_ADD_PAGES_LLC 0x3000,
+#define ADSP_MMAP_ADD_PAGES_LLC 0x3000
+/* Map persistent header buffer on DSP */
+#define ADSP_MMAP_PERSIST_HDR 0x4000
#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
@@ -208,6 +212,7 @@ struct fastrpc_buf {
/* mmap support */
struct list_head node; /* list of user requested mmaps */
uintptr_t raddr;
+ bool in_use; /* Busy flag for persistent header chunks */
};
struct fastrpc_dma_buf_attachment {
@@ -303,11 +308,17 @@ struct fastrpc_user {
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
+ /* Pre-allocated persistent header buffer, mapped on the DSP */
+ struct fastrpc_buf *pers_hdr_buf;
+ /* Descriptor array, one entry per page-sized chunk of pers_hdr_buf */
+ struct fastrpc_buf *hdr_bufs;
int tgid;
int pd;
/* total cached buffers */
u32 num_cached_buf;
+ /* total persistent headers */
+ u32 num_pers_hdrs;
bool is_secure_dev;
/* Lock for lists */
spinlock_t lock;
@@ -399,6 +410,37 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
return ret;
}
+static bool fastrpc_get_persistent_buf(struct fastrpc_user *fl,
+ size_t size, int buf_type, struct fastrpc_buf **obuf)
+{
+ u32 i = 0;
+ bool found = false;
+ struct fastrpc_buf *buf = NULL;
+
+ spin_lock(&fl->lock);
+ /*
+ * A persistent header buffer can back the allocation only
+ * when the metadata fits within a single page.
+ */
+ if (!fl->num_pers_hdrs || buf_type != METADATA_BUF || size > PAGE_SIZE) {
+ spin_unlock(&fl->lock);
+ return found;
+ }
+
+ for (i = 0; i < fl->num_pers_hdrs; i++) {
+ buf = &fl->hdr_bufs[i];
+ /* If the buffer is free, hand it out for this allocation */
+ if (!buf->in_use) {
+ buf->in_use = true;
+ *obuf = buf;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+ return found;
+}
+
static void __fastrpc_buf_free(struct fastrpc_buf *buf)
{
dma_free_coherent(buf->dev, buf->size, buf->virt,
@@ -430,6 +472,15 @@ static void fastrpc_cached_buf_list_add(struct fastrpc_buf *buf)
static void fastrpc_buf_free(struct fastrpc_buf *buf, bool cache)
{
+ struct fastrpc_user *fl = buf->fl;
+
+ if (buf->in_use) {
+ /* Don't free persistent header buf. Just mark as available */
+ spin_lock(&fl->lock);
+ buf->in_use = false;
+ spin_unlock(&fl->lock);
+ return;
+ }
if (cache)
fastrpc_cached_buf_list_add(buf);
else
@@ -523,6 +574,8 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
int ret;
struct fastrpc_buf *buf;
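+ /* Try a free persistent header first, then the cached buffer list */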
+ if (fastrpc_get_persistent_buf(fl, size, buf_type, obuf))
+ return 0;
if (fastrpc_get_cached_buf(fl, size, buf_type, obuf))
return 0;
ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
@@ -1305,6 +1358,107 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
return err;
}
+static int fastrpc_mem_map_to_dsp(struct fastrpc_user *fl, int fd, int offset,
+ u32 flags, u64 va, u64 phys,
+ size_t size, uintptr_t *raddr)
+{
+ struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
+ struct fastrpc_mem_map_req_msg req_msg = { 0 };
+ struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
+ struct fastrpc_phy_page pages = { 0 };
+ struct device *dev = fl->sctx->dev;
+ int err = 0;
+ u32 sc;
+
+ req_msg.pgid = fl->tgid;
+ req_msg.fd = fd;
+ req_msg.offset = offset;
+ req_msg.vaddrin = va;
+ req_msg.flags = flags;
+ req_msg.num = sizeof(pages);
+ req_msg.data_len = 0;
+
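+ /* args[0]: mapping request message (input) */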
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = phys;
+ pages.size = size;
+
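+ /* args[1]: physical page list, sized by req_msg.num (input) */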
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
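+ /* args[2]: extra payload, zero-length here (req_msg.data_len = 0) */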
+ args[2].ptr = (u64) (uintptr_t) &pages;
+ args[2].length = 0;
+
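+ /* args[3]: response carrying the DSP virtual address (output) */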
+ args[3].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[3].length = sizeof(rsp_msg);
+
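+ /* FASTRPC_RMID_INIT_MEM_MAP takes 3 input buffers and 1 output buffer */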
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
+ if (err) {
+ dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %zu\n",
+ fd, va, size);
+ return err;
+ }
+ *raddr = rsp_msg.vaddr;
+
+ return 0;
+}
+
+static int fastrpc_create_persistent_headers(struct fastrpc_user *fl)
+{
+ int err = 0;
+ int i = 0;
+ uintptr_t virtb = 0;
+ struct device *dev = fl->sctx->dev;
+ struct fastrpc_buf *hdr_bufs, *buf, *pers_hdr_buf = NULL;
+ u32 num_pers_hdrs = 0;
+ size_t hdr_buf_alloc_len = 0;
+
+ /*
+ * Pre-allocate memory for persistent header buffers, one page
+ * per header. The pool size is fixed at the upper limit,
+ * FASTRPC_MAX_PERSISTENT_HEADERS.
+ */
+ num_pers_hdrs = FASTRPC_MAX_PERSISTENT_HEADERS;
+ hdr_buf_alloc_len = num_pers_hdrs * PAGE_SIZE;
+ err = fastrpc_buf_alloc(fl, dev, hdr_buf_alloc_len,
+ METADATA_BUF, &pers_hdr_buf);
+ if (err)
+ return err;
+
+ virtb = (uintptr_t)pers_hdr_buf->virt;
+ err = fastrpc_mem_map_to_dsp(fl, -1, 0,
+ ADSP_MMAP_PERSIST_HDR, 0, pers_hdr_buf->phys,
+ pers_hdr_buf->size, &pers_hdr_buf->raddr);
+ if (err)
+ goto err_dsp_map;
+
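+ /* One fastrpc_buf descriptor per page-sized persistent header */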
+ hdr_bufs = kcalloc(num_pers_hdrs, sizeof(struct fastrpc_buf),
+ GFP_KERNEL);
+ if (!hdr_bufs) {
+ err = -ENOMEM;
+ goto err_dsp_map;
+ }
+
+ spin_lock(&fl->lock);
+ fl->pers_hdr_buf = pers_hdr_buf;
+ fl->num_pers_hdrs = num_pers_hdrs;
+ fl->hdr_bufs = hdr_bufs;
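+ /* Carve the single DMA allocation into page-sized header buffers */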
+ for (i = 0; i < num_pers_hdrs; i++) {
+ buf = &fl->hdr_bufs[i];
+ buf->fl = fl;
+ buf->virt = (void *)(virtb + (i * PAGE_SIZE));
+ buf->phys = pers_hdr_buf->phys + (i * PAGE_SIZE);
+ buf->size = PAGE_SIZE;
+ buf->type = pers_hdr_buf->type;
+ buf->in_use = false;
+ }
+ spin_unlock(&fl->lock);
+
+ return 0;
+err_dsp_map:
+ fastrpc_buf_free(pers_hdr_buf, false);
+ return err;
+}
+
static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
/* Check if the device node is non-secure and channel is secure*/
@@ -1537,6 +1691,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
if (err)
goto err_invoke;
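+ /* Persistent headers are set up only for CDSP user processes */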
+ if (fl->cctx->domain_id == CDSP_DOMAIN_ID) {
+ err = fastrpc_create_persistent_headers(fl);
+ if (err)
+ goto err_invoke;
+ }
+
kfree(args);
return 0;
@@ -1629,6 +1789,10 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_buf_free(buf, false);
}
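+ /* Free the persistent header pool and its descriptor array */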
+ if (fl->pers_hdr_buf)
+ fastrpc_buf_free(fl->pers_hdr_buf, false);
+ kfree(fl->hdr_bufs);
+
fastrpc_cached_buf_list_free(fl);
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -2089,16 +2253,11 @@ static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
- struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
- struct fastrpc_mem_map_req_msg req_msg = { 0 };
- struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
struct fastrpc_mem_unmap req_unmap = { 0 };
- struct fastrpc_phy_page pages = { 0 };
- struct fastrpc_mem_map req;
+ struct fastrpc_mem_map req = { 0 };
struct device *dev = fl->sctx->dev;
struct fastrpc_map *map = NULL;
int err;
+ uintptr_t raddr = 0;
- u32 sc;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -2110,57 +2269,29 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
return err;
}
- req_msg.pgid = fl->tgid;
- req_msg.fd = req.fd;
- req_msg.offset = req.offset;
- req_msg.vaddrin = req.vaddrin;
map->va = (void *) (uintptr_t) req.vaddrin;
- req_msg.flags = req.flags;
- req_msg.num = sizeof(pages);
- req_msg.data_len = 0;
-
- args[0].ptr = (u64) (uintptr_t) &req_msg;
- args[0].length = sizeof(req_msg);
-
- pages.addr = map->phys;
- pages.size = map->size;
-
- args[1].ptr = (u64) (uintptr_t) &pages;
- args[1].length = sizeof(pages);
-
- args[2].ptr = (u64) (uintptr_t) &pages;
- args[2].length = 0;
-
- args[3].ptr = (u64) (uintptr_t) &rsp_msg;
- args[3].length = sizeof(rsp_msg);
-
- sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
- err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
+ /* Map the buffer on the DSP and get the remote virtual address */
+ err = fastrpc_mem_map_to_dsp(fl, map->fd, req.offset,
+ req.flags, req.vaddrin, map->phys,
+ map->size, &raddr);
if (err) {
- dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
- req.fd, req.vaddrin, map->size);
+ dev_err(dev, "failed to map buffer on dsp, fd = %d\n", map->fd);
goto err_invoke;
}
/* update the buffer to be able to deallocate the memory on the DSP */
- map->raddr = rsp_msg.vaddr;
+ map->raddr = raddr;

/* let the client know the address to use */
- req.vaddrout = rsp_msg.vaddr;
+ req.vaddrout = raddr;
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
/* unmap the memory and release the buffer */
- req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
+ req_unmap.vaddr = raddr;
req_unmap.length = map->size;
fastrpc_req_mem_unmap_impl(fl, &req_unmap);
return -EFAULT;
}
-
return 0;
-
err_invoke:
fastrpc_map_put(map);
-
return err;
}