@@ -221,7 +221,7 @@ int hmm_bo_allocated(struct hmm_buffer_object *bo);
*/
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
enum hmm_bo_type type, int from_highmem,
- const void __user *userptr, bool cached);
+ const void __user *userptr);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
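With the cached flag gone, hmm_bo_alloc_pages() takes four arguments. A minimal sketch of an updated caller, assuming an already-created buffer object (the wrapper function itself is hypothetical, not part of the driver):

/* Hypothetical caller updated to the new four-argument signature. */
static int example_alloc_isp_pages(struct hmm_buffer_object *bo)
{
	/* Private (ISP-only) buffers pass a NULL userptr. */
	return hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0 /* from_highmem */, NULL);
}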
@@ -825,9 +825,6 @@ struct atomisp_s_runmode {
__u32 mode;
};
-#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */
-#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */
-
struct atomisp_update_exposure {
unsigned int gain;
unsigned int digi_gain;
@@ -174,7 +174,6 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
{
unsigned int pgnr;
struct hmm_buffer_object *bo;
- bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
int ret;
/*
@@ -195,7 +194,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
}
/* Allocate pages for memory */
- ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+ ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr);
if (ret) {
dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
goto alloc_page_err;
@@ -209,8 +208,8 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
}
dev_dbg(atomisp_dev,
- "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
- __func__, bo->start, bytes, type, from_highmem, userptr, cached);
+ "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p\n",
+ __func__, bo->start, bytes, type, from_highmem, userptr);
return bo->start;
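After these hunks hmm_alloc() still takes its attrs argument, but no longer derives a cache mode from it: every allocation ends up uncached. A hedged sketch of a call site under that assumption (argument values are illustrative only):

/* Illustrative call: attrs can no longer request a cached mapping,
 * so a private allocation is always uncached after this patch.
 */
ia_css_ptr addr = hmm_alloc(bytes, HMM_BO_PRIVATE, 0 /* from_highmem */,
			    NULL /* userptr */, 0 /* attrs */);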
@@ -651,8 +651,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
/*Allocate pages which will be used only by ISP*/
static int alloc_private_pages(struct hmm_buffer_object *bo,
- int from_highmem,
- bool cached)
+ int from_highmem)
{
int ret;
unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
@@ -730,19 +729,17 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
} else {
blk_pgnr = order_to_nr(order);
- if (!cached) {
- /*
- * set memory to uncacheable -- UC_MINUS
- */
- ret = set_pages_uc(pages, blk_pgnr);
- if (ret) {
- dev_err(atomisp_dev,
- "set page uncacheablefailed.\n");
- __free_pages(pages, order);
- goto cleanup;
- }
+ /*
+ * set memory to uncacheable -- UC_MINUS
+ */
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set page uncacheable failed.\n");
+ __free_pages(pages, order);
+ goto cleanup;
}
for (j = 0; j < blk_pgnr; j++, i++) {
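For readability, here is how the touched region reads once the hunk is applied, reconstructed from the + lines above (indentation approximate; the per-page loop then follows as shown in the context lines):

} else {
	blk_pgnr = order_to_nr(order);

	/*
	 * set memory to uncacheable -- UC_MINUS
	 */
	ret = set_pages_uc(pages, blk_pgnr);
	if (ret) {
		dev_err(atomisp_dev,
			"set page uncacheable failed.\n");
		__free_pages(pages, order);
		goto cleanup;
	}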
@@ -797,7 +794,7 @@ static void free_user_pages(struct hmm_buffer_object *bo,
* Convert user space virtual address into pages list
*/
static int alloc_user_pages(struct hmm_buffer_object *bo,
- const void __user *userptr, bool cached)
+ const void __user *userptr)
{
int page_nr;
int i;
@@ -895,7 +892,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
*/
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
enum hmm_bo_type type, int from_highmem,
- const void __user *userptr, bool cached)
+ const void __user *userptr)
{
int ret = -EINVAL;
@@ -909,9 +906,9 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
* add HMM_BO_USER type
*/
if (type == HMM_BO_PRIVATE) {
- ret = alloc_private_pages(bo, from_highmem, cached);
+ ret = alloc_private_pages(bo, from_highmem);
} else if (type == HMM_BO_USER) {
- ret = alloc_user_pages(bo, userptr, cached);
+ ret = alloc_user_pages(bo, userptr);
} else {
dev_err(atomisp_dev, "invalid buffer type.\n");
ret = -EINVAL;
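Since every private allocation now goes through set_pages_uc(), each page must be flipped back to write-back before it is returned to the page allocator. A self-contained sketch of that round-trip, assuming the x86 set_memory.h helpers (both example functions are hypothetical, not driver code):

#include <linux/gfp.h>
#include <asm/set_memory.h>	/* set_pages_uc(), set_pages_wb() */

/* Hypothetical helper: allocate one page and mark it UC_MINUS,
 * mirroring what alloc_private_pages() now does unconditionally.
 */
static struct page *example_alloc_uncached_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	if (set_pages_uc(page, 1)) {
		/* Attribute change failed; the page is still WB, safe to free. */
		__free_pages(page, 0);
		return NULL;
	}
	return page;
}

/* Counterpart: restore the write-back attribute before freeing. */
static void example_free_uncached_page(struct page *page)
{
	set_pages_wb(page, 1);
	__free_pages(page, 0);
}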