@@ -148,6 +148,10 @@ void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align);
*/
void lmb_reserve_common(void *fdt_blob);
+#if defined(CONFIG_SANDBOX)
+void lmb_init(void);
+#endif /* CONFIG_SANDBOX */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_LMB_H */
@@ -718,3 +718,20 @@ static int efi_mem_map_update_sync(void *ctx, struct event *event)
}
EVENT_SPY_FULL(EVT_EFI_MEM_MAP_UPDATE, efi_mem_map_update_sync);
#endif /* MEM_MAP_UPDATE_NOTIFY */
+
+#if CONFIG_IS_ENABLED(SANDBOX)
+void lmb_init(void)
+{
+#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
+ lmb.memory.max = CONFIG_LMB_MAX_REGIONS;
+ lmb.reserved.max = CONFIG_LMB_MAX_REGIONS;
+#else
+ lmb.memory.max = CONFIG_LMB_MEMORY_REGIONS;
+ lmb.reserved.max = CONFIG_LMB_RESERVED_REGIONS;
+ lmb.memory.region = memory_regions;
+ lmb.reserved.region = reserved_regions;
+#endif
+ lmb.memory.cnt = 0;
+ lmb.reserved.cnt = 0;
+}
+#endif /* SANDBOX */
@@ -77,6 +77,8 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
ut_assert(alloc_64k_addr >= ram + 8);
ut_assert(alloc_64k_end <= ram_end - 8);
+ lmb_init();
+
if (ram0_size) {
ret = lmb_add(ram0, ram0_size);
ut_asserteq(ret, 0);
@@ -235,6 +237,8 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
@@ -299,6 +303,8 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
@@ -382,6 +388,8 @@ static int lib_test_lmb_at_0(struct unit_test_state *uts)
long ret;
phys_addr_t a, b;
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
@@ -418,6 +426,8 @@ static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
const phys_size_t ram_size = 0x20000000;
long ret;
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
@@ -473,6 +483,8 @@ static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
@@ -597,6 +609,8 @@ static int test_get_unreserved_size(struct unit_test_state *uts,
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
@@ -664,6 +678,8 @@ static int lib_test_lmb_max_regions(struct unit_test_state *uts)
phys_addr_t offset;
int ret, i;
+ lmb_init();
+
ut_asserteq(lmb.memory.cnt, 0);
ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
ut_asserteq(lmb.reserved.cnt, 0);
@@ -722,6 +738,8 @@ static int lib_test_lmb_flags(struct unit_test_state *uts)
const phys_size_t ram_size = 0x20000000;
long ret;
+ lmb_init();
+
ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
The LMB allocations are now persistent and global, and with that all
the local instances of the structure variable have been removed. Every
LMB test case that is run requires a clean slate of the structure --
facilitate that by adding an initialisation function which gets called
at the start of every test.

Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
---
 include/lmb.h  |  4 ++++
 lib/lmb.c      | 17 +++++++++++++++++
 test/lib/lmb.c | 18 ++++++++++++++++++
 3 files changed, 39 insertions(+)
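
For reference, a minimal sketch of how a new test in test/lib/lmb.c
would follow the same pattern after this change: call lmb_init() before
touching the persistent state, then operate on the global lmb structure
as the existing tests above do. The test name, the RAM values and the
LIB_TEST registration macro are illustrative assumptions and not part
of this patch.

/* Hypothetical example test, for illustration only -- not in this patch */
static int lib_test_lmb_example(struct unit_test_state *uts)
{
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	long ret;

	/* Start from a clean slate of the global, persistent structure */
	lmb_init();
	ut_asserteq(lmb.memory.cnt, 0);
	ut_asserteq(lmb.reserved.cnt, 0);

	/* Add a RAM region and check that it is accounted for */
	ret = lmb_add(ram, ram_size);
	ut_asserteq(ret, 0);
	ut_asserteq(lmb.memory.cnt, 1);

	return 0;
}
LIB_TEST(lib_test_lmb_example, 0);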