diff mbox series

[RFC,09/31] lmb: allow for resizing lmb regions

Message ID 20240607185240.1892031-10-sughosh.ganu@linaro.org
State Superseded
Headers show
Series Make U-Boot memory reservations coherent | expand

Commit Message

Sughosh Ganu June 7, 2024, 6:52 p.m. UTC
Allow for resizing of LMB regions if the region attributes match. The
current code returns a failure status on detecting an overlapping
address. This worked up until now since the LMB calls were not
persistent and global -- the LMB memory map was specific and private
to a given caller of the LMB APIs.

With the change in the LMB code to make the LMB reservations
persistent, there needs to be a check on whether the memory region can
be resized, and then do it if so. To distinguish between memory that
cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
of memory with this attribute would indicate that the region cannot be
resized.

Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
---
 include/lmb.h |   1 +
 lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 107 insertions(+), 14 deletions(-)

Comments

Ilias Apalodimas June 10, 2024, 12:03 p.m. UTC | #1
Hi Sughosh


On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
>
> Allow for resizing of LMB regions if the region attributes match. The
> current code returns a failure status on detecting an overlapping
> address. This worked up until now since the LMB calls were not
> persistent and global -- the LMB memory map was specific and private
> to a given caller of the LMB API's.
>
> With the change in the LMB code to make the LMB reservations
> persistent, there needs to be a check on whether the memory region can
> be resized, and then do it if so. To distinguish between memory that
> cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> of memory with this attribute would indicate that the region cannot be
> resized.

Can you think of a memory region that needs to be protected from resizing?
I think we should design this a bit differently.  For example, think
of someone loading a file in a memory region and then a subsystem
trying to resize its reserved memory overwriting that region. Instead
of this flag why don't we add
- A flag that indicates whether this region can be re-used (IOW
overwrites it), but only if the 'subsystem id' defined below matches
- a u32 that indicates the subsystem ID that initiated the transaction
-- e.g EFI, load command, U-Boot core .. etc

Resizing can be enabled unconditionally in that case as long as there
is enough space. The only requirement would be that the request comes
from the same subsystem that reserved the memory in the beginning

Thanks
/Ilias

>
> Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> ---
>  include/lmb.h |   1 +
>  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
>  2 files changed, 107 insertions(+), 14 deletions(-)
>
> diff --git a/include/lmb.h b/include/lmb.h
> index 03bce2a50c..1d4cd255d2 100644
> --- a/include/lmb.h
> +++ b/include/lmb.h
> @@ -20,6 +20,7 @@
>  enum lmb_flags {
>         LMB_NONE                = 0x0,
>         LMB_NOMAP               = 0x4,
> +       LMB_NOOVERWRITE         = 0x8,
>  };
>
>  /**
> diff --git a/lib/lmb.c b/lib/lmb.c
> index de5a2cf23b..0a4f3d5bcd 100644
> --- a/lib/lmb.c
> +++ b/lib/lmb.c
> @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
>         }
>  }
>
> +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> +                                  enum lmb_flags flags)
> +{
> +       return rgn->region[r1].flags == flags;
> +}
> +
> +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> +                                     phys_addr_t base, phys_size_t size,
> +                                     enum lmb_flags flags)
> +{
> +       phys_size_t rgnsize;
> +       unsigned long rgn_cnt, idx;
> +       phys_addr_t rgnbase, rgnend;
> +       phys_addr_t mergebase, mergeend;
> +
> +       rgn_cnt = 0;
> +       idx = i;
> +       /*
> +        * First thing to do is to identify how many regions does
> +        * the requested region overlap.
> +        * If the flags match, combine all these overlapping
> +        * regions into a single region, and remove the merged
> +        * regions.
> +        */
> +       while (idx < rgn->cnt - 1) {
> +               rgnbase = rgn->region[idx].base;
> +               rgnsize = rgn->region[idx].size;
> +
> +               if (lmb_addrs_overlap(base, size, rgnbase,
> +                                     rgnsize)) {
> +                       if (!lmb_region_flags_match(rgn, idx, flags))
> +                               return -1;
> +                       rgn_cnt++;
> +                       idx++;
> +               }
> +       }
> +
> +       /* The merged region's base and size */
> +       rgnbase = rgn->region[i].base;
> +       mergebase = min(base, rgnbase);
> +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> +       mergeend = max(rgnend, (base + size));
> +
> +       rgn->region[i].base = mergebase;
> +       rgn->region[i].size = mergeend - mergebase;
> +
> +       /* Now remove the merged regions */
> +       while (--rgn_cnt)
> +               lmb_remove_region(rgn, i + 1);
> +
> +       return 0;
> +}
> +
> +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> +                              phys_addr_t base, phys_size_t size,
> +                              enum lmb_flags flags)
> +{
> +       long ret = 0;
> +       phys_addr_t rgnend;
> +
> +       if (i == rgn->cnt - 1 ||
> +               base + size < rgn->region[i + 1].base) {
> +               if (!lmb_region_flags_match(rgn, i, flags))
> +                       return -1;
> +
> +               rgnend = rgn->region[i].base + rgn->region[i].size;
> +               rgn->region[i].base = min(base, rgn->region[i].base);
> +               rgnend = max(base + size, rgnend);
> +               rgn->region[i].size = rgnend - rgn->region[i].base;
> +       } else {
> +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> +       }
> +
> +       return ret;
> +}
> +
>  /* This routine called with relocation disabled. */
>  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>                                  phys_size_t size, enum lmb_flags flags)
>  {
>         unsigned long coalesced = 0;
> -       long adjacent, i;
> +       long ret, i;
>
>         if (rgn->cnt == 0) {
>                 rgn->region[0].base = base;
> @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>                                 return -1; /* regions with new flags */
>                 }
>
> -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> -               if (adjacent > 0) {
> +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> +               if (ret > 0) {
>                         if (flags != rgnflags)
>                                 break;
>                         rgn->region[i].base -= size;
>                         rgn->region[i].size += size;
>                         coalesced++;
>                         break;
> -               } else if (adjacent < 0) {
> +               } else if (ret < 0) {
>                         if (flags != rgnflags)
>                                 break;
>                         rgn->region[i].size += size;
>                         coalesced++;
>                         break;
>                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> -                       /* regions overlap */
> -                       return -1;
> +                       if (flags == LMB_NONE) {
> +                               ret = lmb_resize_regions(rgn, i, base, size,
> +                                                        flags);
> +                               if (ret < 0)
> +                                       return -1;
> +
> +                               coalesced++;
> +                               break;
> +                       } else {
> +                               return -1;
> +                       }
>                 }
>         }
>
> @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
>  }
>
>  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> -                                   phys_addr_t max_addr)
> +                                   phys_addr_t max_addr, enum lmb_flags flags)
>  {
>         long i, rgn;
>         phys_addr_t base = 0;
> @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>  {
>         phys_addr_t alloc;
>
> -       alloc = __lmb_alloc_base(size, align, max_addr);
> +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
>
>         if (alloc == 0)
>                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>         return alloc;
>  }
>
> -/*
> - * Try to allocate a specific address range: must be in defined memory but not
> - * reserved
> - */
> -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> +                                   enum lmb_flags flags)
>  {
>         long rgn;
>
> @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
>                                       lmb.memory.region[rgn].size,
>                                       base + size - 1, 1)) {
>                         /* ok, reserve the memory */
> -                       if (lmb_reserve(base, size) >= 0)
> +                       if (lmb_reserve_flags(base, size, flags) >= 0)
>                                 return base;
>                 }
>         }
> +
>         return 0;
>  }
>
> +/*
> + * Try to allocate a specific address range: must be in defined memory but not
> + * reserved
> + */
> +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> +{
> +       return __lmb_alloc_addr(base, size, LMB_NONE);
> +}
> +
>  /* Return number of bytes from a given address that are free */
>  phys_size_t lmb_get_free_size(phys_addr_t addr)
>  {
> --
> 2.34.1
>
Sughosh Ganu June 10, 2024, 12:20 p.m. UTC | #2
hi Ilias,

On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
<ilias.apalodimas@linaro.org> wrote:
>
> Hi Sughosh
>
>
> On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> >
> > Allow for resizing of LMB regions if the region attributes match. The
> > current code returns a failure status on detecting an overlapping
> > address. This worked up until now since the LMB calls were not
> > persistent and global -- the LMB memory map was specific and private
> > to a given caller of the LMB API's.
> >
> > With the change in the LMB code to make the LMB reservations
> > persistent, there needs to be a check on whether the memory region can
> > be resized, and then do it if so. To distinguish between memory that
> > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > of memory with this attribute would indicate that the region cannot be
> > resized.
>
> Can you think of a memory region that needs to be protected from resizing?

Actually, I think I could use a better term instead of 'resize'. The
aim of this patch is to allow re-allocation/re-reservation of the same
region of memory -- this will only be relevant for the LMB
reservations, as these are used for loading images into memory. All
other allocations(EFI currently) are true allocations in that these
should not get overwritten at all. Memory once allocated, say for
loading an EFI image, cannot be re-requested.


> I think we should design this a bit differently.  For example, think
> of someone loading a file in a memory region and then a subsystem
> trying to resize its reserved memory overwriting that region. Instead
> of this flag why don't we add
> - A flag that indicates whether this region can be re-used (IOW
> overwrites it), but only if the 'subsytem id' defined below matches
> - a u32 that indicates the subsystem ID that initiated the transaction
> -- e.g EFI, load command, U-Boot core .. etc
>
> Resizing can be enabled unconditionally in that case as long as there
> is enough space. The only requirement would be that the request comes
> from the same subsystem that reserved the memory in the beginning

Like I mentioned above, resizing (or rather re-allocations) should only
be allowed for the LMB subsystem. Any other module should not be
returned an already allocated address. Which is why I mark any memory
map update coming from the EFI module as no-overwrite.

-sughosh
>
> Thanks
> /Ilias
>
> >
> > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > ---
> >  include/lmb.h |   1 +
> >  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> >  2 files changed, 107 insertions(+), 14 deletions(-)
> >
> > diff --git a/include/lmb.h b/include/lmb.h
> > index 03bce2a50c..1d4cd255d2 100644
> > --- a/include/lmb.h
> > +++ b/include/lmb.h
> > @@ -20,6 +20,7 @@
> >  enum lmb_flags {
> >         LMB_NONE                = 0x0,
> >         LMB_NOMAP               = 0x4,
> > +       LMB_NOOVERWRITE         = 0x8,
> >  };
> >
> >  /**
> > diff --git a/lib/lmb.c b/lib/lmb.c
> > index de5a2cf23b..0a4f3d5bcd 100644
> > --- a/lib/lmb.c
> > +++ b/lib/lmb.c
> > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> >         }
> >  }
> >
> > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > +                                  enum lmb_flags flags)
> > +{
> > +       return rgn->region[r1].flags == flags;
> > +}
> > +
> > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > +                                     phys_addr_t base, phys_size_t size,
> > +                                     enum lmb_flags flags)
> > +{
> > +       phys_size_t rgnsize;
> > +       unsigned long rgn_cnt, idx;
> > +       phys_addr_t rgnbase, rgnend;
> > +       phys_addr_t mergebase, mergeend;
> > +
> > +       rgn_cnt = 0;
> > +       idx = i;
> > +       /*
> > +        * First thing to do is to identify how many regions does
> > +        * the requested region overlap.
> > +        * If the flags match, combine all these overlapping
> > +        * regions into a single region, and remove the merged
> > +        * regions.
> > +        */
> > +       while (idx < rgn->cnt - 1) {
> > +               rgnbase = rgn->region[idx].base;
> > +               rgnsize = rgn->region[idx].size;
> > +
> > +               if (lmb_addrs_overlap(base, size, rgnbase,
> > +                                     rgnsize)) {
> > +                       if (!lmb_region_flags_match(rgn, idx, flags))
> > +                               return -1;
> > +                       rgn_cnt++;
> > +                       idx++;
> > +               }
> > +       }
> > +
> > +       /* The merged region's base and size */
> > +       rgnbase = rgn->region[i].base;
> > +       mergebase = min(base, rgnbase);
> > +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > +       mergeend = max(rgnend, (base + size));
> > +
> > +       rgn->region[i].base = mergebase;
> > +       rgn->region[i].size = mergeend - mergebase;
> > +
> > +       /* Now remove the merged regions */
> > +       while (--rgn_cnt)
> > +               lmb_remove_region(rgn, i + 1);
> > +
> > +       return 0;
> > +}
> > +
> > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > +                              phys_addr_t base, phys_size_t size,
> > +                              enum lmb_flags flags)
> > +{
> > +       long ret = 0;
> > +       phys_addr_t rgnend;
> > +
> > +       if (i == rgn->cnt - 1 ||
> > +               base + size < rgn->region[i + 1].base) {
> > +               if (!lmb_region_flags_match(rgn, i, flags))
> > +                       return -1;
> > +
> > +               rgnend = rgn->region[i].base + rgn->region[i].size;
> > +               rgn->region[i].base = min(base, rgn->region[i].base);
> > +               rgnend = max(base + size, rgnend);
> > +               rgn->region[i].size = rgnend - rgn->region[i].base;
> > +       } else {
> > +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > +       }
> > +
> > +       return ret;
> > +}
> > +
> >  /* This routine called with relocation disabled. */
> >  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >                                  phys_size_t size, enum lmb_flags flags)
> >  {
> >         unsigned long coalesced = 0;
> > -       long adjacent, i;
> > +       long ret, i;
> >
> >         if (rgn->cnt == 0) {
> >                 rgn->region[0].base = base;
> > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >                                 return -1; /* regions with new flags */
> >                 }
> >
> > -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > -               if (adjacent > 0) {
> > +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > +               if (ret > 0) {
> >                         if (flags != rgnflags)
> >                                 break;
> >                         rgn->region[i].base -= size;
> >                         rgn->region[i].size += size;
> >                         coalesced++;
> >                         break;
> > -               } else if (adjacent < 0) {
> > +               } else if (ret < 0) {
> >                         if (flags != rgnflags)
> >                                 break;
> >                         rgn->region[i].size += size;
> >                         coalesced++;
> >                         break;
> >                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > -                       /* regions overlap */
> > -                       return -1;
> > +                       if (flags == LMB_NONE) {
> > +                               ret = lmb_resize_regions(rgn, i, base, size,
> > +                                                        flags);
> > +                               if (ret < 0)
> > +                                       return -1;
> > +
> > +                               coalesced++;
> > +                               break;
> > +                       } else {
> > +                               return -1;
> > +                       }
> >                 }
> >         }
> >
> > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> >  }
> >
> >  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > -                                   phys_addr_t max_addr)
> > +                                   phys_addr_t max_addr, enum lmb_flags flags)
> >  {
> >         long i, rgn;
> >         phys_addr_t base = 0;
> > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >  {
> >         phys_addr_t alloc;
> >
> > -       alloc = __lmb_alloc_base(size, align, max_addr);
> > +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> >
> >         if (alloc == 0)
> >                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >         return alloc;
> >  }
> >
> > -/*
> > - * Try to allocate a specific address range: must be in defined memory but not
> > - * reserved
> > - */
> > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > +                                   enum lmb_flags flags)
> >  {
> >         long rgn;
> >
> > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> >                                       lmb.memory.region[rgn].size,
> >                                       base + size - 1, 1)) {
> >                         /* ok, reserve the memory */
> > -                       if (lmb_reserve(base, size) >= 0)
> > +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> >                                 return base;
> >                 }
> >         }
> > +
> >         return 0;
> >  }
> >
> > +/*
> > + * Try to allocate a specific address range: must be in defined memory but not
> > + * reserved
> > + */
> > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > +{
> > +       return __lmb_alloc_addr(base, size, LMB_NONE);
> > +}
> > +
> >  /* Return number of bytes from a given address that are free */
> >  phys_size_t lmb_get_free_size(phys_addr_t addr)
> >  {
> > --
> > 2.34.1
> >
Ilias Apalodimas June 10, 2024, 12:47 p.m. UTC | #3
On Mon, 10 Jun 2024 at 15:20, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
>
> hi Ilias,
>
> On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> <ilias.apalodimas@linaro.org> wrote:
> >
> > Hi Sughosh
> >
> >
> > On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > >
> > > Allow for resizing of LMB regions if the region attributes match. The
> > > current code returns a failure status on detecting an overlapping
> > > address. This worked up until now since the LMB calls were not
> > > persistent and global -- the LMB memory map was specific and private
> > > to a given caller of the LMB API's.
> > >
> > > With the change in the LMB code to make the LMB reservations
> > > persistent, there needs to be a check on whether the memory region can
> > > be resized, and then do it if so. To distinguish between memory that
> > > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > > of memory with this attribute would indicate that the region cannot be
> > > resized.
> >
> > Can you think of a memory region that needs to be protected from resizing?
>
> Actually, I think I could use a better term instead of 'resize'. The
> aim of this patch is to allow re-allocation/re-reservation of the same
> region of memory -- this will only be relevant for the LMB
> reservations, as these are used for loading images into memory. All
> other allocations(EFI currently) are true allocations in that these
> should not get overwritten at all. Memory once allocated, say for
> loading an EFI image, cannot be re-requested.
>
>
> > I think we should design this a bit differently.  For example, think
> > of someone loading a file in a memory region and then a subsystem
> > trying to resize its reserved memory overwriting that region. Instead
> > of this flag why don't we add
> > - A flag that indicates whether this region can be re-used (IOW
> > overwrites it), but only if the 'subsytem id' defined below matches
> > - a u32 that indicates the subsystem ID that initiated the transaction
> > -- e.g EFI, load command, U-Boot core .. etc
> >
> > Resizing can be enabled unconditionally in that case as long as there
> > is enough space. The only requirement would be that the request comes
> > from the same subsystem that reserved the memory in the beginning
>
> Like I mentioned above, resizing(or rather re-allocations) should only
> be allowed for the LMB subsystem. Any other module should not be
> returned an already allocated address. Which is why I mark any memory
> map update coming from the EFI module as no-overwrite.

And what happens if someone tries to overwrite 'load' memory? Won't
you still corrupt whatever is loaded on that region as it's not marked
for protection?

/Ilias
>
> -sughosh
> >
> > Thanks
> > /Ilias
> >
> > >
> > > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > > ---
> > >  include/lmb.h |   1 +
> > >  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> > >  2 files changed, 107 insertions(+), 14 deletions(-)
> > >
> > > diff --git a/include/lmb.h b/include/lmb.h
> > > index 03bce2a50c..1d4cd255d2 100644
> > > --- a/include/lmb.h
> > > +++ b/include/lmb.h
> > > @@ -20,6 +20,7 @@
> > >  enum lmb_flags {
> > >         LMB_NONE                = 0x0,
> > >         LMB_NOMAP               = 0x4,
> > > +       LMB_NOOVERWRITE         = 0x8,
> > >  };
> > >
> > >  /**
> > > diff --git a/lib/lmb.c b/lib/lmb.c
> > > index de5a2cf23b..0a4f3d5bcd 100644
> > > --- a/lib/lmb.c
> > > +++ b/lib/lmb.c
> > > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> > >         }
> > >  }
> > >
> > > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > > +                                  enum lmb_flags flags)
> > > +{
> > > +       return rgn->region[r1].flags == flags;
> > > +}
> > > +
> > > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > > +                                     phys_addr_t base, phys_size_t size,
> > > +                                     enum lmb_flags flags)
> > > +{
> > > +       phys_size_t rgnsize;
> > > +       unsigned long rgn_cnt, idx;
> > > +       phys_addr_t rgnbase, rgnend;
> > > +       phys_addr_t mergebase, mergeend;
> > > +
> > > +       rgn_cnt = 0;
> > > +       idx = i;
> > > +       /*
> > > +        * First thing to do is to identify how many regions does
> > > +        * the requested region overlap.
> > > +        * If the flags match, combine all these overlapping
> > > +        * regions into a single region, and remove the merged
> > > +        * regions.
> > > +        */
> > > +       while (idx < rgn->cnt - 1) {
> > > +               rgnbase = rgn->region[idx].base;
> > > +               rgnsize = rgn->region[idx].size;
> > > +
> > > +               if (lmb_addrs_overlap(base, size, rgnbase,
> > > +                                     rgnsize)) {
> > > +                       if (!lmb_region_flags_match(rgn, idx, flags))
> > > +                               return -1;
> > > +                       rgn_cnt++;
> > > +                       idx++;
> > > +               }
> > > +       }
> > > +
> > > +       /* The merged region's base and size */
> > > +       rgnbase = rgn->region[i].base;
> > > +       mergebase = min(base, rgnbase);
> > > +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > > +       mergeend = max(rgnend, (base + size));
> > > +
> > > +       rgn->region[i].base = mergebase;
> > > +       rgn->region[i].size = mergeend - mergebase;
> > > +
> > > +       /* Now remove the merged regions */
> > > +       while (--rgn_cnt)
> > > +               lmb_remove_region(rgn, i + 1);
> > > +
> > > +       return 0;
> > > +}
> > > +
> > > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > > +                              phys_addr_t base, phys_size_t size,
> > > +                              enum lmb_flags flags)
> > > +{
> > > +       long ret = 0;
> > > +       phys_addr_t rgnend;
> > > +
> > > +       if (i == rgn->cnt - 1 ||
> > > +               base + size < rgn->region[i + 1].base) {
> > > +               if (!lmb_region_flags_match(rgn, i, flags))
> > > +                       return -1;
> > > +
> > > +               rgnend = rgn->region[i].base + rgn->region[i].size;
> > > +               rgn->region[i].base = min(base, rgn->region[i].base);
> > > +               rgnend = max(base + size, rgnend);
> > > +               rgn->region[i].size = rgnend - rgn->region[i].base;
> > > +       } else {
> > > +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > > +       }
> > > +
> > > +       return ret;
> > > +}
> > > +
> > >  /* This routine called with relocation disabled. */
> > >  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > >                                  phys_size_t size, enum lmb_flags flags)
> > >  {
> > >         unsigned long coalesced = 0;
> > > -       long adjacent, i;
> > > +       long ret, i;
> > >
> > >         if (rgn->cnt == 0) {
> > >                 rgn->region[0].base = base;
> > > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > >                                 return -1; /* regions with new flags */
> > >                 }
> > >
> > > -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > -               if (adjacent > 0) {
> > > +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > +               if (ret > 0) {
> > >                         if (flags != rgnflags)
> > >                                 break;
> > >                         rgn->region[i].base -= size;
> > >                         rgn->region[i].size += size;
> > >                         coalesced++;
> > >                         break;
> > > -               } else if (adjacent < 0) {
> > > +               } else if (ret < 0) {
> > >                         if (flags != rgnflags)
> > >                                 break;
> > >                         rgn->region[i].size += size;
> > >                         coalesced++;
> > >                         break;
> > >                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > > -                       /* regions overlap */
> > > -                       return -1;
> > > +                       if (flags == LMB_NONE) {
> > > +                               ret = lmb_resize_regions(rgn, i, base, size,
> > > +                                                        flags);
> > > +                               if (ret < 0)
> > > +                                       return -1;
> > > +
> > > +                               coalesced++;
> > > +                               break;
> > > +                       } else {
> > > +                               return -1;
> > > +                       }
> > >                 }
> > >         }
> > >
> > > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> > >  }
> > >
> > >  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > > -                                   phys_addr_t max_addr)
> > > +                                   phys_addr_t max_addr, enum lmb_flags flags)
> > >  {
> > >         long i, rgn;
> > >         phys_addr_t base = 0;
> > > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > >  {
> > >         phys_addr_t alloc;
> > >
> > > -       alloc = __lmb_alloc_base(size, align, max_addr);
> > > +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> > >
> > >         if (alloc == 0)
> > >                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > >         return alloc;
> > >  }
> > >
> > > -/*
> > > - * Try to allocate a specific address range: must be in defined memory but not
> > > - * reserved
> > > - */
> > > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > > +                                   enum lmb_flags flags)
> > >  {
> > >         long rgn;
> > >
> > > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > >                                       lmb.memory.region[rgn].size,
> > >                                       base + size - 1, 1)) {
> > >                         /* ok, reserve the memory */
> > > -                       if (lmb_reserve(base, size) >= 0)
> > > +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> > >                                 return base;
> > >                 }
> > >         }
> > > +
> > >         return 0;
> > >  }
> > >
> > > +/*
> > > + * Try to allocate a specific address range: must be in defined memory but not
> > > + * reserved
> > > + */
> > > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > +{
> > > +       return __lmb_alloc_addr(base, size, LMB_NONE);
> > > +}
> > > +
> > >  /* Return number of bytes from a given address that are free */
> > >  phys_size_t lmb_get_free_size(phys_addr_t addr)
> > >  {
> > > --
> > > 2.34.1
> > >
Heinrich Schuchardt June 10, 2024, 12:54 p.m. UTC | #4
On 10.06.24 14:20, Sughosh Ganu wrote:
> hi Ilias,
>
> On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> <ilias.apalodimas@linaro.org> wrote:
>>
>> Hi Sughosh
>>
>>
>> On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
>>>
>>> Allow for resizing of LMB regions if the region attributes match. The
>>> current code returns a failure status on detecting an overlapping
>>> address. This worked up until now since the LMB calls were not
>>> persistent and global -- the LMB memory map was specific and private
>>> to a given caller of the LMB API's.
>>>
>>> With the change in the LMB code to make the LMB reservations
>>> persistent, there needs to be a check on whether the memory region can
>>> be resized, and then do it if so. To distinguish between memory that
>>> cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
>>> of memory with this attribute would indicate that the region cannot be
>>> resized.
>>
>> Can you think of a memory region that needs to be protected from resizing?
>
> Actually, I think I could use a better term instead of 'resize'. The
> aim of this patch is to allow re-allocation/re-reservation of the same
> region of memory -- this will only be relevant for the LMB
> reservations, as these are used for loading images into memory. All
> other allocations(EFI currently) are true allocations in that these
> should not get overwritten at all. Memory once allocated, say for
> loading an EFI image, cannot be re-requested.
>
>
>> I think we should design this a bit differently.  For example, think
>> of someone loading a file in a memory region and then a subsystem
>> trying to resize its reserved memory overwriting that region. Instead
>> of this flag why don't we add
>> - A flag that indicates whether this region can be re-used (IOW
>> overwrites it), but only if the 'subsytem id' defined below matches
>> - a u32 that indicates the subsystem ID that initiated the transaction
>> -- e.g EFI, load command, U-Boot core .. etc
>>
>> Resizing can be enabled unconditionally in that case as long as there
>> is enough space. The only requirement would be that the request comes
>> from the same subsystem that reserved the memory in the beginning
>
> Like I mentioned above, resizing(or rather re-allocations) should only
> be allowed for the LMB subsystem. Any other module should not be
> returned an already allocated address. Which is why I mark any memory
> map update coming from the EFI module as no-overwrite.
>
> -sughosh
>>
>> Thanks
>> /Ilias
>>
>>>
>>> Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
>>> ---
>>>   include/lmb.h |   1 +
>>>   lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
>>>   2 files changed, 107 insertions(+), 14 deletions(-)
>>>
>>> diff --git a/include/lmb.h b/include/lmb.h
>>> index 03bce2a50c..1d4cd255d2 100644
>>> --- a/include/lmb.h
>>> +++ b/include/lmb.h
>>> @@ -20,6 +20,7 @@
>>>   enum lmb_flags {
>>>          LMB_NONE                = 0x0,
>>>          LMB_NOMAP               = 0x4,
>>> +       LMB_NOOVERWRITE         = 0x8,
>>>   };
>>>
>>>   /**
>>> diff --git a/lib/lmb.c b/lib/lmb.c
>>> index de5a2cf23b..0a4f3d5bcd 100644
>>> --- a/lib/lmb.c
>>> +++ b/lib/lmb.c
>>> @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
>>>          }
>>>   }
>>>
>>> +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
>>> +                                  enum lmb_flags flags)
>>> +{
>>> +       return rgn->region[r1].flags == flags;
>>> +}
>>> +
>>> +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
>>> +                                     phys_addr_t base, phys_size_t size,
>>> +                                     enum lmb_flags flags)
>>> +{
>>> +       phys_size_t rgnsize;
>>> +       unsigned long rgn_cnt, idx;
>>> +       phys_addr_t rgnbase, rgnend;
>>> +       phys_addr_t mergebase, mergeend;
>>> +
>>> +       rgn_cnt = 0;
>>> +       idx = i;
>>> +       /*
>>> +        * First thing to do is to identify how many regions does
>>> +        * the requested region overlap.
>>> +        * If the flags match, combine all these overlapping
>>> +        * regions into a single region, and remove the merged
>>> +        * regions.
>>> +        */
>>> +       while (idx < rgn->cnt - 1) {
>>> +               rgnbase = rgn->region[idx].base;
>>> +               rgnsize = rgn->region[idx].size;

It would be preferable to replace the fixed-size arrays with linked lists.
This would allow us to eliminate CONFIG_LMB_MAX_REGIONS.

EFI may create any number of non-coalescable memory regions.

Best regards

Heinrich

>>> +
>>> +               if (lmb_addrs_overlap(base, size, rgnbase,
>>> +                                     rgnsize)) {
>>> +                       if (!lmb_region_flags_match(rgn, idx, flags))
>>> +                               return -1;
>>> +                       rgn_cnt++;
>>> +                       idx++;
>>> +               }
>>> +       }
>>> +
>>> +       /* The merged region's base and size */
>>> +       rgnbase = rgn->region[i].base;
>>> +       mergebase = min(base, rgnbase);
>>> +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
>>> +       mergeend = max(rgnend, (base + size));
>>> +
>>> +       rgn->region[i].base = mergebase;
>>> +       rgn->region[i].size = mergeend - mergebase;
>>> +
>>> +       /* Now remove the merged regions */
>>> +       while (--rgn_cnt)
>>> +               lmb_remove_region(rgn, i + 1);
>>> +
>>> +       return 0;
>>> +}
>>> +
>>> +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
>>> +                              phys_addr_t base, phys_size_t size,
>>> +                              enum lmb_flags flags)
>>> +{
>>> +       long ret = 0;
>>> +       phys_addr_t rgnend;
>>> +
>>> +       if (i == rgn->cnt - 1 ||
>>> +               base + size < rgn->region[i + 1].base) {
>>> +               if (!lmb_region_flags_match(rgn, i, flags))
>>> +                       return -1;
>>> +
>>> +               rgnend = rgn->region[i].base + rgn->region[i].size;
>>> +               rgn->region[i].base = min(base, rgn->region[i].base);
>>> +               rgnend = max(base + size, rgnend);
>>> +               rgn->region[i].size = rgnend - rgn->region[i].base;
>>> +       } else {
>>> +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
>>> +       }
>>> +
>>> +       return ret;
>>> +}
>>> +
>>>   /* This routine called with relocation disabled. */
>>>   static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>>>                                   phys_size_t size, enum lmb_flags flags)
>>>   {
>>>          unsigned long coalesced = 0;
>>> -       long adjacent, i;
>>> +       long ret, i;
>>>
>>>          if (rgn->cnt == 0) {
>>>                  rgn->region[0].base = base;
>>> @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>>>                                  return -1; /* regions with new flags */
>>>                  }
>>>
>>> -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
>>> -               if (adjacent > 0) {
>>> +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
>>> +               if (ret > 0) {
>>>                          if (flags != rgnflags)
>>>                                  break;
>>>                          rgn->region[i].base -= size;
>>>                          rgn->region[i].size += size;
>>>                          coalesced++;
>>>                          break;
>>> -               } else if (adjacent < 0) {
>>> +               } else if (ret < 0) {
>>>                          if (flags != rgnflags)
>>>                                  break;
>>>                          rgn->region[i].size += size;
>>>                          coalesced++;
>>>                          break;
>>>                  } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
>>> -                       /* regions overlap */
>>> -                       return -1;
>>> +                       if (flags == LMB_NONE) {
>>> +                               ret = lmb_resize_regions(rgn, i, base, size,
>>> +                                                        flags);
>>> +                               if (ret < 0)
>>> +                                       return -1;
>>> +
>>> +                               coalesced++;
>>> +                               break;
>>> +                       } else {
>>> +                               return -1;
>>> +                       }
>>>                  }
>>>          }
>>>
>>> @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
>>>   }
>>>
>>>   static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
>>> -                                   phys_addr_t max_addr)
>>> +                                   phys_addr_t max_addr, enum lmb_flags flags)
>>>   {
>>>          long i, rgn;
>>>          phys_addr_t base = 0;
>>> @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>>>   {
>>>          phys_addr_t alloc;
>>>
>>> -       alloc = __lmb_alloc_base(size, align, max_addr);
>>> +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
>>>
>>>          if (alloc == 0)
>>>                  printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
>>> @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>>>          return alloc;
>>>   }
>>>
>>> -/*
>>> - * Try to allocate a specific address range: must be in defined memory but not
>>> - * reserved
>>> - */
>>> -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
>>> +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
>>> +                                   enum lmb_flags flags)
>>>   {
>>>          long rgn;
>>>
>>> @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
>>>                                        lmb.memory.region[rgn].size,
>>>                                        base + size - 1, 1)) {
>>>                          /* ok, reserve the memory */
>>> -                       if (lmb_reserve(base, size) >= 0)
>>> +                       if (lmb_reserve_flags(base, size, flags) >= 0)
>>>                                  return base;
>>>                  }
>>>          }
>>> +
>>>          return 0;
>>>   }
>>>
>>> +/*
>>> + * Try to allocate a specific address range: must be in defined memory but not
>>> + * reserved
>>> + */
>>> +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
>>> +{
>>> +       return __lmb_alloc_addr(base, size, LMB_NONE);
>>> +}
>>> +
>>>   /* Return number of bytes from a given address that are free */
>>>   phys_size_t lmb_get_free_size(phys_addr_t addr)
>>>   {
>>> --
>>> 2.34.1
>>>
Sughosh Ganu June 10, 2024, 12:57 p.m. UTC | #5
On Mon, 10 Jun 2024 at 18:17, Ilias Apalodimas
<ilias.apalodimas@linaro.org> wrote:
>
> On Mon, 10 Jun 2024 at 15:20, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> >
> > hi Ilias,
> >
> > On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> > <ilias.apalodimas@linaro.org> wrote:
> > >
> > > Hi Sughosh
> > >
> > >
> > > On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > > >
> > > > Allow for resizing of LMB regions if the region attributes match. The
> > > > current code returns a failure status on detecting an overlapping
> > > > address. This worked up until now since the LMB calls were not
> > > > persistent and global -- the LMB memory map was specific and private
> > > > to a given caller of the LMB API's.
> > > >
> > > > With the change in the LMB code to make the LMB reservations
> > > > persistent, there needs to be a check on whether the memory region can
> > > > be resized, and then do it if so. To distinguish between memory that
> > > > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > > > of memory with this attribute would indicate that the region cannot be
> > > > resized.
> > >
> > > Can you think of a memory region that needs to be protected from resizing?
> >
> > Actually, I think I could use a better term instead of 'resize'. The
> > aim of this patch is to allow re-allocation/re-reservation of the same
> > region of memory -- this will only be relevant for the LMB
> > reservations, as these are used for loading images into memory. All
> > other allocations(EFI currently) are true allocations in that these
> > should not get overwritten at all. Memory once allocated, say for
> > loading an EFI image, cannot be re-requested.
> >
> >
> > > I think we should design this a bit differently.  For example, think
> > > of someone loading a file in a memory region and then a subsystem
> > > trying to resize its reserved memory overwriting that region. Instead
> > > of this flag why don't we add
> > > - A flag that indicates whether this region can be re-used (IOW
> > > overwrites it), but only if the 'subsytem id' defined below matches
> > > - a u32 that indicates the subsystem ID that initiated the transaction
> > > -- e.g EFI, load command, U-Boot core .. etc
> > >
> > > Resizing can be enabled unconditionally in that case as long as there
> > > is enough space. The only requirement would be that the request comes
> > > from the same subsystem that reserved the memory in the beginning
> >
> > Like I mentioned above, resizing(or rather re-allocations) should only
> > be allowed for the LMB subsystem. Any other module should not be
> > returned an already allocated address. Which is why I mark any memory
> > map update coming from the EFI module as no-overwrite.
>
> And what happens if someone tries to overwrite 'load' memory? Won't
> you still corrupt whatever is loaded on that region as it's not marked
> for protection?

Yes, but that is the expected behaviour in U-Boot for LMB memory.
Consider the following flow:

1) load hostfs - $some_addr $some_image
2) fs.write $some_addr $some_destination $filesize
3) load hostfs - $some_addr $some_other_image
4) fs.write $some_addr $some_other_destination $filesize

The above flow is very much valid, and it exercises the same region
of LMB memory, which is why we need to allow re-use of the same memory
address for LMB. This worked up until now since all LMB
allocations/reservations were private. This is user-visible behaviour,
relied upon by many scripts that platforms use to read and write
files. So even after making the LMB memory map global and persistent,
this aspect of re-use of LMB memory must be maintained.

-sughosh

>
> /Ilias
> >
> > -sughosh
> > >
> > > Thanks
> > > /Ilias
> > >
> > > >
> > > > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > > > ---
> > > >  include/lmb.h |   1 +
> > > >  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> > > >  2 files changed, 107 insertions(+), 14 deletions(-)
> > > >
> > > > diff --git a/include/lmb.h b/include/lmb.h
> > > > index 03bce2a50c..1d4cd255d2 100644
> > > > --- a/include/lmb.h
> > > > +++ b/include/lmb.h
> > > > @@ -20,6 +20,7 @@
> > > >  enum lmb_flags {
> > > >         LMB_NONE                = 0x0,
> > > >         LMB_NOMAP               = 0x4,
> > > > +       LMB_NOOVERWRITE         = 0x8,
> > > >  };
> > > >
> > > >  /**
> > > > diff --git a/lib/lmb.c b/lib/lmb.c
> > > > index de5a2cf23b..0a4f3d5bcd 100644
> > > > --- a/lib/lmb.c
> > > > +++ b/lib/lmb.c
> > > > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> > > >         }
> > > >  }
> > > >
> > > > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > > > +                                  enum lmb_flags flags)
> > > > +{
> > > > +       return rgn->region[r1].flags == flags;
> > > > +}
> > > > +
> > > > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > > > +                                     phys_addr_t base, phys_size_t size,
> > > > +                                     enum lmb_flags flags)
> > > > +{
> > > > +       phys_size_t rgnsize;
> > > > +       unsigned long rgn_cnt, idx;
> > > > +       phys_addr_t rgnbase, rgnend;
> > > > +       phys_addr_t mergebase, mergeend;
> > > > +
> > > > +       rgn_cnt = 0;
> > > > +       idx = i;
> > > > +       /*
> > > > +        * First thing to do is to identify how many regions does
> > > > +        * the requested region overlap.
> > > > +        * If the flags match, combine all these overlapping
> > > > +        * regions into a single region, and remove the merged
> > > > +        * regions.
> > > > +        */
> > > > +       while (idx < rgn->cnt - 1) {
> > > > +               rgnbase = rgn->region[idx].base;
> > > > +               rgnsize = rgn->region[idx].size;
> > > > +
> > > > +               if (lmb_addrs_overlap(base, size, rgnbase,
> > > > +                                     rgnsize)) {
> > > > +                       if (!lmb_region_flags_match(rgn, idx, flags))
> > > > +                               return -1;
> > > > +                       rgn_cnt++;
> > > > +                       idx++;
> > > > +               }
> > > > +       }
> > > > +
> > > > +       /* The merged region's base and size */
> > > > +       rgnbase = rgn->region[i].base;
> > > > +       mergebase = min(base, rgnbase);
> > > > +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > > > +       mergeend = max(rgnend, (base + size));
> > > > +
> > > > +       rgn->region[i].base = mergebase;
> > > > +       rgn->region[i].size = mergeend - mergebase;
> > > > +
> > > > +       /* Now remove the merged regions */
> > > > +       while (--rgn_cnt)
> > > > +               lmb_remove_region(rgn, i + 1);
> > > > +
> > > > +       return 0;
> > > > +}
> > > > +
> > > > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > > > +                              phys_addr_t base, phys_size_t size,
> > > > +                              enum lmb_flags flags)
> > > > +{
> > > > +       long ret = 0;
> > > > +       phys_addr_t rgnend;
> > > > +
> > > > +       if (i == rgn->cnt - 1 ||
> > > > +               base + size < rgn->region[i + 1].base) {
> > > > +               if (!lmb_region_flags_match(rgn, i, flags))
> > > > +                       return -1;
> > > > +
> > > > +               rgnend = rgn->region[i].base + rgn->region[i].size;
> > > > +               rgn->region[i].base = min(base, rgn->region[i].base);
> > > > +               rgnend = max(base + size, rgnend);
> > > > +               rgn->region[i].size = rgnend - rgn->region[i].base;
> > > > +       } else {
> > > > +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > > > +       }
> > > > +
> > > > +       return ret;
> > > > +}
> > > > +
> > > >  /* This routine called with relocation disabled. */
> > > >  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > >                                  phys_size_t size, enum lmb_flags flags)
> > > >  {
> > > >         unsigned long coalesced = 0;
> > > > -       long adjacent, i;
> > > > +       long ret, i;
> > > >
> > > >         if (rgn->cnt == 0) {
> > > >                 rgn->region[0].base = base;
> > > > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > >                                 return -1; /* regions with new flags */
> > > >                 }
> > > >
> > > > -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > -               if (adjacent > 0) {
> > > > +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > +               if (ret > 0) {
> > > >                         if (flags != rgnflags)
> > > >                                 break;
> > > >                         rgn->region[i].base -= size;
> > > >                         rgn->region[i].size += size;
> > > >                         coalesced++;
> > > >                         break;
> > > > -               } else if (adjacent < 0) {
> > > > +               } else if (ret < 0) {
> > > >                         if (flags != rgnflags)
> > > >                                 break;
> > > >                         rgn->region[i].size += size;
> > > >                         coalesced++;
> > > >                         break;
> > > >                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > > > -                       /* regions overlap */
> > > > -                       return -1;
> > > > +                       if (flags == LMB_NONE) {
> > > > +                               ret = lmb_resize_regions(rgn, i, base, size,
> > > > +                                                        flags);
> > > > +                               if (ret < 0)
> > > > +                                       return -1;
> > > > +
> > > > +                               coalesced++;
> > > > +                               break;
> > > > +                       } else {
> > > > +                               return -1;
> > > > +                       }
> > > >                 }
> > > >         }
> > > >
> > > > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> > > >  }
> > > >
> > > >  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > > > -                                   phys_addr_t max_addr)
> > > > +                                   phys_addr_t max_addr, enum lmb_flags flags)
> > > >  {
> > > >         long i, rgn;
> > > >         phys_addr_t base = 0;
> > > > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > >  {
> > > >         phys_addr_t alloc;
> > > >
> > > > -       alloc = __lmb_alloc_base(size, align, max_addr);
> > > > +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> > > >
> > > >         if (alloc == 0)
> > > >                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > > > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > >         return alloc;
> > > >  }
> > > >
> > > > -/*
> > > > - * Try to allocate a specific address range: must be in defined memory but not
> > > > - * reserved
> > > > - */
> > > > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > > > +                                   enum lmb_flags flags)
> > > >  {
> > > >         long rgn;
> > > >
> > > > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > >                                       lmb.memory.region[rgn].size,
> > > >                                       base + size - 1, 1)) {
> > > >                         /* ok, reserve the memory */
> > > > -                       if (lmb_reserve(base, size) >= 0)
> > > > +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> > > >                                 return base;
> > > >                 }
> > > >         }
> > > > +
> > > >         return 0;
> > > >  }
> > > >
> > > > +/*
> > > > + * Try to allocate a specific address range: must be in defined memory but not
> > > > + * reserved
> > > > + */
> > > > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > +{
> > > > +       return __lmb_alloc_addr(base, size, LMB_NONE);
> > > > +}
> > > > +
> > > >  /* Return number of bytes from a given address that are free */
> > > >  phys_size_t lmb_get_free_size(phys_addr_t addr)
> > > >  {
> > > > --
> > > > 2.34.1
> > > >
Sughosh Ganu June 10, 2024, 1:01 p.m. UTC | #6
On Mon, 10 Jun 2024 at 18:29, Heinrich Schuchardt <xypron.glpk@gmx.de> wrote:
>
> On 10.06.24 14:20, Sughosh Ganu wrote:
> > hi Ilias,
> >
> > On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> > <ilias.apalodimas@linaro.org> wrote:
> >>
> >> Hi Sughosh
> >>
> >>
> >> On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> >>>
> >>> Allow for resizing of LMB regions if the region attributes match. The
> >>> current code returns a failure status on detecting an overlapping
> >>> address. This worked up until now since the LMB calls were not
> >>> persistent and global -- the LMB memory map was specific and private
> >>> to a given caller of the LMB API's.
> >>>
> >>> With the change in the LMB code to make the LMB reservations
> >>> persistent, there needs to be a check on whether the memory region can
> >>> be resized, and then do it if so. To distinguish between memory that
> >>> cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> >>> of memory with this attribute would indicate that the region cannot be
> >>> resized.
> >>
> >> Can you think of a memory region that needs to be protected from resizing?
> >
> > Actually, I think I could use a better term instead of 'resize'. The
> > aim of this patch is to allow re-allocation/re-reservation of the same
> > region of memory -- this will only be relevant for the LMB
> > reservations, as these are used for loading images into memory. All
> > other allocations(EFI currently) are true allocations in that these
> > should not get overwritten at all. Memory once allocated, say for
> > loading an EFI image, cannot be re-requested.
> >
> >
> >> I think we should design this a bit differently.  For example, think
> >> of someone loading a file in a memory region and then a subsystem
> >> trying to resize its reserved memory overwriting that region. Instead
> >> of this flag why don't we add
> >> - A flag that indicates whether this region can be re-used (IOW
> >> overwrites it), but only if the 'subsytem id' defined below matches
> >> - a u32 that indicates the subsystem ID that initiated the transaction
> >> -- e.g EFI, load command, U-Boot core .. etc
> >>
> >> Resizing can be enabled unconditionally in that case as long as there
> >> is enough space. The only requirement would be that the request comes
> >> from the same subsystem that reserved the memory in the beginning
> >
> > Like I mentioned above, resizing(or rather re-allocations) should only
> > be allowed for the LMB subsystem. Any other module should not be
> > returned an already allocated address. Which is why I mark any memory
> > map update coming from the EFI module as no-overwrite.
> >
> > -sughosh
> >>
> >> Thanks
> >> /Ilias
> >>
> >>>
> >>> Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> >>> ---
> >>>   include/lmb.h |   1 +
> >>>   lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> >>>   2 files changed, 107 insertions(+), 14 deletions(-)
> >>>
> >>> diff --git a/include/lmb.h b/include/lmb.h
> >>> index 03bce2a50c..1d4cd255d2 100644
> >>> --- a/include/lmb.h
> >>> +++ b/include/lmb.h
> >>> @@ -20,6 +20,7 @@
> >>>   enum lmb_flags {
> >>>          LMB_NONE                = 0x0,
> >>>          LMB_NOMAP               = 0x4,
> >>> +       LMB_NOOVERWRITE         = 0x8,
> >>>   };
> >>>
> >>>   /**
> >>> diff --git a/lib/lmb.c b/lib/lmb.c
> >>> index de5a2cf23b..0a4f3d5bcd 100644
> >>> --- a/lib/lmb.c
> >>> +++ b/lib/lmb.c
> >>> @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> >>>          }
> >>>   }
> >>>
> >>> +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> >>> +                                  enum lmb_flags flags)
> >>> +{
> >>> +       return rgn->region[r1].flags == flags;
> >>> +}
> >>> +
> >>> +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> >>> +                                     phys_addr_t base, phys_size_t size,
> >>> +                                     enum lmb_flags flags)
> >>> +{
> >>> +       phys_size_t rgnsize;
> >>> +       unsigned long rgn_cnt, idx;
> >>> +       phys_addr_t rgnbase, rgnend;
> >>> +       phys_addr_t mergebase, mergeend;
> >>> +
> >>> +       rgn_cnt = 0;
> >>> +       idx = i;
> >>> +       /*
> >>> +        * First thing to do is to identify how many regions does
> >>> +        * the requested region overlap.
> >>> +        * If the flags match, combine all these overlapping
> >>> +        * regions into a single region, and remove the merged
> >>> +        * regions.
> >>> +        */
> >>> +       while (idx < rgn->cnt - 1) {
> >>> +               rgnbase = rgn->region[idx].base;
> >>> +               rgnsize = rgn->region[idx].size;
>
> It would be preferable to replace fixed size arrays by linked lists.
> This will allow us to eliminate CONFIG_LMB_MAX_REGIONS.
>
> EFI may create any number of non-coalescable memory regions.

Noted. I will work on this change for the next version.

-sughosh

>
> Best regards
>
> Heinrich
>
> >>> +
> >>> +               if (lmb_addrs_overlap(base, size, rgnbase,
> >>> +                                     rgnsize)) {
> >>> +                       if (!lmb_region_flags_match(rgn, idx, flags))
> >>> +                               return -1;
> >>> +                       rgn_cnt++;
> >>> +                       idx++;
> >>> +               }
> >>> +       }
> >>> +
> >>> +       /* The merged region's base and size */
> >>> +       rgnbase = rgn->region[i].base;
> >>> +       mergebase = min(base, rgnbase);
> >>> +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> >>> +       mergeend = max(rgnend, (base + size));
> >>> +
> >>> +       rgn->region[i].base = mergebase;
> >>> +       rgn->region[i].size = mergeend - mergebase;
> >>> +
> >>> +       /* Now remove the merged regions */
> >>> +       while (--rgn_cnt)
> >>> +               lmb_remove_region(rgn, i + 1);
> >>> +
> >>> +       return 0;
> >>> +}
> >>> +
> >>> +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> >>> +                              phys_addr_t base, phys_size_t size,
> >>> +                              enum lmb_flags flags)
> >>> +{
> >>> +       long ret = 0;
> >>> +       phys_addr_t rgnend;
> >>> +
> >>> +       if (i == rgn->cnt - 1 ||
> >>> +               base + size < rgn->region[i + 1].base) {
> >>> +               if (!lmb_region_flags_match(rgn, i, flags))
> >>> +                       return -1;
> >>> +
> >>> +               rgnend = rgn->region[i].base + rgn->region[i].size;
> >>> +               rgn->region[i].base = min(base, rgn->region[i].base);
> >>> +               rgnend = max(base + size, rgnend);
> >>> +               rgn->region[i].size = rgnend - rgn->region[i].base;
> >>> +       } else {
> >>> +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> >>> +       }
> >>> +
> >>> +       return ret;
> >>> +}
> >>> +
> >>>   /* This routine called with relocation disabled. */
> >>>   static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >>>                                   phys_size_t size, enum lmb_flags flags)
> >>>   {
> >>>          unsigned long coalesced = 0;
> >>> -       long adjacent, i;
> >>> +       long ret, i;
> >>>
> >>>          if (rgn->cnt == 0) {
> >>>                  rgn->region[0].base = base;
> >>> @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >>>                                  return -1; /* regions with new flags */
> >>>                  }
> >>>
> >>> -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> >>> -               if (adjacent > 0) {
> >>> +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> >>> +               if (ret > 0) {
> >>>                          if (flags != rgnflags)
> >>>                                  break;
> >>>                          rgn->region[i].base -= size;
> >>>                          rgn->region[i].size += size;
> >>>                          coalesced++;
> >>>                          break;
> >>> -               } else if (adjacent < 0) {
> >>> +               } else if (ret < 0) {
> >>>                          if (flags != rgnflags)
> >>>                                  break;
> >>>                          rgn->region[i].size += size;
> >>>                          coalesced++;
> >>>                          break;
> >>>                  } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> >>> -                       /* regions overlap */
> >>> -                       return -1;
> >>> +                       if (flags == LMB_NONE) {
> >>> +                               ret = lmb_resize_regions(rgn, i, base, size,
> >>> +                                                        flags);
> >>> +                               if (ret < 0)
> >>> +                                       return -1;
> >>> +
> >>> +                               coalesced++;
> >>> +                               break;
> >>> +                       } else {
> >>> +                               return -1;
> >>> +                       }
> >>>                  }
> >>>          }
> >>>
> >>> @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> >>>   }
> >>>
> >>>   static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> >>> -                                   phys_addr_t max_addr)
> >>> +                                   phys_addr_t max_addr, enum lmb_flags flags)
> >>>   {
> >>>          long i, rgn;
> >>>          phys_addr_t base = 0;
> >>> @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >>>   {
> >>>          phys_addr_t alloc;
> >>>
> >>> -       alloc = __lmb_alloc_base(size, align, max_addr);
> >>> +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> >>>
> >>>          if (alloc == 0)
> >>>                  printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> >>> @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >>>          return alloc;
> >>>   }
> >>>
> >>> -/*
> >>> - * Try to allocate a specific address range: must be in defined memory but not
> >>> - * reserved
> >>> - */
> >>> -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> >>> +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> >>> +                                   enum lmb_flags flags)
> >>>   {
> >>>          long rgn;
> >>>
> >>> @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> >>>                                        lmb.memory.region[rgn].size,
> >>>                                        base + size - 1, 1)) {
> >>>                          /* ok, reserve the memory */
> >>> -                       if (lmb_reserve(base, size) >= 0)
> >>> +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> >>>                                  return base;
> >>>                  }
> >>>          }
> >>> +
> >>>          return 0;
> >>>   }
> >>>
> >>> +/*
> >>> + * Try to allocate a specific address range: must be in defined memory but not
> >>> + * reserved
> >>> + */
> >>> +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> >>> +{
> >>> +       return __lmb_alloc_addr(base, size, LMB_NONE);
> >>> +}
> >>> +
> >>>   /* Return number of bytes from a given address that are free */
> >>>   phys_size_t lmb_get_free_size(phys_addr_t addr)
> >>>   {
> >>> --
> >>> 2.34.1
> >>>
>
Ilias Apalodimas June 10, 2024, 2:21 p.m. UTC | #7
Hi Sughosh

On Mon, 10 Jun 2024 at 15:57, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
>
> On Mon, 10 Jun 2024 at 18:17, Ilias Apalodimas
> <ilias.apalodimas@linaro.org> wrote:
> >
> > On Mon, 10 Jun 2024 at 15:20, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > >
> > > hi Ilias,
> > >
> > > On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> > > <ilias.apalodimas@linaro.org> wrote:
> > > >
> > > > Hi Sughosh
> > > >
> > > >
> > > > On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > > > >
> > > > > Allow for resizing of LMB regions if the region attributes match. The
> > > > > current code returns a failure status on detecting an overlapping
> > > > > address. This worked up until now since the LMB calls were not
> > > > > persistent and global -- the LMB memory map was specific and private
> > > > > to a given caller of the LMB API's.
> > > > >
> > > > > With the change in the LMB code to make the LMB reservations
> > > > > persistent, there needs to be a check on whether the memory region can
> > > > > be resized, and then do it if so. To distinguish between memory that
> > > > > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > > > > of memory with this attribute would indicate that the region cannot be
> > > > > resized.
> > > >
> > > > Can you think of a memory region that needs to be protected from resizing?
> > >
> > > Actually, I think I could use a better term instead of 'resize'. The
> > > aim of this patch is to allow re-allocation/re-reservation of the same
> > > region of memory -- this will only be relevant for the LMB
> > > reservations, as these are used for loading images into memory. All
> > > other allocations(EFI currently) are true allocations in that these
> > > should not get overwritten at all. Memory once allocated, say for
> > > loading an EFI image, cannot be re-requested.
> > >
> > >
> > > > I think we should design this a bit differently.  For example, think
> > > > of someone loading a file in a memory region and then a subsystem
> > > > trying to resize its reserved memory overwriting that region. Instead
> > > > of this flag why don't we add
> > > > - A flag that indicates whether this region can be re-used (IOW
> > > > overwrites it), but only if the 'subsytem id' defined below matches
> > > > - a u32 that indicates the subsystem ID that initiated the transaction
> > > > -- e.g EFI, load command, U-Boot core .. etc
> > > >
> > > > Resizing can be enabled unconditionally in that case as long as there
> > > > is enough space. The only requirement would be that the request comes
> > > > from the same subsystem that reserved the memory in the beginning
> > >
> > > Like I mentioned above, resizing(or rather re-allocations) should only
> > > be allowed for the LMB subsystem. Any other module should not be
> > > returned an already allocated address. Which is why I mark any memory
> > > map update coming from the EFI module as no-overwrite.
> >
> > And what happens if someone tries to overwrite 'load' memory? Won't
> > you still corrupt whatever is loaded on that region as it's not marked
> > for protection?
>
> Yes, but that is the expected behavior in U-Boot for LMB memory.
> Consider the following flow,
>
> 1) load hostfs - $some_addr $some_image
> 2) fs.write $some_addr $some_destination $filesize
> 3) load hostfs - $some_addr $some_other_image
> 4) fs.write $some_addr $some_other_destination $filesize
>
> The above flow is very much valid, and this exercises the same region
> of LMB memory. Which is why we need to allow re-using the same memory
> address for LMB. This worked up until now since all LMB
> allocations/reservations were private. This is user visible behaviour,
> and used in many scripts used by platforms to read and write files. So
> even after making the LMB memory map global and persistent, this
> aspect of re-use of LMB memory must be maintained.

I am not talking about this. I haven't gone through all the patches
yet, so I might be missing something but...
What if a user loads a file to boot and then an EFI subsystem decides
to allocate memory -- e.g. the EventLog -- and that allocation routine
returns memory within the space you just loaded the binary into? Yes,
memory should be overwritten to preserve the current behavior, but
only if you perform the same action/command.

Thanks
/Ilias
>
> -sughosh
>
> >
> > /Ilias
> > >
> > > -sughosh
> > > >
> > > > Thanks
> > > > /Ilias
> > > >
> > > > >
> > > > > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > > > > ---
> > > > >  include/lmb.h |   1 +
> > > > >  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> > > > >  2 files changed, 107 insertions(+), 14 deletions(-)
> > > > >
> > > > > diff --git a/include/lmb.h b/include/lmb.h
> > > > > index 03bce2a50c..1d4cd255d2 100644
> > > > > --- a/include/lmb.h
> > > > > +++ b/include/lmb.h
> > > > > @@ -20,6 +20,7 @@
> > > > >  enum lmb_flags {
> > > > >         LMB_NONE                = 0x0,
> > > > >         LMB_NOMAP               = 0x4,
> > > > > +       LMB_NOOVERWRITE         = 0x8,
> > > > >  };
> > > > >
> > > > >  /**
> > > > > diff --git a/lib/lmb.c b/lib/lmb.c
> > > > > index de5a2cf23b..0a4f3d5bcd 100644
> > > > > --- a/lib/lmb.c
> > > > > +++ b/lib/lmb.c
> > > > > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> > > > >         }
> > > > >  }
> > > > >
> > > > > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > > > > +                                  enum lmb_flags flags)
> > > > > +{
> > > > > +       return rgn->region[r1].flags == flags;
> > > > > +}
> > > > > +
> > > > > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > > > > +                                     phys_addr_t base, phys_size_t size,
> > > > > +                                     enum lmb_flags flags)
> > > > > +{
> > > > > +       phys_size_t rgnsize;
> > > > > +       unsigned long rgn_cnt, idx;
> > > > > +       phys_addr_t rgnbase, rgnend;
> > > > > +       phys_addr_t mergebase, mergeend;
> > > > > +
> > > > > +       rgn_cnt = 0;
> > > > > +       idx = i;
> > > > > +       /*
> > > > > +        * First thing to do is to identify how many regions does
> > > > > +        * the requested region overlap.
> > > > > +        * If the flags match, combine all these overlapping
> > > > > +        * regions into a single region, and remove the merged
> > > > > +        * regions.
> > > > > +        */
> > > > > +       while (idx < rgn->cnt - 1) {
> > > > > +               rgnbase = rgn->region[idx].base;
> > > > > +               rgnsize = rgn->region[idx].size;
> > > > > +
> > > > > +               if (lmb_addrs_overlap(base, size, rgnbase,
> > > > > +                                     rgnsize)) {
> > > > > +                       if (!lmb_region_flags_match(rgn, idx, flags))
> > > > > +                               return -1;
> > > > > +                       rgn_cnt++;
> > > > > +                       idx++;
> > > > > +               }
> > > > > +       }
> > > > > +
> > > > > +       /* The merged region's base and size */
> > > > > +       rgnbase = rgn->region[i].base;
> > > > > +       mergebase = min(base, rgnbase);
> > > > > +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > > > > +       mergeend = max(rgnend, (base + size));
> > > > > +
> > > > > +       rgn->region[i].base = mergebase;
> > > > > +       rgn->region[i].size = mergeend - mergebase;
> > > > > +
> > > > > +       /* Now remove the merged regions */
> > > > > +       while (--rgn_cnt)
> > > > > +               lmb_remove_region(rgn, i + 1);
> > > > > +
> > > > > +       return 0;
> > > > > +}
> > > > > +
> > > > > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > > > > +                              phys_addr_t base, phys_size_t size,
> > > > > +                              enum lmb_flags flags)
> > > > > +{
> > > > > +       long ret = 0;
> > > > > +       phys_addr_t rgnend;
> > > > > +
> > > > > +       if (i == rgn->cnt - 1 ||
> > > > > +               base + size < rgn->region[i + 1].base) {
> > > > > +               if (!lmb_region_flags_match(rgn, i, flags))
> > > > > +                       return -1;
> > > > > +
> > > > > +               rgnend = rgn->region[i].base + rgn->region[i].size;
> > > > > +               rgn->region[i].base = min(base, rgn->region[i].base);
> > > > > +               rgnend = max(base + size, rgnend);
> > > > > +               rgn->region[i].size = rgnend - rgn->region[i].base;
> > > > > +       } else {
> > > > > +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > > > > +       }
> > > > > +
> > > > > +       return ret;
> > > > > +}
> > > > > +
> > > > >  /* This routine called with relocation disabled. */
> > > > >  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > > >                                  phys_size_t size, enum lmb_flags flags)
> > > > >  {
> > > > >         unsigned long coalesced = 0;
> > > > > -       long adjacent, i;
> > > > > +       long ret, i;
> > > > >
> > > > >         if (rgn->cnt == 0) {
> > > > >                 rgn->region[0].base = base;
> > > > > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > > >                                 return -1; /* regions with new flags */
> > > > >                 }
> > > > >
> > > > > -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > > -               if (adjacent > 0) {
> > > > > +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > > +               if (ret > 0) {
> > > > >                         if (flags != rgnflags)
> > > > >                                 break;
> > > > >                         rgn->region[i].base -= size;
> > > > >                         rgn->region[i].size += size;
> > > > >                         coalesced++;
> > > > >                         break;
> > > > > -               } else if (adjacent < 0) {
> > > > > +               } else if (ret < 0) {
> > > > >                         if (flags != rgnflags)
> > > > >                                 break;
> > > > >                         rgn->region[i].size += size;
> > > > >                         coalesced++;
> > > > >                         break;
> > > > >                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > > > > -                       /* regions overlap */
> > > > > -                       return -1;
> > > > > +                       if (flags == LMB_NONE) {
> > > > > +                               ret = lmb_resize_regions(rgn, i, base, size,
> > > > > +                                                        flags);
> > > > > +                               if (ret < 0)
> > > > > +                                       return -1;
> > > > > +
> > > > > +                               coalesced++;
> > > > > +                               break;
> > > > > +                       } else {
> > > > > +                               return -1;
> > > > > +                       }
> > > > >                 }
> > > > >         }
> > > > >
> > > > > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> > > > >  }
> > > > >
> > > > >  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > > > > -                                   phys_addr_t max_addr)
> > > > > +                                   phys_addr_t max_addr, enum lmb_flags flags)
> > > > >  {
> > > > >         long i, rgn;
> > > > >         phys_addr_t base = 0;
> > > > > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > > >  {
> > > > >         phys_addr_t alloc;
> > > > >
> > > > > -       alloc = __lmb_alloc_base(size, align, max_addr);
> > > > > +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> > > > >
> > > > >         if (alloc == 0)
> > > > >                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > > > > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > > >         return alloc;
> > > > >  }
> > > > >
> > > > > -/*
> > > > > - * Try to allocate a specific address range: must be in defined memory but not
> > > > > - * reserved
> > > > > - */
> > > > > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > > > > +                                   enum lmb_flags flags)
> > > > >  {
> > > > >         long rgn;
> > > > >
> > > > > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > >                                       lmb.memory.region[rgn].size,
> > > > >                                       base + size - 1, 1)) {
> > > > >                         /* ok, reserve the memory */
> > > > > -                       if (lmb_reserve(base, size) >= 0)
> > > > > +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> > > > >                                 return base;
> > > > >                 }
> > > > >         }
> > > > > +
> > > > >         return 0;
> > > > >  }
> > > > >
> > > > > +/*
> > > > > + * Try to allocate a specific address range: must be in defined memory but not
> > > > > + * reserved
> > > > > + */
> > > > > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > > +{
> > > > > +       return __lmb_alloc_addr(base, size, LMB_NONE);
> > > > > +}
> > > > > +
> > > > >  /* Return number of bytes from a given address that are free */
> > > > >  phys_size_t lmb_get_free_size(phys_addr_t addr)
> > > > >  {
> > > > > --
> > > > > 2.34.1
> > > > >
Sughosh Ganu June 10, 2024, 2:33 p.m. UTC | #8
hi Ilias,

On Mon, 10 Jun 2024 at 19:52, Ilias Apalodimas
<ilias.apalodimas@linaro.org> wrote:
>
> Hi Sughosh
>
> On Mon, 10 Jun 2024 at 15:57, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> >
> > On Mon, 10 Jun 2024 at 18:17, Ilias Apalodimas
> > <ilias.apalodimas@linaro.org> wrote:
> > >
> > > On Mon, 10 Jun 2024 at 15:20, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > > >
> > > > hi Ilias,
> > > >
> > > > On Mon, 10 Jun 2024 at 17:34, Ilias Apalodimas
> > > > <ilias.apalodimas@linaro.org> wrote:
> > > > >
> > > > > Hi Sughosh
> > > > >
> > > > >
> > > > > On Fri, 7 Jun 2024 at 21:54, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > > > > >
> > > > > > Allow for resizing of LMB regions if the region attributes match. The
> > > > > > current code returns a failure status on detecting an overlapping
> > > > > > address. This worked up until now since the LMB calls were not
> > > > > > persistent and global -- the LMB memory map was specific and private
> > > > > > to a given caller of the LMB API's.
> > > > > >
> > > > > > With the change in the LMB code to make the LMB reservations
> > > > > > persistent, there needs to be a check on whether the memory region can
> > > > > > be resized, and then do it if so. To distinguish between memory that
> > > > > > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > > > > > of memory with this attribute would indicate that the region cannot be
> > > > > > resized.
> > > > >
> > > > > Can you think of a memory region that needs to be protected from resizing?
> > > >
> > > > Actually, I think I could use a better term instead of 'resize'. The
> > > > aim of this patch is to allow re-allocation/re-reservation of the same
> > > > region of memory -- this will only be relevant for the LMB
> > > > reservations, as these are used for loading images into memory. All
> > > > other allocations(EFI currently) are true allocations in that these
> > > > should not get overwritten at all. Memory once allocated, say for
> > > > loading an EFI image, cannot be re-requested.
> > > >
> > > >
> > > > > I think we should design this a bit differently.  For example, think
> > > > > of someone loading a file in a memory region and then a subsystem
> > > > > trying to resize its reserved memory overwriting that region. Instead
> > > > > of this flag why don't we add
> > > > > - A flag that indicates whether this region can be re-used (IOW
> > > > > overwrites it), but only if the 'subsytem id' defined below matches
> > > > > - a u32 that indicates the subsystem ID that initiated the transaction
> > > > > -- e.g EFI, load command, U-Boot core .. etc
> > > > >
> > > > > Resizing can be enabled unconditionally in that case as long as there
> > > > > is enough space. The only requirement would be that the request comes
> > > > > from the same subsystem that reserved the memory in the beginning
> > > >
> > > > Like I mentioned above, resizing(or rather re-allocations) should only
> > > > be allowed for the LMB subsystem. Any other module should not be
> > > > returned an already allocated address. Which is why I mark any memory
> > > > map update coming from the EFI module as no-overwrite.
> > >
> > > And what happens if someone tries to overwrite 'load' memory? Won't
> > > you still corrupt whatever is loaded on that region as it's not marked
> > > for protection?
> >
> > Yes, but that is the expected behavior in U-Boot for LMB memory.
> > Consider the following flow,
> >
> > 1) load hostfs - $some_addr $some_image
> > 2) fs.write $some_addr $some_destination $filesize
> > 3) load hostfs - $some_addr $some_other_image
> > 4) fs.write $some_addr $some_other_destination $filesize
> >
> > The above flow is very much valid, and this exercises the same region
> > of LMB memory. Which is why we need to allow re-using the same memory
> > address for LMB. This worked up until now since all LMB
> > allocations/reservations were private. This is user visible behaviour,
> > and used in many scripts used by platforms to read and write files. So
> > even after making the LMB memory map global and persistent, this
> > aspect of re-use of LMB memory must be maintained.
>
> I am not talking about this. I haven't gone through all the patches
> yet, so I might be missing something but...
> What if a user loads a file to boot and then an EFI subsystem decides
> to allocate memory -- e.g the EventLog and that allocation routine
> returns memory within the space you just loaded the binary from? Yes
> memory should be overwritten to preserve the current behavior, but
> only if you perform the same action/command

In the scenario you describe above, the memory region into which the
user loads the file is LMB memory. When this action happens, the EFI
memory module gets notified, the EFI memory map gets updated, and this
region of memory gets marked as no longer being
EFI_CONVENTIONAL_MEMORY. So, when the EFI subsystem has to allocate
memory, say for the EventLog, efi_find_free_memory() would not return
that memory, as the EFI memory map has been updated. Hence the EFI
subsystem would allocate the next available memory region that it
finds.

The idea is pretty simple. Whenever a module allocates memory, it
notifies the other module, and that memory can then no longer be used
by the other module. In the case of LMB memory, a request to re-use
that memory does not fail, because of how LMB memory is used in U-Boot.

-sughosh

>
> Thanks
> /Ilias
> >
> > -sughosh
> >
> > >
> > > /Ilias
> > > >
> > > > -sughosh
> > > > >
> > > > > Thanks
> > > > > /Ilias
> > > > >
> > > > > >
> > > > > > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > > > > > ---
> > > > > >  include/lmb.h |   1 +
> > > > > >  lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> > > > > >  2 files changed, 107 insertions(+), 14 deletions(-)
> > > > > >
> > > > > > diff --git a/include/lmb.h b/include/lmb.h
> > > > > > index 03bce2a50c..1d4cd255d2 100644
> > > > > > --- a/include/lmb.h
> > > > > > +++ b/include/lmb.h
> > > > > > @@ -20,6 +20,7 @@
> > > > > >  enum lmb_flags {
> > > > > >         LMB_NONE                = 0x0,
> > > > > >         LMB_NOMAP               = 0x4,
> > > > > > +       LMB_NOOVERWRITE         = 0x8,
> > > > > >  };
> > > > > >
> > > > > >  /**
> > > > > > diff --git a/lib/lmb.c b/lib/lmb.c
> > > > > > index de5a2cf23b..0a4f3d5bcd 100644
> > > > > > --- a/lib/lmb.c
> > > > > > +++ b/lib/lmb.c
> > > > > > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> > > > > >         }
> > > > > >  }
> > > > > >
> > > > > > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > > > > > +                                  enum lmb_flags flags)
> > > > > > +{
> > > > > > +       return rgn->region[r1].flags == flags;
> > > > > > +}
> > > > > > +
> > > > > > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > > > > > +                                     phys_addr_t base, phys_size_t size,
> > > > > > +                                     enum lmb_flags flags)
> > > > > > +{
> > > > > > +       phys_size_t rgnsize;
> > > > > > +       unsigned long rgn_cnt, idx;
> > > > > > +       phys_addr_t rgnbase, rgnend;
> > > > > > +       phys_addr_t mergebase, mergeend;
> > > > > > +
> > > > > > +       rgn_cnt = 0;
> > > > > > +       idx = i;
> > > > > > +       /*
> > > > > > +        * First thing to do is to identify how many regions does
> > > > > > +        * the requested region overlap.
> > > > > > +        * If the flags match, combine all these overlapping
> > > > > > +        * regions into a single region, and remove the merged
> > > > > > +        * regions.
> > > > > > +        */
> > > > > > +       while (idx < rgn->cnt - 1) {
> > > > > > +               rgnbase = rgn->region[idx].base;
> > > > > > +               rgnsize = rgn->region[idx].size;
> > > > > > +
> > > > > > +               if (lmb_addrs_overlap(base, size, rgnbase,
> > > > > > +                                     rgnsize)) {
> > > > > > +                       if (!lmb_region_flags_match(rgn, idx, flags))
> > > > > > +                               return -1;
> > > > > > +                       rgn_cnt++;
> > > > > > +                       idx++;
> > > > > > +               }
> > > > > > +       }
> > > > > > +
> > > > > > +       /* The merged region's base and size */
> > > > > > +       rgnbase = rgn->region[i].base;
> > > > > > +       mergebase = min(base, rgnbase);
> > > > > > +       rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > > > > > +       mergeend = max(rgnend, (base + size));
> > > > > > +
> > > > > > +       rgn->region[i].base = mergebase;
> > > > > > +       rgn->region[i].size = mergeend - mergebase;
> > > > > > +
> > > > > > +       /* Now remove the merged regions */
> > > > > > +       while (--rgn_cnt)
> > > > > > +               lmb_remove_region(rgn, i + 1);
> > > > > > +
> > > > > > +       return 0;
> > > > > > +}
> > > > > > +
> > > > > > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > > > > > +                              phys_addr_t base, phys_size_t size,
> > > > > > +                              enum lmb_flags flags)
> > > > > > +{
> > > > > > +       long ret = 0;
> > > > > > +       phys_addr_t rgnend;
> > > > > > +
> > > > > > +       if (i == rgn->cnt - 1 ||
> > > > > > +               base + size < rgn->region[i + 1].base) {
> > > > > > +               if (!lmb_region_flags_match(rgn, i, flags))
> > > > > > +                       return -1;
> > > > > > +
> > > > > > +               rgnend = rgn->region[i].base + rgn->region[i].size;
> > > > > > +               rgn->region[i].base = min(base, rgn->region[i].base);
> > > > > > +               rgnend = max(base + size, rgnend);
> > > > > > +               rgn->region[i].size = rgnend - rgn->region[i].base;
> > > > > > +       } else {
> > > > > > +               ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > > > > > +       }
> > > > > > +
> > > > > > +       return ret;
> > > > > > +}
> > > > > > +
> > > > > >  /* This routine called with relocation disabled. */
> > > > > >  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > > > >                                  phys_size_t size, enum lmb_flags flags)
> > > > > >  {
> > > > > >         unsigned long coalesced = 0;
> > > > > > -       long adjacent, i;
> > > > > > +       long ret, i;
> > > > > >
> > > > > >         if (rgn->cnt == 0) {
> > > > > >                 rgn->region[0].base = base;
> > > > > > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> > > > > >                                 return -1; /* regions with new flags */
> > > > > >                 }
> > > > > >
> > > > > > -               adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > > > -               if (adjacent > 0) {
> > > > > > +               ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > > > > > +               if (ret > 0) {
> > > > > >                         if (flags != rgnflags)
> > > > > >                                 break;
> > > > > >                         rgn->region[i].base -= size;
> > > > > >                         rgn->region[i].size += size;
> > > > > >                         coalesced++;
> > > > > >                         break;
> > > > > > -               } else if (adjacent < 0) {
> > > > > > +               } else if (ret < 0) {
> > > > > >                         if (flags != rgnflags)
> > > > > >                                 break;
> > > > > >                         rgn->region[i].size += size;
> > > > > >                         coalesced++;
> > > > > >                         break;
> > > > > >                 } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > > > > > -                       /* regions overlap */
> > > > > > -                       return -1;
> > > > > > +                       if (flags == LMB_NONE) {
> > > > > > +                               ret = lmb_resize_regions(rgn, i, base, size,
> > > > > > +                                                        flags);
> > > > > > +                               if (ret < 0)
> > > > > > +                                       return -1;
> > > > > > +
> > > > > > +                               coalesced++;
> > > > > > +                               break;
> > > > > > +                       } else {
> > > > > > +                               return -1;
> > > > > > +                       }
> > > > > >                 }
> > > > > >         }
> > > > > >
> > > > > > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> > > > > >  }
> > > > > >
> > > > > >  static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > > > > > -                                   phys_addr_t max_addr)
> > > > > > +                                   phys_addr_t max_addr, enum lmb_flags flags)
> > > > > >  {
> > > > > >         long i, rgn;
> > > > > >         phys_addr_t base = 0;
> > > > > > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > > > >  {
> > > > > >         phys_addr_t alloc;
> > > > > >
> > > > > > -       alloc = __lmb_alloc_base(size, align, max_addr);
> > > > > > +       alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> > > > > >
> > > > > >         if (alloc == 0)
> > > > > >                 printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > > > > > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> > > > > >         return alloc;
> > > > > >  }
> > > > > >
> > > > > > -/*
> > > > > > - * Try to allocate a specific address range: must be in defined memory but not
> > > > > > - * reserved
> > > > > > - */
> > > > > > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > > > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > > > > > +                                   enum lmb_flags flags)
> > > > > >  {
> > > > > >         long rgn;
> > > > > >
> > > > > > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > > >                                       lmb.memory.region[rgn].size,
> > > > > >                                       base + size - 1, 1)) {
> > > > > >                         /* ok, reserve the memory */
> > > > > > -                       if (lmb_reserve(base, size) >= 0)
> > > > > > +                       if (lmb_reserve_flags(base, size, flags) >= 0)
> > > > > >                                 return base;
> > > > > >                 }
> > > > > >         }
> > > > > > +
> > > > > >         return 0;
> > > > > >  }
> > > > > >
> > > > > > +/*
> > > > > > + * Try to allocate a specific address range: must be in defined memory but not
> > > > > > + * reserved
> > > > > > + */
> > > > > > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > > > > > +{
> > > > > > +       return __lmb_alloc_addr(base, size, LMB_NONE);
> > > > > > +}
> > > > > > +
> > > > > >  /* Return number of bytes from a given address that are free */
> > > > > >  phys_size_t lmb_get_free_size(phys_addr_t addr)
> > > > > >  {
> > > > > > --
> > > > > > 2.34.1
> > > > > >
Heinrich Schuchardt June 11, 2024, 9:17 a.m. UTC | #9
On 07.06.24 20:52, Sughosh Ganu wrote:
> Allow for resizing of LMB regions if the region attributes match. The
> current code returns a failure status on detecting an overlapping
> address. This worked up until now since the LMB calls were not
> persistent and global -- the LMB memory map was specific and private
> to a given caller of the LMB API's.
>
> With the change in the LMB code to make the LMB reservations
> persistent, there needs to be a check on whether the memory region can
> be resized, and then do it if so. To distinguish between memory that
> cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> of memory with this attribute would indicate that the region cannot be
> resized.
>
> Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> ---
>   include/lmb.h |   1 +
>   lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
>   2 files changed, 107 insertions(+), 14 deletions(-)
>
> diff --git a/include/lmb.h b/include/lmb.h
> index 03bce2a50c..1d4cd255d2 100644
> --- a/include/lmb.h
> +++ b/include/lmb.h
> @@ -20,6 +20,7 @@
>   enum lmb_flags {
>   	LMB_NONE		= 0x0,
>   	LMB_NOMAP		= 0x4,
> +	LMB_NOOVERWRITE		= 0x8,

Please add the missing description for the new value. Using the first
available bit (0x01) would be expected.

Using the BIT macro would make it clearer that these are bits of a bitmap:

enum lmb_flags {
	LMB_NONE		= BIT(0),
	LMB_NOOVERWRITE		= BIT(1),
};

Best regards

Heinrich


>   };
>
>   /**
> diff --git a/lib/lmb.c b/lib/lmb.c
> index de5a2cf23b..0a4f3d5bcd 100644
> --- a/lib/lmb.c
> +++ b/lib/lmb.c
> @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
>   	}
>   }
>
> +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> +				   enum lmb_flags flags)
> +{
> +	return rgn->region[r1].flags == flags;
> +}
> +
> +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> +				      phys_addr_t base, phys_size_t size,
> +				      enum lmb_flags flags)
> +{
> +	phys_size_t rgnsize;
> +	unsigned long rgn_cnt, idx;
> +	phys_addr_t rgnbase, rgnend;
> +	phys_addr_t mergebase, mergeend;
> +
> +	rgn_cnt = 0;
> +	idx = i;
> +	/*
> +	 * First thing to do is to identify how many regions
> +	 * the requested region overlaps.
> +	 * If the flags match, combine all these overlapping
> +	 * regions into a single region, and remove the merged
> +	 * regions.
> +	 */
> +	while (idx < rgn->cnt - 1) {
> +		rgnbase = rgn->region[idx].base;
> +		rgnsize = rgn->region[idx].size;
> +
> +		if (lmb_addrs_overlap(base, size, rgnbase,
> +				      rgnsize)) {
> +			if (!lmb_region_flags_match(rgn, idx, flags))
> +				return -1;
> +			rgn_cnt++;
> +			idx++;
> +		}
> +	}
> +
> +	/* The merged region's base and size */
> +	rgnbase = rgn->region[i].base;
> +	mergebase = min(base, rgnbase);
> +	rgnend = rgn->region[idx].base + rgn->region[idx].size;
> +	mergeend = max(rgnend, (base + size));
> +
> +	rgn->region[i].base = mergebase;
> +	rgn->region[i].size = mergeend - mergebase;
> +
> +	/* Now remove the merged regions */
> +	while (--rgn_cnt)
> +		lmb_remove_region(rgn, i + 1);
> +
> +	return 0;
> +}
> +
> +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> +			       phys_addr_t base, phys_size_t size,
> +			       enum lmb_flags flags)
> +{
> +	long ret = 0;
> +	phys_addr_t rgnend;
> +
> +	if (i == rgn->cnt - 1 ||
> +		base + size < rgn->region[i + 1].base) {
> +		if (!lmb_region_flags_match(rgn, i, flags))
> +			return -1;
> +
> +		rgnend = rgn->region[i].base + rgn->region[i].size;
> +		rgn->region[i].base = min(base, rgn->region[i].base);
> +		rgnend = max(base + size, rgnend);
> +		rgn->region[i].size = rgnend - rgn->region[i].base;
> +	} else {
> +		ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> +	}
> +
> +	return ret;
> +}
> +
>   /* This routine called with relocation disabled. */
>   static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>   				 phys_size_t size, enum lmb_flags flags)
>   {
>   	unsigned long coalesced = 0;
> -	long adjacent, i;
> +	long ret, i;
>
>   	if (rgn->cnt == 0) {
>   		rgn->region[0].base = base;
> @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
>   				return -1; /* regions with new flags */
>   		}
>
> -		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> -		if (adjacent > 0) {
> +		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> +		if (ret > 0) {
>   			if (flags != rgnflags)
>   				break;
>   			rgn->region[i].base -= size;
>   			rgn->region[i].size += size;
>   			coalesced++;
>   			break;
> -		} else if (adjacent < 0) {
> +		} else if (ret < 0) {
>   			if (flags != rgnflags)
>   				break;
>   			rgn->region[i].size += size;
>   			coalesced++;
>   			break;
>   		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> -			/* regions overlap */
> -			return -1;
> +			if (flags == LMB_NONE) {
> +				ret = lmb_resize_regions(rgn, i, base, size,
> +							 flags);
> +				if (ret < 0)
> +					return -1;
> +
> +				coalesced++;
> +				break;
> +			} else {
> +				return -1;
> +			}
>   		}
>   	}
>
> @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
>   }
>
>   static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> -				    phys_addr_t max_addr)
> +				    phys_addr_t max_addr, enum lmb_flags flags)
>   {
>   	long i, rgn;
>   	phys_addr_t base = 0;
> @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>   {
>   	phys_addr_t alloc;
>
> -	alloc = __lmb_alloc_base(size, align, max_addr);
> +	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
>
>   	if (alloc == 0)
>   		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
>   	return alloc;
>   }
>
> -/*
> - * Try to allocate a specific address range: must be in defined memory but not
> - * reserved
> - */
> -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> +				    enum lmb_flags flags)
>   {
>   	long rgn;
>
> @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
>   				      lmb.memory.region[rgn].size,
>   				      base + size - 1, 1)) {
>   			/* ok, reserve the memory */
> -			if (lmb_reserve(base, size) >= 0)
> +			if (lmb_reserve_flags(base, size, flags) >= 0)
>   				return base;
>   		}
>   	}
> +
>   	return 0;
>   }
>
> +/*
> + * Try to allocate a specific address range: must be in defined memory but not
> + * reserved
> + */
> +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> +{
> +	return __lmb_alloc_addr(base, size, LMB_NONE);
> +}
> +
>   /* Return number of bytes from a given address that are free */
>   phys_size_t lmb_get_free_size(phys_addr_t addr)
>   {
Sughosh Ganu June 11, 2024, 9:50 a.m. UTC | #10
On Tue, 11 Jun 2024 at 14:47, Heinrich Schuchardt <xypron.glpk@gmx.de> wrote:
>
> On 07.06.24 20:52, Sughosh Ganu wrote:
> > Allow for resizing of LMB regions if the region attributes match. The
> > current code returns a failure status on detecting an overlapping
> > address. This worked up until now since the LMB calls were not
> > persistent and global -- the LMB memory map was specific and private
> > to a given caller of the LMB API's.
> >
> > With the change in the LMB code to make the LMB reservations
> > persistent, there needs to be a check on whether the memory region can
> > be resized, and then do it if so. To distinguish between memory that
> > cannot be resized, add a new flag, LMB_NOOVERWRITE. Reserving a region
> > of memory with this attribute would indicate that the region cannot be
> > resized.
> >
> > Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
> > ---
> >   include/lmb.h |   1 +
> >   lib/lmb.c     | 120 ++++++++++++++++++++++++++++++++++++++++++++------
> >   2 files changed, 107 insertions(+), 14 deletions(-)
> >
> > diff --git a/include/lmb.h b/include/lmb.h
> > index 03bce2a50c..1d4cd255d2 100644
> > --- a/include/lmb.h
> > +++ b/include/lmb.h
> > @@ -20,6 +20,7 @@
> >   enum lmb_flags {
> >       LMB_NONE                = 0x0,
> >       LMB_NOMAP               = 0x4,
> > +     LMB_NOOVERWRITE         = 0x8,
>
> Please, add the missing description for the new value. Using the first
> available bit (0x01) would be expected.
>
> Using the BIT macro would make it clearer that these are bits of a bitmap:
>
> enum lmb_flags {
>         LMB_NONE                = BIT(0),
>         LMB_NOOVERWRITE         = BIT(1),
> };

Noted. Will change.

-sughosh

>
> Best regards
>
> Heinrich
>
>
> >   };
> >
> >   /**
> > diff --git a/lib/lmb.c b/lib/lmb.c
> > index de5a2cf23b..0a4f3d5bcd 100644
> > --- a/lib/lmb.c
> > +++ b/lib/lmb.c
> > @@ -260,12 +260,88 @@ void lmb_add_memory(struct bd_info *bd)
> >       }
> >   }
> >
> > +static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
> > +                                enum lmb_flags flags)
> > +{
> > +     return rgn->region[r1].flags == flags;
> > +}
> > +
> > +static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
> > +                                   phys_addr_t base, phys_size_t size,
> > +                                   enum lmb_flags flags)
> > +{
> > +     phys_size_t rgnsize;
> > +     unsigned long rgn_cnt, idx;
> > +     phys_addr_t rgnbase, rgnend;
> > +     phys_addr_t mergebase, mergeend;
> > +
> > +     rgn_cnt = 0;
> > +     idx = i;
> > +     /*
> > +      * First thing to do is to identify how many regions
> > +      * the requested region overlaps.
> > +      * If the flags match, combine all these overlapping
> > +      * regions into a single region, and remove the merged
> > +      * regions.
> > +      */
> > +     while (idx < rgn->cnt - 1) {
> > +             rgnbase = rgn->region[idx].base;
> > +             rgnsize = rgn->region[idx].size;
> > +
> > +             if (lmb_addrs_overlap(base, size, rgnbase,
> > +                                   rgnsize)) {
> > +                     if (!lmb_region_flags_match(rgn, idx, flags))
> > +                             return -1;
> > +                     rgn_cnt++;
> > +                     idx++;
> > +             }
> > +     }
> > +
> > +     /* The merged region's base and size */
> > +     rgnbase = rgn->region[i].base;
> > +     mergebase = min(base, rgnbase);
> > +     rgnend = rgn->region[idx].base + rgn->region[idx].size;
> > +     mergeend = max(rgnend, (base + size));
> > +
> > +     rgn->region[i].base = mergebase;
> > +     rgn->region[i].size = mergeend - mergebase;
> > +
> > +     /* Now remove the merged regions */
> > +     while (--rgn_cnt)
> > +             lmb_remove_region(rgn, i + 1);
> > +
> > +     return 0;
> > +}
> > +
> > +static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
> > +                            phys_addr_t base, phys_size_t size,
> > +                            enum lmb_flags flags)
> > +{
> > +     long ret = 0;
> > +     phys_addr_t rgnend;
> > +
> > +     if (i == rgn->cnt - 1 ||
> > +             base + size < rgn->region[i + 1].base) {
> > +             if (!lmb_region_flags_match(rgn, i, flags))
> > +                     return -1;
> > +
> > +             rgnend = rgn->region[i].base + rgn->region[i].size;
> > +             rgn->region[i].base = min(base, rgn->region[i].base);
> > +             rgnend = max(base + size, rgnend);
> > +             rgn->region[i].size = rgnend - rgn->region[i].base;
> > +     } else {
> > +             ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
> > +     }
> > +
> > +     return ret;
> > +}
> > +
> >   /* This routine called with relocation disabled. */
> >   static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >                                phys_size_t size, enum lmb_flags flags)
> >   {
> >       unsigned long coalesced = 0;
> > -     long adjacent, i;
> > +     long ret, i;
> >
> >       if (rgn->cnt == 0) {
> >               rgn->region[0].base = base;
> > @@ -290,23 +366,32 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
> >                               return -1; /* regions with new flags */
> >               }
> >
> > -             adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > -             if (adjacent > 0) {
> > +             ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
> > +             if (ret > 0) {
> >                       if (flags != rgnflags)
> >                               break;
> >                       rgn->region[i].base -= size;
> >                       rgn->region[i].size += size;
> >                       coalesced++;
> >                       break;
> > -             } else if (adjacent < 0) {
> > +             } else if (ret < 0) {
> >                       if (flags != rgnflags)
> >                               break;
> >                       rgn->region[i].size += size;
> >                       coalesced++;
> >                       break;
> >               } else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
> > -                     /* regions overlap */
> > -                     return -1;
> > +                     if (flags == LMB_NONE) {
> > +                             ret = lmb_resize_regions(rgn, i, base, size,
> > +                                                      flags);
> > +                             if (ret < 0)
> > +                                     return -1;
> > +
> > +                             coalesced++;
> > +                             break;
> > +                     } else {
> > +                             return -1;
> > +                     }
> >               }
> >       }
> >
> > @@ -448,7 +533,7 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
> >   }
> >
> >   static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
> > -                                 phys_addr_t max_addr)
> > +                                 phys_addr_t max_addr, enum lmb_flags flags)
> >   {
> >       long i, rgn;
> >       phys_addr_t base = 0;
> > @@ -498,7 +583,7 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >   {
> >       phys_addr_t alloc;
> >
> > -     alloc = __lmb_alloc_base(size, align, max_addr);
> > +     alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
> >
> >       if (alloc == 0)
> >               printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
> > @@ -507,11 +592,8 @@ phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
> >       return alloc;
> >   }
> >
> > -/*
> > - * Try to allocate a specific address range: must be in defined memory but not
> > - * reserved
> > - */
> > -phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > +static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
> > +                                 enum lmb_flags flags)
> >   {
> >       long rgn;
> >
> > @@ -526,13 +608,23 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> >                                     lmb.memory.region[rgn].size,
> >                                     base + size - 1, 1)) {
> >                       /* ok, reserve the memory */
> > -                     if (lmb_reserve(base, size) >= 0)
> > +                     if (lmb_reserve_flags(base, size, flags) >= 0)
> >                               return base;
> >               }
> >       }
> > +
> >       return 0;
> >   }
> >
> > +/*
> > + * Try to allocate a specific address range: must be in defined memory but not
> > + * reserved
> > + */
> > +phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
> > +{
> > +     return __lmb_alloc_addr(base, size, LMB_NONE);
> > +}
> > +
> >   /* Return number of bytes from a given address that are free */
> >   phys_size_t lmb_get_free_size(phys_addr_t addr)
> >   {
>
diff mbox series

Patch

diff --git a/include/lmb.h b/include/lmb.h
index 03bce2a50c..1d4cd255d2 100644
--- a/include/lmb.h
+++ b/include/lmb.h
@@ -20,6 +20,7 @@ 
 enum lmb_flags {
 	LMB_NONE		= 0x0,
 	LMB_NOMAP		= 0x4,
+	LMB_NOOVERWRITE		= 0x8,
 };
 
 /**
diff --git a/lib/lmb.c b/lib/lmb.c
index de5a2cf23b..0a4f3d5bcd 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -260,12 +260,88 @@  void lmb_add_memory(struct bd_info *bd)
 	}
 }
 
+static bool lmb_region_flags_match(struct lmb_region *rgn, unsigned long r1,
+				   enum lmb_flags flags)
+{
+	return rgn->region[r1].flags == flags;
+}
+
+static long lmb_merge_overlap_regions(struct lmb_region *rgn, unsigned long i,
+				      phys_addr_t base, phys_size_t size,
+				      enum lmb_flags flags)
+{
+	phys_size_t rgnsize;
+	unsigned long rgn_cnt, idx;
+	phys_addr_t rgnbase, rgnend;
+	phys_addr_t mergebase, mergeend;
+
+	rgn_cnt = 0;
+	idx = i;
+	/*
+	 * First thing to do is to identify how many regions
+	 * the requested region overlaps.
+	 * If the flags match, combine all these overlapping
+	 * regions into a single region, and remove the merged
+	 * regions.
+	 */
+	while (idx < rgn->cnt - 1) {
+		rgnbase = rgn->region[idx].base;
+		rgnsize = rgn->region[idx].size;
+
+		if (lmb_addrs_overlap(base, size, rgnbase,
+				      rgnsize)) {
+			if (!lmb_region_flags_match(rgn, idx, flags))
+				return -1;
+			rgn_cnt++;
+			idx++;
+		}
+	}
+
+	/* The merged region's base and size */
+	rgnbase = rgn->region[i].base;
+	mergebase = min(base, rgnbase);
+	rgnend = rgn->region[idx].base + rgn->region[idx].size;
+	mergeend = max(rgnend, (base + size));
+
+	rgn->region[i].base = mergebase;
+	rgn->region[i].size = mergeend - mergebase;
+
+	/* Now remove the merged regions */
+	while (--rgn_cnt)
+		lmb_remove_region(rgn, i + 1);
+
+	return 0;
+}
+
+static long lmb_resize_regions(struct lmb_region *rgn, unsigned long i,
+			       phys_addr_t base, phys_size_t size,
+			       enum lmb_flags flags)
+{
+	long ret = 0;
+	phys_addr_t rgnend;
+
+	if (i == rgn->cnt - 1 ||
+		base + size < rgn->region[i + 1].base) {
+		if (!lmb_region_flags_match(rgn, i, flags))
+			return -1;
+
+		rgnend = rgn->region[i].base + rgn->region[i].size;
+		rgn->region[i].base = min(base, rgn->region[i].base);
+		rgnend = max(base + size, rgnend);
+		rgn->region[i].size = rgnend - rgn->region[i].base;
+	} else {
+		ret = lmb_merge_overlap_regions(rgn, i, base, size, flags);
+	}
+
+	return ret;
+}
+
 /* This routine called with relocation disabled. */
 static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
 				 phys_size_t size, enum lmb_flags flags)
 {
 	unsigned long coalesced = 0;
-	long adjacent, i;
+	long ret, i;
 
 	if (rgn->cnt == 0) {
 		rgn->region[0].base = base;
@@ -290,23 +366,32 @@  static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
 				return -1; /* regions with new flags */
 		}
 
-		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-		if (adjacent > 0) {
+		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+		if (ret > 0) {
 			if (flags != rgnflags)
 				break;
 			rgn->region[i].base -= size;
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
-		} else if (adjacent < 0) {
+		} else if (ret < 0) {
 			if (flags != rgnflags)
 				break;
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
 		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
-			/* regions overlap */
-			return -1;
+			if (flags == LMB_NONE) {
+				ret = lmb_resize_regions(rgn, i, base, size,
+							 flags);
+				if (ret < 0)
+					return -1;
+
+				coalesced++;
+				break;
+			} else {
+				return -1;
+			}
 		}
 	}
 
@@ -448,7 +533,7 @@  static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 }
 
 static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
-				    phys_addr_t max_addr)
+				    phys_addr_t max_addr, enum lmb_flags flags)
 {
 	long i, rgn;
 	phys_addr_t base = 0;
@@ -498,7 +583,7 @@  phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
 {
 	phys_addr_t alloc;
 
-	alloc = __lmb_alloc_base(size, align, max_addr);
+	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
 
 	if (alloc == 0)
 		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
@@ -507,11 +592,8 @@  phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
 	return alloc;
 }
 
-/*
- * Try to allocate a specific address range: must be in defined memory but not
- * reserved
- */
-phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
+static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
+				    enum lmb_flags flags)
 {
 	long rgn;
 
@@ -526,13 +608,23 @@  phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
 				      lmb.memory.region[rgn].size,
 				      base + size - 1, 1)) {
 			/* ok, reserve the memory */
-			if (lmb_reserve(base, size) >= 0)
+			if (lmb_reserve_flags(base, size, flags) >= 0)
 				return base;
 		}
 	}
+
 	return 0;
 }
 
+/*
+ * Try to allocate a specific address range: must be in defined memory but not
+ * reserved
+ */
+phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
+{
+	return __lmb_alloc_addr(base, size, LMB_NONE);
+}
+
 /* Return number of bytes from a given address that are free */
 phys_size_t lmb_get_free_size(phys_addr_t addr)
 {