[v4] elf: Properly align PT_LOAD segments [BZ #28676]
Checks
Context |
Check |
Description |
dj/TryBot-apply_patch |
success
|
Patch applied to master at the time it was sent
|
dj/TryBot-32bit |
success
|
Build for i686
|
Commit Message
On Thu, Dec 09, 2021 at 10:04:32AM -0800, H.J. Lu wrote:
> On Thu, Dec 09, 2021 at 07:14:10AM -0800, H.J. Lu wrote:
>
> Hi,
>
> I updated your patch. Please take a look.
>
>
> H.J.
> --
> When PT_LOAD segment alignment > the page size, allocate enough space
> to ensure that the segment can be properly aligned.
>
> This fixes [BZ #28676].
H.J.
---
Changes in v4:
1. Call unmap when the second mmap fails.
When PT_LOAD segment alignment > the page size, allocate enough space
to ensure that the segment can be properly aligned.
This fixes [BZ #28676].
---
elf/dl-load.c | 1 +
elf/dl-load.h | 2 +-
elf/dl-map-segments.h | 49 +++++++++++++++++++++++++++++++++++++++----
3 files changed, 47 insertions(+), 5 deletions(-)
Comments
Hi, H.J.
Thanks for your help! It's much better than my version.
And I found a small bug; you can check it again when you have a chance.
On 12/10/21 3:29 AM, H.J. Lu wrote:
> On Thu, Dec 09, 2021 at 10:04:32AM -0800, H.J. Lu wrote:
>> On Thu, Dec 09, 2021 at 07:14:10AM -0800, H.J. Lu wrote:
>>
>> Hi,
>>
>> I updated your patch. Please take a look.
>>
>>
>> H.J.
>> --
>> When PT_LOAD segment alignment > the page size, allocate enough space
>> to ensure that the segment can be properly aligned.
>>
>> This fixes [BZ #28676].
>
>
> H.J.
> ---
> Changes in v4:
>
> 1. Call unmap when the second mmap fails.
>
> When PT_LOAD segment alignment > the page size, allocate enough space
> to ensure that the segment can be properly aligned.
>
> This fixes [BZ #28676].
> ---
> elf/dl-load.c | 1 +
> elf/dl-load.h | 2 +-
> elf/dl-map-segments.h | 49 +++++++++++++++++++++++++++++++++++++++----
> 3 files changed, 47 insertions(+), 5 deletions(-)
>
> diff --git a/elf/dl-load.c b/elf/dl-load.c
> index bf8957e73c..9a23590bf4 100644
> --- a/elf/dl-load.c
> +++ b/elf/dl-load.c
> @@ -1150,6 +1150,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
> c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
> c->dataend = ph->p_vaddr + ph->p_filesz;
> c->allocend = ph->p_vaddr + ph->p_memsz;
> + c->mapalign = ph->p_align;
> c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
>
> /* Determine whether there is a gap between the last segment
> diff --git a/elf/dl-load.h b/elf/dl-load.h
> index e329d49a81..c121e3456c 100644
> --- a/elf/dl-load.h
> +++ b/elf/dl-load.h
> @@ -74,7 +74,7 @@ ELF_PREFERRED_ADDRESS_DATA;
> Its details have been expanded out and converted. */
> struct loadcmd
> {
> - ElfW(Addr) mapstart, mapend, dataend, allocend;
> + ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
> ElfW(Off) mapoff;
> int prot; /* PROT_* bits. */
> };
> diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
> index f9fb110ee3..f710cf2e48 100644
> --- a/elf/dl-map-segments.h
> +++ b/elf/dl-map-segments.h
> @@ -18,6 +18,50 @@
>
> #include <dl-load.h>
>
> +/* Map a segment and align it properly. */
> +
> +static __always_inline ElfW(Addr)
> +_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
> + const size_t maplength, int fd)
> +{
> + if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
> + return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
> + MAP_COPY|MAP_FILE, fd, c->mapoff);
> +
> + /* If the segment alignment > the page size, allocate enough space to
> + ensure that the segment can be properly aligned. */
> + ElfW(Addr) maplen = (maplength >= c->mapalign
> + ? (maplength + c->mapalign)
> + : (2 * c->mapalign));
> + ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
> + PROT_NONE,
> + MAP_ANONYMOUS|MAP_PRIVATE,
> + -1, 0);
> + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
> + return map_start;
> +
> + ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
> + map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
> + maplength, c->prot,
> + MAP_COPY|MAP_FILE|MAP_FIXED,
> + fd, c->mapoff);
> + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
Should this check map_start_aligned here, rather than map_start?
> + __munmap ((void *) map_start, maplen);
> + else
> + {
> + /* Unmap the unused regions. */
> + ElfW(Addr) delta = map_start_aligned - map_start;
> + if (delta)
> + __munmap ((void *) map_start, delta);
> + ElfW(Addr) map_end = map_start_aligned + maplength;
> + delta = map_start + maplen - map_end;
> + if (delta)
> + __munmap ((void *) map_end, delta);
> + }
> +
> + return map_start_aligned;
> +}
> +
> /* This implementation assumes (as does the corresponding implementation
> of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
> are always laid out with all segments contiguous (or with gaps
> @@ -53,10 +97,7 @@ _dl_map_segments (struct link_map *l, int fd,
> - MAP_BASE_ADDR (l));
>
> /* Remember which part of the address space this object uses. */
> - l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
> - c->prot,
> - MAP_COPY|MAP_FILE,
> - fd, c->mapoff);
> + l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
> if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
> return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
>
>
All is OK for me.
And I am not sure whether I need to add a testcase in a new patch,
because I saw you had submitted the testcase:
https://gitlab.com/x86-glibc/glibc/-/commit/ba63c1cb2ea1b0ae421f66756771ff4c89d4a470
I will test it again, and send out PATCH v5.
Thanks.
On Thu, Dec 9, 2021 at 5:58 PM Rongwei Wang
<rongwei.wang@linux.alibaba.com> wrote:
>
>
> Hi, lhj
>
> Thanks for your help! It's much better than my version.
> And I find a small bug. you can check it again if available.
>
> On 12/10/21 3:29 AM, H.J. Lu wrote:
> > On Thu, Dec 09, 2021 at 10:04:32AM -0800, H.J. Lu wrote:
> >> On Thu, Dec 09, 2021 at 07:14:10AM -0800, H.J. Lu wrote:
> >>
> >> Hi,
> >>
> >> I updated your patch. Please take a look.
> >>
> >>
> >> H.J.
> >> --
> >> When PT_LOAD segment alignment > the page size, allocate enough space
> >> to ensure that the segment can be properly aligned.
> >>
> >> This fixes [BZ #28676].
> >
> >
> > H.J.
> > ---
> > Changes in v4:
> >
> > 1. Call unmap when the second mmap fails.
> >
> > When PT_LOAD segment alignment > the page size, allocate enough space
> > to ensure that the segment can be properly aligned.
> >
> > This fixes [BZ #28676].
> > ---
> > elf/dl-load.c | 1 +
> > elf/dl-load.h | 2 +-
> > elf/dl-map-segments.h | 49 +++++++++++++++++++++++++++++++++++++++----
> > 3 files changed, 47 insertions(+), 5 deletions(-)
> >
> > diff --git a/elf/dl-load.c b/elf/dl-load.c
> > index bf8957e73c..9a23590bf4 100644
> > --- a/elf/dl-load.c
> > +++ b/elf/dl-load.c
> > @@ -1150,6 +1150,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
> > c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
> > c->dataend = ph->p_vaddr + ph->p_filesz;
> > c->allocend = ph->p_vaddr + ph->p_memsz;
> > + c->mapalign = ph->p_align;
> > c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
> >
> > /* Determine whether there is a gap between the last segment
> > diff --git a/elf/dl-load.h b/elf/dl-load.h
> > index e329d49a81..c121e3456c 100644
> > --- a/elf/dl-load.h
> > +++ b/elf/dl-load.h
> > @@ -74,7 +74,7 @@ ELF_PREFERRED_ADDRESS_DATA;
> > Its details have been expanded out and converted. */
> > struct loadcmd
> > {
> > - ElfW(Addr) mapstart, mapend, dataend, allocend;
> > + ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
> > ElfW(Off) mapoff;
> > int prot; /* PROT_* bits. */
> > };
> > diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
> > index f9fb110ee3..f710cf2e48 100644
> > --- a/elf/dl-map-segments.h
> > +++ b/elf/dl-map-segments.h
> > @@ -18,6 +18,50 @@
> >
> > #include <dl-load.h>
> >
> > +/* Map a segment and align it properly. */
> > +
> > +static __always_inline ElfW(Addr)
> > +_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
> > + const size_t maplength, int fd)
> > +{
> > + if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
> > + return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
> > + MAP_COPY|MAP_FILE, fd, c->mapoff);
> > +
> > + /* If the segment alignment > the page size, allocate enough space to
> > + ensure that the segment can be properly aligned. */
> > + ElfW(Addr) maplen = (maplength >= c->mapalign
> > + ? (maplength + c->mapalign)
> > + : (2 * c->mapalign));
> > + ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
> > + PROT_NONE,
> > + MAP_ANONYMOUS|MAP_PRIVATE,
> > + -1, 0);
> > + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
> > + return map_start;
> > +
> > + ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
> > + map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
> > + maplength, c->prot,
> > + MAP_COPY|MAP_FILE|MAP_FIXED,
> > + fd, c->mapoff);
> > + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
> Is map_start_aligned here?
>
> > + __munmap ((void *) map_start, maplen);
> > + else
> > + {
> > + /* Unmap the unused regions. */
> > + ElfW(Addr) delta = map_start_aligned - map_start;
> > + if (delta)
> > + __munmap ((void *) map_start, delta);
> > + ElfW(Addr) map_end = map_start_aligned + maplength;
> > + delta = map_start + maplen - map_end;
> > + if (delta)
> > + __munmap ((void *) map_end, delta);
> > + }
> > +
> > + return map_start_aligned;
> > +}
> > +
> > /* This implementation assumes (as does the corresponding implementation
> > of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
> > are always laid out with all segments contiguous (or with gaps
> > @@ -53,10 +97,7 @@ _dl_map_segments (struct link_map *l, int fd,
> > - MAP_BASE_ADDR (l));
> >
> > /* Remember which part of the address space this object uses. */
> > - l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
> > - c->prot,
> > - MAP_COPY|MAP_FILE,
> > - fd, c->mapoff);
> > + l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
> > if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
> > return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
> >
> >
> All is OK for me.
>
> And I am not sure whether I need to add a testcase in one new patch,
> because I saw you had submitted the testcase:
> https://gitlab.com/x86-glibc/glibc/-/commit/ba63c1cb2ea1b0ae421f66756771ff4c89d4a470
No, I didn't submit it.
> I will test it again, and send out PATCH v5.
Please include my testcase in your v5 patch.
Thanks.
--
H.J.
On 12/10/21 10:24 AM, H.J. Lu wrote:
> On Thu, Dec 9, 2021 at 5:58 PM Rongwei Wang
> <rongwei.wang@linux.alibaba.com> wrote:
>>
>>
>> Hi, lhj
>>
>> Thanks for your help! It's much better than my version.
>> And I find a small bug. you can check it again if available.
>>
>> On 12/10/21 3:29 AM, H.J. Lu wrote:
>>> On Thu, Dec 09, 2021 at 10:04:32AM -0800, H.J. Lu wrote:
>>>> On Thu, Dec 09, 2021 at 07:14:10AM -0800, H.J. Lu wrote:
>>>>
>>>> Hi,
>>>>
>>>> I updated your patch. Please take a look.
>>>>
>>>>
>>>> H.J.
>>>> --
>>>> When PT_LOAD segment alignment > the page size, allocate enough space
>>>> to ensure that the segment can be properly aligned.
>>>>
>>>> This fixes [BZ #28676].
>>>
>>>
>>> H.J.
>>> ---
>>> Changes in v4:
>>>
>>> 1. Call unmap when the second mmap fails.
>>>
>>> When PT_LOAD segment alignment > the page size, allocate enough space
>>> to ensure that the segment can be properly aligned.
>>>
>>> This fixes [BZ #28676].
>>> ---
>>> elf/dl-load.c | 1 +
>>> elf/dl-load.h | 2 +-
>>> elf/dl-map-segments.h | 49 +++++++++++++++++++++++++++++++++++++++----
>>> 3 files changed, 47 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/elf/dl-load.c b/elf/dl-load.c
>>> index bf8957e73c..9a23590bf4 100644
>>> --- a/elf/dl-load.c
>>> +++ b/elf/dl-load.c
>>> @@ -1150,6 +1150,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
>>> c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
>>> c->dataend = ph->p_vaddr + ph->p_filesz;
>>> c->allocend = ph->p_vaddr + ph->p_memsz;
>>> + c->mapalign = ph->p_align;
>>> c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
>>>
>>> /* Determine whether there is a gap between the last segment
>>> diff --git a/elf/dl-load.h b/elf/dl-load.h
>>> index e329d49a81..c121e3456c 100644
>>> --- a/elf/dl-load.h
>>> +++ b/elf/dl-load.h
>>> @@ -74,7 +74,7 @@ ELF_PREFERRED_ADDRESS_DATA;
>>> Its details have been expanded out and converted. */
>>> struct loadcmd
>>> {
>>> - ElfW(Addr) mapstart, mapend, dataend, allocend;
>>> + ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
>>> ElfW(Off) mapoff;
>>> int prot; /* PROT_* bits. */
>>> };
>>> diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
>>> index f9fb110ee3..f710cf2e48 100644
>>> --- a/elf/dl-map-segments.h
>>> +++ b/elf/dl-map-segments.h
>>> @@ -18,6 +18,50 @@
>>>
>>> #include <dl-load.h>
>>>
>>> +/* Map a segment and align it properly. */
>>> +
>>> +static __always_inline ElfW(Addr)
>>> +_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
>>> + const size_t maplength, int fd)
>>> +{
>>> + if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
>>> + return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
>>> + MAP_COPY|MAP_FILE, fd, c->mapoff);
>>> +
>>> + /* If the segment alignment > the page size, allocate enough space to
>>> + ensure that the segment can be properly aligned. */
>>> + ElfW(Addr) maplen = (maplength >= c->mapalign
>>> + ? (maplength + c->mapalign)
>>> + : (2 * c->mapalign));
>>> + ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
>>> + PROT_NONE,
>>> + MAP_ANONYMOUS|MAP_PRIVATE,
>>> + -1, 0);
>>> + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
>>> + return map_start;
>>> +
>>> + ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
>>> + map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
>>> + maplength, c->prot,
>>> + MAP_COPY|MAP_FILE|MAP_FIXED,
>>> + fd, c->mapoff);
>>> + if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
>> Is map_start_aligned here?
>>
>>> + __munmap ((void *) map_start, maplen);
>>> + else
>>> + {
>>> + /* Unmap the unused regions. */
>>> + ElfW(Addr) delta = map_start_aligned - map_start;
>>> + if (delta)
>>> + __munmap ((void *) map_start, delta);
>>> + ElfW(Addr) map_end = map_start_aligned + maplength;
>>> + delta = map_start + maplen - map_end;
>>> + if (delta)
>>> + __munmap ((void *) map_end, delta);
>>> + }
>>> +
>>> + return map_start_aligned;
>>> +}
>>> +
>>> /* This implementation assumes (as does the corresponding implementation
>>> of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
>>> are always laid out with all segments contiguous (or with gaps
>>> @@ -53,10 +97,7 @@ _dl_map_segments (struct link_map *l, int fd,
>>> - MAP_BASE_ADDR (l));
>>>
>>> /* Remember which part of the address space this object uses. */
>>> - l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
>>> - c->prot,
>>> - MAP_COPY|MAP_FILE,
>>> - fd, c->mapoff);
>>> + l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
>>> if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
>>> return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
>>>
>>>
>> All is OK for me.
>>
>> And I am not sure whether I need to add a testcase in one new patch,
>> because I saw you had submitted the testcase:
>> https://gitlab.com/x86-glibc/glibc/-/commit/ba63c1cb2ea1b0ae421f66756771ff4c89d4a470
>
> No, I didn't submit it.
>
>> I will test it again, and send out PATCH v5.
>
> Please include my testcase in your v5 patch.
Ok
Thanks.
>
> Thanks.
>
>
> --
> H.J.
>
@@ -1150,6 +1150,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
+ c->mapalign = ph->p_align;
c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
/* Determine whether there is a gap between the last segment
@@ -74,7 +74,7 @@ ELF_PREFERRED_ADDRESS_DATA;
Its details have been expanded out and converted. */
struct loadcmd
{
- ElfW(Addr) mapstart, mapend, dataend, allocend;
+ ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
ElfW(Off) mapoff;
int prot; /* PROT_* bits. */
};
@@ -18,6 +18,50 @@
#include <dl-load.h>
+/* Map a segment and align it properly. */
+
+static __always_inline ElfW(Addr)
+_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
+ const size_t maplength, int fd)
+{
+ if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
+ return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+ MAP_COPY|MAP_FILE, fd, c->mapoff);
+
+ /* If the segment alignment > the page size, allocate enough space to
+ ensure that the segment can be properly aligned. */
+ ElfW(Addr) maplen = (maplength >= c->mapalign
+ ? (maplength + c->mapalign)
+ : (2 * c->mapalign));
+ ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+ PROT_NONE,
+ MAP_ANONYMOUS|MAP_PRIVATE,
+ -1, 0);
+ if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
+ return map_start;
+
+ ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+ map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+ maplength, c->prot,
+ MAP_COPY|MAP_FILE|MAP_FIXED,
+ fd, c->mapoff);
+ if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
+ __munmap ((void *) map_start, maplen);
+ else
+ {
+ /* Unmap the unused regions. */
+ ElfW(Addr) delta = map_start_aligned - map_start;
+ if (delta)
+ __munmap ((void *) map_start, delta);
+ ElfW(Addr) map_end = map_start_aligned + maplength;
+ delta = map_start + maplen - map_end;
+ if (delta)
+ __munmap ((void *) map_end, delta);
+ }
+
+ return map_start_aligned;
+}
+
/* This implementation assumes (as does the corresponding implementation
of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
are always laid out with all segments contiguous (or with gaps
@@ -53,10 +97,7 @@ _dl_map_segments (struct link_map *l, int fd,
- MAP_BASE_ADDR (l));
/* Remember which part of the address space this object uses. */
- l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
- c->prot,
- MAP_COPY|MAP_FILE,
- fd, c->mapoff);
+ l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;