* time/tzfile.c (__tzfile_read): Allocate more portably,
removing the need for a couple of _Static_asserts.
---
time/tzfile.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
@@ -302,14 +302,14 @@ __tzfile_read (const char *file)
The piece-wise allocations from buf below verify that no
overflow/wraparound occurred in these computations.
- The order of the suballocations is important for alignment
- purposes. __time64_t outside a struct may require more alignment
- then inside a struct on some architectures, so it must come
- first. */
- _Static_assert (__alignof (__time64_t) >= __alignof (struct leap),
- "alignment of __time64_t");
- _Static_assert (__alignof (struct leap) >= __alignof (struct ttinfo),
- "alignment of struct leap");
+ The order of the suballocations is important for alignment purposes.
+ The calculation of ALIGNMENT_SLOP assumes arrays are allocated in
+ element order __time64_t, struct leap, struct ttinfo. */
+ size_t alignment_slop =
+ ((__alignof__ (struct leap) <= __alignof__ (__time64_t)
+ ? 0 : __alignof__ (struct leap) - __alignof__ (__time64_t))
+ + (__alignof__ (struct ttinfo) <= __alignof__ (struct leap)
+ ? 0 : __alignof__ (struct ttinfo) - __alignof__ (struct leap)));
struct alloc_buffer buf;
{
size_t total_size, product;
@@ -322,6 +322,7 @@ __tzfile_read (const char *file)
v |= ckd_add (&total_size, total_size, num_transitions); /* type_idxs */
v |= ckd_add (&total_size, total_size, chars); /* zone_names */
v |= ckd_add (&total_size, total_size, tzspec_size);
+ v |= ckd_add (&total_size, total_size, alignment_slop);
if (v)
goto lose;
transitions = malloc (total_size);