libatomic: Fix up libat_{,un}lock_n for mingw [PR119796]
Checks
Context | Check | Description
linaro-tcwg-bot/tcwg_gcc_build--master-aarch64 | success | Build passed
linaro-tcwg-bot/tcwg_simplebootstrap_build--master-arm-bootstrap | success | Build passed
linaro-tcwg-bot/tcwg_simplebootstrap_build--master-aarch64-bootstrap | success | Build passed
linaro-tcwg-bot/tcwg_gcc_check--master-aarch64 | success | Test passed
linaro-tcwg-bot/tcwg_gcc_build--master-arm | success | Build passed
linaro-tcwg-bot/tcwg_gcc_check--master-arm | success | Test passed
Commit Message
Hi!

Here is just a port of the previously posted patch to mingw, which
clearly has the same problems.

Untested, though; I don't have Windows anywhere around.
2025-04-14  Jakub Jelinek  <jakub@redhat.com>

	PR libstdc++/119796
	* config/mingw/lock.c (libat_lock_n, libat_unlock_n): Start by
	computing how many locks will be needed, taking
	((uintptr_t)ptr % WATCH_SIZE) into account.  If some locks from
	the end of the locks array and others from the start of it will
	be needed, first lock the ones from the start, followed by the
	ones from the end.
Jakub
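
For illustration, the new lock-count computation can be sketched as a
standalone function as below.  The WATCH_SIZE and NLOCKS values and the
needed_locks name are made up for the example; the real values come from
libatomic's per-target configuration.

#include <stdint.h>
#include <stddef.h>

/* Illustrative values only; the real ones come from libatomic's
   per-target configuration.  */
#define WATCH_SIZE 64	/* bytes guarded by a single lock  */
#define NLOCKS 64	/* number of entries in the locks array  */

/* An n-byte object starting at ptr may straddle WATCH_SIZE-sized
   blocks, so the offset of ptr within its block is added before
   rounding up, giving the number of blocks the object actually
   spans.  */
static size_t
needed_locks (void *ptr, size_t n)
{
  size_t nlocks
    = (n + ((uintptr_t) ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
  /* Don't lock more than all the locks we have.  */
  if (nlocks > NLOCKS)
    nlocks = NLOCKS;
  return nlocks;
}

With these numbers, a 16-byte object whose address has
(uintptr_t)ptr % WATCH_SIZE == 56 straddles two watch blocks, and
(16 + 56 + 63) / 64 == 2 locks are taken, whereas rounding n alone
would give only one.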
Comments
On Mon, 14 Apr 2025, Jakub Jelinek wrote:
> Hi!
>
> Here is just a port of the previously posted patch to mingw, which
> clearly has the same problems.
>
> Untested, though; I don't have Windows anywhere around.
OK.
Richard.
--- libatomic/config/mingw/lock.c.jj	2025-04-08 14:09:40.988589457 +0200
+++ libatomic/config/mingw/lock.c	2025-04-14 15:39:08.244686172 +0200
@@ -87,21 +87,30 @@ libat_lock_n (void *ptr, size_t n)
 {
   uintptr_t h = addr_hash (ptr);
   size_t i = 0;
+  size_t nlocks
+    = (n + ((uintptr_t)ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
 
   /* Don't lock more than all the locks we have. */
-  if (n > PAGE_SIZE)
-    n = PAGE_SIZE;
+  if (nlocks > NLOCKS)
+    nlocks = NLOCKS;
 
-  do
+  if (__builtin_expect (h + nlocks > NLOCKS, 0))
+    {
+      size_t j = h + nlocks - NLOCKS;
+      for (; i < j; ++i)
+	{
+	  if (!locks[i].mutex)
+	    locks[i].mutex = CreateMutex (NULL, FALSE, NULL);
+	  WaitForSingleObject (locks[i].mutex, INFINITE);
+	}
+    }
+
+  for (; i < nlocks; ++i)
     {
       if (!locks[h].mutex)
-	locks[h].mutex = CreateMutex (NULL, FALSE, NULL);
-      WaitForSingleObject (locks[h].mutex, INFINITE);
-      if (++h == NLOCKS)
-	h = 0;
-      i += WATCH_SIZE;
+	locks[h].mutex = CreateMutex (NULL, FALSE, NULL);
+      WaitForSingleObject (locks[h++].mutex, INFINITE);
     }
-  while (i < n);
 }
 
 void
@@ -109,17 +118,22 @@ libat_unlock_n (void *ptr, size_t n)
 {
   uintptr_t h = addr_hash (ptr);
   size_t i = 0;
+  size_t nlocks
+    = (n + ((uintptr_t)ptr % WATCH_SIZE) + WATCH_SIZE - 1) / WATCH_SIZE;
 
-  if (n > PAGE_SIZE)
-    n = PAGE_SIZE;
+  /* Don't lock more than all the locks we have. */
+  if (nlocks > NLOCKS)
+    nlocks = NLOCKS;
 
-  do
+  if (__builtin_expect (h + nlocks > NLOCKS, 0))
     {
-      if (locks[h].mutex)
-	ReleaseMutex (locks[h].mutex);
-      if (++h == NLOCKS)
-	h = 0;
-      i += WATCH_SIZE;
+      size_t j = h + nlocks - NLOCKS;
+      for (; i < j; ++i)
+	if (locks[i].mutex)
+	  ReleaseMutex (locks[i].mutex);
     }
-  while (i < n);
+
+  for (; i < nlocks; ++i, ++h)
+    if (locks[h].mutex)
+      ReleaseMutex (locks[h].mutex);
 }
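
A note on the acquisition order the patch establishes: when the range of
needed locks wraps past the end of the array, both functions now handle
the wrapped-around low indices first and only then the indices from h up
to NLOCKS - 1, so every thread takes the mutexes in ascending index
order.  Below is a minimal standalone sketch of that ordering; the
NLOCKS value, the locks array, and the lock_range name are assumptions
for the example, but the Win32 calls are the same ones lock.c uses.

#include <windows.h>
#include <stddef.h>

#define NLOCKS 64			/* illustrative assumption  */

static struct { HANDLE mutex; } locks[NLOCKS];

/* Acquire the nlocks mutexes guarding a range that starts at index h
   and may wrap around the end of the array.  Taking the wrapped-around
   low indices first means every thread locks mutexes in ascending
   index order, so no cycle of threads waiting on each other's mutexes
   can form, and thus no deadlock.  */
static void
lock_range (size_t h, size_t nlocks)
{
  size_t i = 0;
  if (h + nlocks > NLOCKS)		/* range wraps past the end  */
    for (size_t j = h + nlocks - NLOCKS; i < j; ++i)
      {
	if (!locks[i].mutex)		/* lazily created, as in lock.c  */
	  locks[i].mutex = CreateMutex (NULL, FALSE, NULL);
	WaitForSingleObject (locks[i].mutex, INFINITE);
      }
  for (; i < nlocks; ++i)		/* then h, h+1, ..., NLOCKS-1  */
    {
      if (!locks[h].mutex)
	locks[h].mutex = CreateMutex (NULL, FALSE, NULL);
      WaitForSingleObject (locks[h++].mutex, INFINITE);
    }
}

Release order does not matter for deadlock avoidance, since releasing a
mutex never blocks, which is why libat_unlock_n can simply walk the same
two ranges without further care.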