aarch64: Standardized codes by using macro __glibc_{un, }likely and AARCH64_R
Checks
| Context | Check | Description |
| --- | --- | --- |
| dj/TryBot-apply_patch | success | Patch applied to master at the time it was sent |
| dj/TryBot-32bit | success | Build for i686 |
| redhat-pt-bot/TryBot-still_applies | warning | Patch no longer applies to master |
Commit Message
---
sysdeps/aarch64/dl-machine.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
Comments
The 03/13/2023 11:18, caiyinyu wrote:
> ---
> sysdeps/aarch64/dl-machine.h | 10 +++++-----
> 1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
> index 4170b9269f..da4f08dec6 100644
> --- a/sysdeps/aarch64/dl-machine.h
> +++ b/sysdeps/aarch64/dl-machine.h
> @@ -83,7 +83,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
> to intercept the calls to collect information. In this case we
> don't store the address in the GOT so that all future calls also
> end in this function. */
> - if (profile != 0)
> + if (__glibc_unlikely (profile))
> {
profile is unlikely, but this check only runs once,
so I don't think the hint is useful there.
The rest looks OK... but is there a reason to clean
this up now? There are many __builtin_expect uses in
the codebase, and changing them likely makes backporting
patches harder without much benefit.
@@ -83,7 +83,7 @@ elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
to intercept the calls to collect information. In this case we
don't store the address in the GOT so that all future calls also
end in this function. */
- if (profile != 0)
+ if (__glibc_unlikely (profile))
{
got[2] = (ElfW(Addr)) &_dl_runtime_profile;
@@ -172,9 +172,9 @@ elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
ElfW(Addr) *const reloc_addr = reloc_addr_arg;
const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
- if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
+ if (__glibc_unlikely (r_type == AARCH64_R(RELATIVE)))
*reloc_addr = map->l_addr + reloc->r_addend;
- else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
+ else if (__glibc_unlikely (r_type == AARCH64_R(NONE)))
return;
else
{
@@ -311,7 +311,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
/* Check for unexpected PLT reloc type. */
- if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
+ if (__glibc_likely (r_type == AARCH64_R(JUMP_SLOT)))
{
if (__glibc_unlikely (map->l_info[DT_AARCH64 (VARIANT_PCS)] != NULL))
{
@@ -341,7 +341,7 @@ elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
else
*reloc_addr = map->l_mach.plt;
}
- else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
+ else if (__glibc_likely (r_type == AARCH64_R(TLSDESC)))
{
const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);