From patchwork Wed Jun 1 02:18:30 2022
X-Patchwork-Submitter: caiyinyu
X-Patchwork-Id: 54609
From: caiyinyu
To: libc-alpha@sourceware.org
Cc: joseph_myers@mentor.com, caiyinyu
Subject: [PATCH v5 07/13] LoongArch: Atomic and Locking Routines
Date: Wed, 1 Jun 2022 10:18:30 +0800
Message-Id: <20220601021836.1082160-8-caiyinyu@loongson.cn>
In-Reply-To: <20220601021836.1082160-1-caiyinyu@loongson.cn>
References: <20220601021836.1082160-1-caiyinyu@loongson.cn>
X-Mailer: git-send-email 2.31.1

---
 .../sysv/linux/loongarch/atomic-machine.h     | 181 ++++++++++++++++++
 1 file changed, 181 insertions(+)
 create mode 100644 sysdeps/unix/sysv/linux/loongarch/atomic-machine.h

diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
new file mode 100644
index 0000000000..60db25587e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
@@ -0,0 +1,181 @@
+/* Atomic operations.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
+#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
+
+#define atomic_full_barrier() __sync_synchronize ()
+
+#define __HAVE_64B_ATOMICS (__loongarch_grlen >= 64)
+#define USE_ATOMIC_COMPILER_BUILTINS 1
+#define ATOMIC_EXCHANGE_USES_CAS 0
+
+/* Compare and exchange.
+   For all "bool" routines, we return FALSE if exchange successful.  */
+
+#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                  __ATOMIC_RELAXED); \
+  })
+
+#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                  __ATOMIC_RELAXED); \
+  })
+
+#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                  __ATOMIC_RELAXED); \
+  })
+
+#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                  __ATOMIC_RELAXED); \
+  })
+
+#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                 __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                 __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                 __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+                                 __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+/* Atomic compare and exchange.  */
+
+#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
+                        __ATOMIC_ACQUIRE)
+
+#define atomic_compare_and_exchange_val_acq(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+                       __ATOMIC_ACQUIRE)
+
+#define atomic_compare_and_exchange_val_rel(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+                       __ATOMIC_RELEASE)
+
+/* Atomic exchange (without compare).  */
+
+#define __arch_exchange_8_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_16_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_32_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_64_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+#define atomic_exchange_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+/* Atomically add value and return the previous (unincremented) value.  */
+
+#define __arch_exchange_and_add_8_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_16_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_32_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_64_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+#define atomic_exchange_and_add_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_and_add_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_RELEASE)
+
+/* Miscellaneous.  */
+
+#define asm_amo(which, mem, value) \
+  ({ \
+    __atomic_check_size (mem); \
+    typeof (*mem) __tmp; \
+    if (sizeof (__tmp) == 4) \
+      asm volatile (which ".w" \
+                    "\t%0, %z2, %1" \
+                    : "=&r" (__tmp), "+ZB" (*(mem)) \
+                    : "rJ" (value)); \
+    else if (sizeof (__tmp) == 8) \
+      asm volatile (which ".d" \
+                    "\t%0, %z2, %1" \
+                    : "=&r" (__tmp), "+ZB" (*(mem)) \
+                    : "rJ" (value)); \
+    else \
+      abort (); \
+    __tmp; \
+  })
+
+#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
+#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
+
+#define atomic_bit_test_set(mem, bit) \
+  ({ \
+    typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
+    asm_amo ("amor_db", mem, __mask) & __mask; \
+  })
+
+#define catomic_exchange_and_add(mem, value) \
+  atomic_exchange_and_add (mem, value)
+#define catomic_max(mem, value) atomic_max (mem, value)
+
+#endif /* bits/atomic.h */
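
For anyone cross-checking the conventions the header relies on: the "bool"
compare-and-exchange macros return FALSE when the exchange succeeded (the
negation of the underlying builtin's result), and the exchange-and-add macros
return the previous, unincremented value. The short sketch below is
illustrative only and not part of the patch; it exercises the same GCC
__atomic builtins the header wraps, and the file name and variables are
hypothetical.

/* cas-conventions.c: illustrative sketch, not from the patch.  Shows the
   calling conventions atomic-machine.h implements on top of the GCC
   __atomic builtins.  */
#include <stdio.h>

static int counter;

int
main (void)
{
  int expected = 0;

  /* __atomic_compare_exchange_n returns true on success; the header's
     __arch_compare_and_exchange_bool_*_int macros negate that, so they
     yield FALSE when the exchange succeeded.  */
  int ok = __atomic_compare_exchange_n (&counter, &expected, 1, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);

  /* __atomic_fetch_add returns the previous (unincremented) value, which
     is what atomic_exchange_and_add_acq propagates.  */
  int old = __atomic_fetch_add (&counter, 41, __ATOMIC_ACQUIRE);

  printf ("exchange succeeded: %d, previous value: %d, final value: %d\n",
          ok, old, counter);
  return 0;
}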