From patchwork Sat Nov 19 00:13:30 2022
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 60872
From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com, andrey.kolesov@intel.com, carlos@systemhalted.org
Subject: [PATCH v5 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
Date: Fri, 18 Nov 2022 16:13:30 -0800
Message-Id: <20221119001332.1428330-1-goldstein.w.n@gmail.com>
In-Reply-To: <20221118190835.1033248-1-goldstein.w.n@gmail.com>
References: <20221118190835.1033248-1-goldstein.w.n@gmail.com>

Just reformat with the style convention used in other x86 assembler
files.  This doesn't change libm.so or libmvec.so.
---
 sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
 sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
 2 files changed, 510 insertions(+), 510 deletions(-)

diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 2334713015..b03a2122b9 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,273 +18,273 @@
 /* SSE2 ISA version as wrapper to scalar.  */
 .macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
- movsd 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+	subq $40, %rsp
+	cfi_adjust_cfa_offset (40)
+	movaps %xmm0, (%rsp)
+	call JUMPTARGET(\callee)
+	movsd %xmm0, 16(%rsp)
+	movsd 8(%rsp), %xmm0
+	call JUMPTARGET(\callee)
+	movsd 16(%rsp), %xmm1
+	movsd %xmm0, 24(%rsp)
+	unpcklpd %xmm0, %xmm1
+	movaps %xmm1, %xmm0
+	addq $40, %rsp
+	cfi_adjust_cfa_offset (-40)
+	ret
 .endm

 /* 2 argument SSE2 ISA version as wrapper to scalar.
*/ .macro WRAPPER_IMPL_SSE2_ff callee - subq $56, %rsp - cfi_adjust_cfa_offset(56) - movaps %xmm0, (%rsp) - movaps %xmm1, 16(%rsp) - call JUMPTARGET(\callee) - movsd %xmm0, 32(%rsp) - movsd 8(%rsp), %xmm0 - movsd 24(%rsp), %xmm1 - call JUMPTARGET(\callee) - movsd 32(%rsp), %xmm1 - movsd %xmm0, 40(%rsp) - unpcklpd %xmm0, %xmm1 - movaps %xmm1, %xmm0 - addq $56, %rsp - cfi_adjust_cfa_offset(-56) - ret + subq $56, %rsp + cfi_adjust_cfa_offset (56) + movaps %xmm0, (%rsp) + movaps %xmm1, 16(%rsp) + call JUMPTARGET(\callee) + movsd %xmm0, 32(%rsp) + movsd 8(%rsp), %xmm0 + movsd 24(%rsp), %xmm1 + call JUMPTARGET(\callee) + movsd 32(%rsp), %xmm1 + movsd %xmm0, 40(%rsp) + unpcklpd %xmm0, %xmm1 + movaps %xmm1, %xmm0 + addq $56, %rsp + cfi_adjust_cfa_offset (-56) + ret .endm /* 3 argument SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - pushq %rbx - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbx, 0) - movq %rdi, %rbp - movq %rsi, %rbx - subq $40, %rsp - cfi_adjust_cfa_offset(40) - leaq 16(%rsp), %rsi - leaq 24(%rsp), %rdi - movaps %xmm0, (%rsp) - call JUMPTARGET(\callee) - leaq 16(%rsp), %rsi - leaq 24(%rsp), %rdi - movsd 24(%rsp), %xmm0 - movapd (%rsp), %xmm1 - movsd %xmm0, 0(%rbp) - unpckhpd %xmm1, %xmm1 - movsd 16(%rsp), %xmm0 - movsd %xmm0, (%rbx) - movapd %xmm1, %xmm0 - call JUMPTARGET(\callee) - movsd 24(%rsp), %xmm0 - movsd %xmm0, 8(%rbp) - movsd 16(%rsp), %xmm0 - movsd %xmm0, 8(%rbx) - addq $40, %rsp - cfi_adjust_cfa_offset(-40) - popq %rbx - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbx) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + pushq %rbx + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbx, 0) + movq %rdi, %rbp + movq %rsi, %rbx + subq $40, %rsp + cfi_adjust_cfa_offset (40) + leaq 16(%rsp), %rsi + leaq 24(%rsp), %rdi + movaps %xmm0, (%rsp) + call JUMPTARGET(\callee) + leaq 16(%rsp), %rsi + leaq 24(%rsp), %rdi + movsd 24(%rsp), %xmm0 + movapd (%rsp), %xmm1 + movsd %xmm0, 0(%rbp) + unpckhpd %xmm1, %xmm1 + movsd 16(%rsp), %xmm0 + movsd %xmm0, (%rbx) + movapd %xmm1, %xmm0 + call JUMPTARGET(\callee) + movsd 24(%rsp), %xmm0 + movsd %xmm0, 8(%rbp) + movsd 16(%rsp), %xmm0 + movsd %xmm0, 8(%rbx) + addq $40, %rsp + cfi_adjust_cfa_offset (-40) + popq %rbx + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbx) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* AVX/AVX2 ISA version as wrapper to SSE ISA version. 
*/ .macro WRAPPER_IMPL_AVX callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - subq $32, %rsp - vextractf128 $1, %ymm0, (%rsp) - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovapd %xmm0, 16(%rsp) - vmovaps (%rsp), %xmm0 - call HIDDEN_JUMPTARGET(\callee) - vmovapd %xmm0, %xmm1 - vmovapd 16(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + subq $32, %rsp + vextractf128 $1, %ymm0, (%rsp) + vzeroupper + call HIDDEN_JUMPTARGET(\callee) + vmovapd %xmm0, 16(%rsp) + vmovaps (%rsp), %xmm0 + call HIDDEN_JUMPTARGET(\callee) + vmovapd %xmm0, %xmm1 + vmovapd 16(%rsp), %xmm0 + vinsertf128 $1, %xmm1, %ymm0, %ymm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */ .macro WRAPPER_IMPL_AVX_ff callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - subq $64, %rsp - vextractf128 $1, %ymm0, 16(%rsp) - vextractf128 $1, %ymm1, (%rsp) - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 32(%rsp) - vmovaps 16(%rsp), %xmm0 - vmovaps (%rsp), %xmm1 - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 - vmovaps 32(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + subq $64, %rsp + vextractf128 $1, %ymm0, 16(%rsp) + vextractf128 $1, %ymm1, (%rsp) + vzeroupper + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, 32(%rsp) + vmovaps 16(%rsp), %xmm0 + vmovaps (%rsp), %xmm1 + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, %xmm1 + vmovaps 32(%rsp), %xmm0 + vinsertf128 $1, %xmm1, %ymm0, %ymm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. 
*/ .macro WRAPPER_IMPL_AVX_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) - pushq %r14 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r14, 0) - subq $48, %rsp - movq %rsi, %r14 - movq %rdi, %r13 - vextractf128 $1, %ymm0, 32(%rsp) - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovaps 32(%rsp), %xmm0 - lea (%rsp), %rdi - lea 16(%rsp), %rsi - call HIDDEN_JUMPTARGET(\callee) - vmovapd (%rsp), %xmm0 - vmovapd 16(%rsp), %xmm1 - vmovapd %xmm0, 16(%r13) - vmovapd %xmm1, 16(%r14) - addq $48, %rsp - popq %r14 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r14) - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + pushq %r13 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r13, 0) + pushq %r14 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r14, 0) + subq $48, %rsp + movq %rsi, %r14 + movq %rdi, %r13 + vextractf128 $1, %ymm0, 32(%rsp) + vzeroupper + call HIDDEN_JUMPTARGET(\callee) + vmovaps 32(%rsp), %xmm0 + lea (%rsp), %rdi + lea 16(%rsp), %rsi + call HIDDEN_JUMPTARGET(\callee) + vmovapd (%rsp), %xmm0 + vmovapd 16(%rsp), %xmm1 + vmovapd %xmm0, 16(%r13) + vmovapd %xmm1, 16(%r14) + addq $48, %rsp + popq %r14 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r14) + popq %r13 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r13) + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* AVX512 ISA version as wrapper to AVX2 ISA version. */ .macro WRAPPER_IMPL_AVX512 callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - subq $128, %rsp - vmovups %zmm0, (%rsp) - vmovupd (%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 64(%rsp) - vmovupd 32(%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 96(%rsp) - vmovups 64(%rsp), %zmm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $128, %rsp + vmovups %zmm0, (%rsp) + vmovupd (%rsp), %ymm0 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 64(%rsp) + vmovupd 32(%rsp), %ymm0 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 96(%rsp) + vmovups 64(%rsp), %zmm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. 
*/ .macro WRAPPER_IMPL_AVX512_ff callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - subq $192, %rsp - vmovups %zmm0, (%rsp) - vmovups %zmm1, 64(%rsp) - vmovupd (%rsp), %ymm0 - vmovupd 64(%rsp), %ymm1 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 128(%rsp) - vmovupd 32(%rsp), %ymm0 - vmovupd 96(%rsp), %ymm1 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 160(%rsp) - vmovups 128(%rsp), %zmm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $192, %rsp + vmovups %zmm0, (%rsp) + vmovups %zmm1, 64(%rsp) + vmovupd (%rsp), %ymm0 + vmovupd 64(%rsp), %ymm1 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 128(%rsp) + vmovupd 32(%rsp), %ymm0 + vmovupd 96(%rsp), %ymm1 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 160(%rsp) + vmovups 128(%rsp), %zmm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */ .macro WRAPPER_IMPL_AVX512_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - pushq %r12 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r12, 0) - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) - subq $176, %rsp - movq %rsi, %r13 - vmovups %zmm0, (%rsp) - movq %rdi, %r12 - vmovupd (%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - vmovupd 32(%rsp), %ymm0 - lea 64(%rsp), %rdi - lea 96(%rsp), %rsi - call HIDDEN_JUMPTARGET(\callee) - vmovupd 64(%rsp), %ymm0 - vmovupd 96(%rsp), %ymm1 - vmovupd %ymm0, 32(%r12) - vmovupd %ymm1, 32(%r13) - vzeroupper - addq $176, %rsp - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) - popq %r12 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r12) - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + pushq %r12 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r12, 0) + pushq %r13 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r13, 0) + subq $176, %rsp + movq %rsi, %r13 + vmovups %zmm0, (%rsp) + movq %rdi, %r12 + vmovupd (%rsp), %ymm0 + call HIDDEN_JUMPTARGET(\callee) + vmovupd 32(%rsp), %ymm0 + lea 64(%rsp), %rdi + lea 96(%rsp), %rsi + call HIDDEN_JUMPTARGET(\callee) + vmovupd 64(%rsp), %ymm0 + vmovupd 96(%rsp), %ymm1 + vmovupd %ymm0, 32(%r12) + vmovupd %ymm1, 32(%r13) + vzeroupper + addq $176, %rsp + popq %r13 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r13) + popq %r12 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r12) + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h index c23da7ec83..cecf6c8384 100644 --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h @@ -18,309 +18,309 @@ /* SSE2 ISA version as wrapper to scalar. 
*/ .macro WRAPPER_IMPL_SSE2 callee - subq $40, %rsp - cfi_adjust_cfa_offset(40) - movaps %xmm0, (%rsp) - call JUMPTARGET(\callee) - movss %xmm0, 16(%rsp) - movss 4(%rsp), %xmm0 - call JUMPTARGET(\callee) - movss %xmm0, 20(%rsp) - movss 8(%rsp), %xmm0 - call JUMPTARGET(\callee) - movss %xmm0, 24(%rsp) - movss 12(%rsp), %xmm0 - call JUMPTARGET(\callee) - movss 16(%rsp), %xmm3 - movss 20(%rsp), %xmm2 - movss 24(%rsp), %xmm1 - movss %xmm0, 28(%rsp) - unpcklps %xmm1, %xmm3 - unpcklps %xmm0, %xmm2 - unpcklps %xmm2, %xmm3 - movaps %xmm3, %xmm0 - addq $40, %rsp - cfi_adjust_cfa_offset(-40) - ret + subq $40, %rsp + cfi_adjust_cfa_offset (40) + movaps %xmm0, (%rsp) + call JUMPTARGET(\callee) + movss %xmm0, 16(%rsp) + movss 4(%rsp), %xmm0 + call JUMPTARGET(\callee) + movss %xmm0, 20(%rsp) + movss 8(%rsp), %xmm0 + call JUMPTARGET(\callee) + movss %xmm0, 24(%rsp) + movss 12(%rsp), %xmm0 + call JUMPTARGET(\callee) + movss 16(%rsp), %xmm3 + movss 20(%rsp), %xmm2 + movss 24(%rsp), %xmm1 + movss %xmm0, 28(%rsp) + unpcklps %xmm1, %xmm3 + unpcklps %xmm0, %xmm2 + unpcklps %xmm2, %xmm3 + movaps %xmm3, %xmm0 + addq $40, %rsp + cfi_adjust_cfa_offset (-40) + ret .endm /* 2 argument SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2_ff callee - subq $56, %rsp - cfi_adjust_cfa_offset(56) - movaps %xmm0, (%rsp) - movaps %xmm1, 16(%rsp) - call JUMPTARGET(\callee) - movss %xmm0, 32(%rsp) - movss 4(%rsp), %xmm0 - movss 20(%rsp), %xmm1 - call JUMPTARGET(\callee) - movss %xmm0, 36(%rsp) - movss 8(%rsp), %xmm0 - movss 24(%rsp), %xmm1 - call JUMPTARGET(\callee) - movss %xmm0, 40(%rsp) - movss 12(%rsp), %xmm0 - movss 28(%rsp), %xmm1 - call JUMPTARGET(\callee) - movss 32(%rsp), %xmm3 - movss 36(%rsp), %xmm2 - movss 40(%rsp), %xmm1 - movss %xmm0, 44(%rsp) - unpcklps %xmm1, %xmm3 - unpcklps %xmm0, %xmm2 - unpcklps %xmm2, %xmm3 - movaps %xmm3, %xmm0 - addq $56, %rsp - cfi_adjust_cfa_offset(-56) - ret + subq $56, %rsp + cfi_adjust_cfa_offset (56) + movaps %xmm0, (%rsp) + movaps %xmm1, 16(%rsp) + call JUMPTARGET(\callee) + movss %xmm0, 32(%rsp) + movss 4(%rsp), %xmm0 + movss 20(%rsp), %xmm1 + call JUMPTARGET(\callee) + movss %xmm0, 36(%rsp) + movss 8(%rsp), %xmm0 + movss 24(%rsp), %xmm1 + call JUMPTARGET(\callee) + movss %xmm0, 40(%rsp) + movss 12(%rsp), %xmm0 + movss 28(%rsp), %xmm1 + call JUMPTARGET(\callee) + movss 32(%rsp), %xmm3 + movss 36(%rsp), %xmm2 + movss 40(%rsp), %xmm1 + movss %xmm0, 44(%rsp) + unpcklps %xmm1, %xmm3 + unpcklps %xmm0, %xmm2 + unpcklps %xmm2, %xmm3 + movaps %xmm3, %xmm0 + addq $56, %rsp + cfi_adjust_cfa_offset (-56) + ret .endm /* 3 argument SSE2 ISA version as wrapper to scalar. 
*/ .macro WRAPPER_IMPL_SSE2_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - pushq %rbx - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbx, 0) - movq %rdi, %rbp - movq %rsi, %rbx - subq $40, %rsp - cfi_adjust_cfa_offset(40) - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi - movaps %xmm0, (%rsp) - call JUMPTARGET(\callee) - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi - movss 28(%rsp), %xmm0 - movss %xmm0, 0(%rbp) - movaps (%rsp), %xmm1 - movss 24(%rsp), %xmm0 - movss %xmm0, (%rbx) - movaps %xmm1, %xmm0 - shufps $85, %xmm1, %xmm0 - call JUMPTARGET(\callee) - movss 28(%rsp), %xmm0 - leaq 24(%rsp), %rsi - movss %xmm0, 4(%rbp) - leaq 28(%rsp), %rdi - movaps (%rsp), %xmm1 - movss 24(%rsp), %xmm0 - movss %xmm0, 4(%rbx) - movaps %xmm1, %xmm0 - unpckhps %xmm1, %xmm0 - call JUMPTARGET(\callee) - movaps (%rsp), %xmm1 - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi - movss 28(%rsp), %xmm0 - shufps $255, %xmm1, %xmm1 - movss %xmm0, 8(%rbp) - movss 24(%rsp), %xmm0 - movss %xmm0, 8(%rbx) - movaps %xmm1, %xmm0 - call JUMPTARGET(\callee) - movss 28(%rsp), %xmm0 - movss %xmm0, 12(%rbp) - movss 24(%rsp), %xmm0 - movss %xmm0, 12(%rbx) - addq $40, %rsp - cfi_adjust_cfa_offset(-40) - popq %rbx - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbx) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + pushq %rbx + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbx, 0) + movq %rdi, %rbp + movq %rsi, %rbx + subq $40, %rsp + cfi_adjust_cfa_offset (40) + leaq 24(%rsp), %rsi + leaq 28(%rsp), %rdi + movaps %xmm0, (%rsp) + call JUMPTARGET(\callee) + leaq 24(%rsp), %rsi + leaq 28(%rsp), %rdi + movss 28(%rsp), %xmm0 + movss %xmm0, 0(%rbp) + movaps (%rsp), %xmm1 + movss 24(%rsp), %xmm0 + movss %xmm0, (%rbx) + movaps %xmm1, %xmm0 + shufps $85, %xmm1, %xmm0 + call JUMPTARGET(\callee) + movss 28(%rsp), %xmm0 + leaq 24(%rsp), %rsi + movss %xmm0, 4(%rbp) + leaq 28(%rsp), %rdi + movaps (%rsp), %xmm1 + movss 24(%rsp), %xmm0 + movss %xmm0, 4(%rbx) + movaps %xmm1, %xmm0 + unpckhps %xmm1, %xmm0 + call JUMPTARGET(\callee) + movaps (%rsp), %xmm1 + leaq 24(%rsp), %rsi + leaq 28(%rsp), %rdi + movss 28(%rsp), %xmm0 + shufps $255, %xmm1, %xmm1 + movss %xmm0, 8(%rbp) + movss 24(%rsp), %xmm0 + movss %xmm0, 8(%rbx) + movaps %xmm1, %xmm0 + call JUMPTARGET(\callee) + movss 28(%rsp), %xmm0 + movss %xmm0, 12(%rbp) + movss 24(%rsp), %xmm0 + movss %xmm0, 12(%rbx) + addq $40, %rsp + cfi_adjust_cfa_offset (-40) + popq %rbx + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbx) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* AVX/AVX2 ISA version as wrapper to SSE ISA version. 
*/ .macro WRAPPER_IMPL_AVX callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - subq $32, %rsp - vextractf128 $1, %ymm0, (%rsp) - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 16(%rsp) - vmovaps (%rsp), %xmm0 - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 - vmovaps 16(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + subq $32, %rsp + vextractf128 $1, %ymm0, (%rsp) + vzeroupper + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, 16(%rsp) + vmovaps (%rsp), %xmm0 + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, %xmm1 + vmovaps 16(%rsp), %xmm0 + vinsertf128 $1, %xmm1, %ymm0, %ymm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */ .macro WRAPPER_IMPL_AVX_ff callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - subq $64, %rsp - vextractf128 $1, %ymm0, 16(%rsp) - vextractf128 $1, %ymm1, (%rsp) - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 32(%rsp) - vmovaps 16(%rsp), %xmm0 - vmovaps (%rsp), %xmm1 - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 - vmovaps 32(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + subq $64, %rsp + vextractf128 $1, %ymm0, 16(%rsp) + vextractf128 $1, %ymm1, (%rsp) + vzeroupper + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, 32(%rsp) + vmovaps 16(%rsp), %xmm0 + vmovaps (%rsp), %xmm1 + call HIDDEN_JUMPTARGET(\callee) + vmovaps %xmm0, %xmm1 + vmovaps 32(%rsp), %xmm0 + vinsertf128 $1, %xmm1, %ymm0, %ymm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. 
*/ .macro WRAPPER_IMPL_AVX_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-32, %rsp - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) - pushq %r14 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r14, 0) - subq $48, %rsp - movq %rsi, %r14 - vmovaps %ymm0, (%rsp) - movq %rdi, %r13 - vmovaps 16(%rsp), %xmm1 - vmovaps %xmm1, 32(%rsp) - vzeroupper - vmovaps (%rsp), %xmm0 - call HIDDEN_JUMPTARGET(\callee) - vmovaps 32(%rsp), %xmm0 - lea (%rsp), %rdi - lea 16(%rsp), %rsi - call HIDDEN_JUMPTARGET(\callee) - vmovaps (%rsp), %xmm0 - vmovaps 16(%rsp), %xmm1 - vmovaps %xmm0, 16(%r13) - vmovaps %xmm1, 16(%r14) - addq $48, %rsp - popq %r14 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r14) - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-32, %rsp + pushq %r13 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r13, 0) + pushq %r14 + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%r14, 0) + subq $48, %rsp + movq %rsi, %r14 + vmovaps %ymm0, (%rsp) + movq %rdi, %r13 + vmovaps 16(%rsp), %xmm1 + vmovaps %xmm1, 32(%rsp) + vzeroupper + vmovaps (%rsp), %xmm0 + call HIDDEN_JUMPTARGET(\callee) + vmovaps 32(%rsp), %xmm0 + lea (%rsp), %rdi + lea 16(%rsp), %rsi + call HIDDEN_JUMPTARGET(\callee) + vmovaps (%rsp), %xmm0 + vmovaps 16(%rsp), %xmm1 + vmovaps %xmm0, 16(%r13) + vmovaps %xmm1, 16(%r14) + addq $48, %rsp + popq %r14 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r14) + popq %r13 + cfi_adjust_cfa_offset (-8) + cfi_restore (%r13) + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* AVX512 ISA version as wrapper to AVX2 ISA version. */ .macro WRAPPER_IMPL_AVX512 callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - subq $128, %rsp - vmovups %zmm0, (%rsp) - vmovupd (%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 64(%rsp) - vmovupd 32(%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 96(%rsp) - vmovups 64(%rsp), %zmm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $128, %rsp + vmovups %zmm0, (%rsp) + vmovupd (%rsp), %ymm0 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 64(%rsp) + vmovupd 32(%rsp), %ymm0 + call HIDDEN_JUMPTARGET(\callee) + vmovupd %ymm0, 96(%rsp) + vmovups 64(%rsp), %zmm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret .endm /* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. 
 */
 .macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+	pushq %rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq %rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq $-64, %rsp
+	subq $192, %rsp
+	vmovups %zmm0, (%rsp)
+	vmovups %zmm1, 64(%rsp)
+	vmovups (%rsp), %ymm0
+	vmovups 64(%rsp), %ymm1
+	call HIDDEN_JUMPTARGET(\callee)
+	vmovups %ymm0, 128(%rsp)
+	vmovups 32(%rsp), %ymm0
+	vmovups 96(%rsp), %ymm1
+	call HIDDEN_JUMPTARGET(\callee)
+	vmovups %ymm0, 160(%rsp)
+	vmovups 128(%rsp), %zmm0
+	movq %rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq %rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
 .endm

 /* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
 .macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+	pushq %rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq %rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq $-64, %rsp
+	pushq %r12
+	pushq %r13
+	subq $176, %rsp
+	movq %rsi, %r13
+	vmovaps %zmm0, (%rsp)
+	movq %rdi, %r12
+	vmovaps (%rsp), %ymm0
+	call HIDDEN_JUMPTARGET(\callee)
+	vmovaps 32(%rsp), %ymm0
+	lea 64(%rsp), %rdi
+	lea 96(%rsp), %rsi
+	call HIDDEN_JUMPTARGET(\callee)
+	vmovaps 64(%rsp), %ymm0
+	vmovaps 96(%rsp), %ymm1
+	vmovaps %ymm0, 32(%r12)
+	vmovaps %ymm1, 32(%r13)
+	addq $176, %rsp
+	popq %r13
+	popq %r12
+	movq %rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq %rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
 .endm

From patchwork Sat Nov 19 00:13:31 2022
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 60870
From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com, andrey.kolesov@intel.com, carlos@systemhalted.org
Subject: [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
Date: Fri, 18 Nov 2022 16:13:31 -0800
Message-Id: <20221119001332.1428330-2-goldstein.w.n@gmail.com>
In-Reply-To: <20221119001332.1428330-1-goldstein.w.n@gmail.com>
References: <20221118190835.1033248-1-goldstein.w.n@gmail.com> <20221119001332.1428330-1-goldstein.w.n@gmail.com>

1. Remove unnecessary spills.
2. Fix some small missed optimizations.

All math and mathvec tests pass on x86.
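As background for the cleanup: each of these wrappers implements a
vector math entry point by spilling the vector argument, invoking the
scalar (or narrower-vector) callee once per lane or half, and
repacking the results.  The spills removed here are the redundant
stack round-trips between those calls.  The following C sketch models
the single-argument SSE2 float wrapper; the names are illustrative
only, not glibc code:

#include <emmintrin.h>

/* Rough C model of WRAPPER_IMPL_SSE2 for float: run a scalar callee
   over the four lanes of an xmm register and repack the results.
   The assembly version does the same job with explicit stack slots;
   this patch keeps partial results in registers (e.g. %ebx) instead
   of storing every one of them back to the stack.  */
static __m128
wrapper_sse2_modelf (float (*callee) (float), __m128 x)
{
  float in[4], out[4];
  _mm_storeu_ps (in, x);	/* spill the vector argument once */
  for (int i = 0; i < 4; i++)
    out[i] = callee (in[i]);	/* one scalar call per lane */
  return _mm_loadu_ps (out);	/* repack into a single xmm */
}

For example, wrapper_sse2_modelf (sinf, v) would model a 4-lane
vector sinf built on top of the scalar sinf.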
--- sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 179 +++++++---------- sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++------------- 2 files changed, 172 insertions(+), 242 deletions(-) diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h index b03a2122b9..9900f85a55 100644 --- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h +++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h @@ -18,39 +18,38 @@ /* SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2 callee - subq $40, %rsp - cfi_adjust_cfa_offset (40) + subq $24, %rsp + cfi_adjust_cfa_offset (24) movaps %xmm0, (%rsp) call JUMPTARGET(\callee) - movsd %xmm0, 16(%rsp) + movsd %xmm0, (%rsp) movsd 8(%rsp), %xmm0 call JUMPTARGET(\callee) - movsd 16(%rsp), %xmm1 - movsd %xmm0, 24(%rsp) + movsd (%rsp), %xmm1 unpcklpd %xmm0, %xmm1 movaps %xmm1, %xmm0 - addq $40, %rsp - cfi_adjust_cfa_offset (-40) + addq $24, %rsp + cfi_adjust_cfa_offset (-24) ret .endm + /* 2 argument SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2_ff callee - subq $56, %rsp - cfi_adjust_cfa_offset (56) + subq $40, %rsp + cfi_adjust_cfa_offset (40) movaps %xmm0, (%rsp) movaps %xmm1, 16(%rsp) call JUMPTARGET(\callee) - movsd %xmm0, 32(%rsp) + movsd %xmm0, (%rsp) movsd 8(%rsp), %xmm0 movsd 24(%rsp), %xmm1 call JUMPTARGET(\callee) - movsd 32(%rsp), %xmm1 - movsd %xmm0, 40(%rsp) + movsd (%rsp), %xmm1 unpcklpd %xmm0, %xmm1 movaps %xmm1, %xmm0 - addq $56, %rsp - cfi_adjust_cfa_offset (-56) + addq $40, %rsp + cfi_adjust_cfa_offset (-40) ret .endm @@ -62,30 +61,18 @@ pushq %rbx cfi_adjust_cfa_offset (8) cfi_rel_offset (%rbx, 0) + subq $24, %rsp + cfi_adjust_cfa_offset (24) + movaps %xmm0, (%rsp) movq %rdi, %rbp movq %rsi, %rbx - subq $40, %rsp - cfi_adjust_cfa_offset (40) - leaq 16(%rsp), %rsi - leaq 24(%rsp), %rdi - movaps %xmm0, (%rsp) call JUMPTARGET(\callee) - leaq 16(%rsp), %rsi - leaq 24(%rsp), %rdi - movsd 24(%rsp), %xmm0 - movapd (%rsp), %xmm1 - movsd %xmm0, 0(%rbp) - unpckhpd %xmm1, %xmm1 - movsd 16(%rsp), %xmm0 - movsd %xmm0, (%rbx) - movapd %xmm1, %xmm0 + movsd 8(%rsp), %xmm0 + leaq 8(%rbp), %rdi + leaq 8(%rbx), %rsi call JUMPTARGET(\callee) - movsd 24(%rsp), %xmm0 - movsd %xmm0, 8(%rbp) - movsd 16(%rsp), %xmm0 - movsd %xmm0, 8(%rbx) - addq $40, %rsp - cfi_adjust_cfa_offset (-40) + addq $24, %rsp + cfi_adjust_cfa_offset (-24) popq %rbx cfi_adjust_cfa_offset (-8) cfi_restore (%rbx) @@ -104,15 +91,17 @@ cfi_def_cfa_register (%rbp) andq $-32, %rsp subq $32, %rsp - vextractf128 $1, %ymm0, (%rsp) + vmovaps %ymm0, (%rsp) vzeroupper call HIDDEN_JUMPTARGET(\callee) - vmovapd %xmm0, 16(%rsp) - vmovaps (%rsp), %xmm0 + vmovaps %xmm0, (%rsp) + vmovaps 16(%rsp), %xmm0 call HIDDEN_JUMPTARGET(\callee) - vmovapd %xmm0, %xmm1 - vmovapd 16(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 + /* combine xmm0 (return of second call) with result of first + call (saved on stack). Might be worth exploring logic that + uses `vpblend` and reads in ymm1 using -16(rsp). 
*/ + vmovaps (%rsp), %xmm1 + vinsertf128 $1, %xmm0, %ymm1, %ymm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -130,17 +119,19 @@ cfi_def_cfa_register (%rbp) andq $-32, %rsp subq $64, %rsp - vextractf128 $1, %ymm0, 16(%rsp) - vextractf128 $1, %ymm1, (%rsp) + vmovaps %ymm0, (%rsp) + vmovaps %ymm1, 32(%rsp) vzeroupper call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 32(%rsp) + vmovaps 48(%rsp), %xmm1 + vmovaps %xmm0, (%rsp) vmovaps 16(%rsp), %xmm0 - vmovaps (%rsp), %xmm1 call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 - vmovaps 32(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 + /* combine xmm0 (return of second call) with result of first + call (saved on stack). Might be worth exploring logic that + uses `vpblend` and reads in ymm1 using -16(rsp). */ + vmovaps (%rsp), %xmm1 + vinsertf128 $1, %xmm0, %ymm1, %ymm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -155,35 +146,21 @@ cfi_adjust_cfa_offset (8) cfi_rel_offset (%rbp, 0) movq %rsp, %rbp - cfi_def_cfa_register (%rbp) andq $-32, %rsp - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) + subq $32, %rsp + vmovaps %ymm0, (%rsp) + pushq %rbx pushq %r14 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r14, 0) - subq $48, %rsp + movq %rdi, %rbx movq %rsi, %r14 - movq %rdi, %r13 - vextractf128 $1, %ymm0, 32(%rsp) vzeroupper call HIDDEN_JUMPTARGET(\callee) vmovaps 32(%rsp), %xmm0 - lea (%rsp), %rdi - lea 16(%rsp), %rsi + leaq 16(%rbx), %rdi + leaq 16(%r14), %rsi call HIDDEN_JUMPTARGET(\callee) - vmovapd (%rsp), %xmm0 - vmovapd 16(%rsp), %xmm1 - vmovapd %xmm0, 16(%r13) - vmovapd %xmm1, 16(%r14) - addq $48, %rsp popq %r14 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r14) - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) + popq %rbx movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -200,15 +177,16 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - subq $128, %rsp + subq $64, %rsp vmovups %zmm0, (%rsp) - vmovupd (%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 64(%rsp) + vmovupd %ymm0, (%rsp) vmovupd 32(%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 96(%rsp) - vmovups 64(%rsp), %zmm0 + /* combine ymm0 (return of second call) with result of first + call (saved on stack). */ + vmovaps (%rsp), %ymm1 + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -225,18 +203,19 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - subq $192, %rsp + addq $-128, %rsp vmovups %zmm0, (%rsp) vmovups %zmm1, 64(%rsp) - vmovupd (%rsp), %ymm0 - vmovupd 64(%rsp), %ymm1 + /* ymm0 and ymm1 are already set. */ call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 128(%rsp) - vmovupd 32(%rsp), %ymm0 - vmovupd 96(%rsp), %ymm1 + vmovups 96(%rsp), %ymm1 + vmovaps %ymm0, (%rsp) + vmovups 32(%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 160(%rsp) - vmovups 128(%rsp), %zmm0 + /* combine ymm0 (return of second call) with result of first + call (saved on stack). 
*/ + vmovaps (%rsp), %ymm1 + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -253,34 +232,20 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - pushq %r12 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r12, 0) - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) - subq $176, %rsp - movq %rsi, %r13 - vmovups %zmm0, (%rsp) - movq %rdi, %r12 - vmovupd (%rsp), %ymm0 + subq $64, %rsp + vmovaps %zmm0, (%rsp) + pushq %rbx + pushq %r14 + movq %rdi, %rbx + movq %rsi, %r14 + /* ymm0 is already set. */ call HIDDEN_JUMPTARGET(\callee) - vmovupd 32(%rsp), %ymm0 - lea 64(%rsp), %rdi - lea 96(%rsp), %rsi + vmovaps 48(%rsp), %ymm0 + leaq 32(%rbx), %rdi + leaq 32(%r14), %rsi call HIDDEN_JUMPTARGET(\callee) - vmovupd 64(%rsp), %ymm0 - vmovupd 96(%rsp), %ymm1 - vmovupd %ymm0, 32(%r12) - vmovupd %ymm1, 32(%r13) - vzeroupper - addq $176, %rsp - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) - popq %r12 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r12) + popq %r14 + popq %rbx movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h index cecf6c8384..fd9b363045 100644 --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h @@ -18,61 +18,66 @@ /* SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2 callee - subq $40, %rsp - cfi_adjust_cfa_offset (40) + push %rbx + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbx, 0) + subq $16, %rsp + cfi_adjust_cfa_offset (16) movaps %xmm0, (%rsp) call JUMPTARGET(\callee) - movss %xmm0, 16(%rsp) + movss %xmm0, (%rsp) movss 4(%rsp), %xmm0 call JUMPTARGET(\callee) - movss %xmm0, 20(%rsp) + movss %xmm0, 4(%rsp) movss 8(%rsp), %xmm0 call JUMPTARGET(\callee) - movss %xmm0, 24(%rsp) + movd %xmm0, %ebx movss 12(%rsp), %xmm0 call JUMPTARGET(\callee) - movss 16(%rsp), %xmm3 - movss 20(%rsp), %xmm2 - movss 24(%rsp), %xmm1 - movss %xmm0, 28(%rsp) - unpcklps %xmm1, %xmm3 - unpcklps %xmm0, %xmm2 - unpcklps %xmm2, %xmm3 - movaps %xmm3, %xmm0 - addq $40, %rsp - cfi_adjust_cfa_offset (-40) + movd %ebx, %xmm1 + unpcklps %xmm0, %xmm1 + movsd (%rsp), %xmm0 + unpcklpd %xmm1, %xmm0 + addq $16, %rsp + cfi_adjust_cfa_offset (-16) + popq %rbx + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbx) ret .endm /* 2 argument SSE2 ISA version as wrapper to scalar. */ .macro WRAPPER_IMPL_SSE2_ff callee - subq $56, %rsp - cfi_adjust_cfa_offset (56) + push %rbx + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbx, 0) + subq $32, %rsp + cfi_adjust_cfa_offset (40) movaps %xmm0, (%rsp) movaps %xmm1, 16(%rsp) call JUMPTARGET(\callee) - movss %xmm0, 32(%rsp) - movss 4(%rsp), %xmm0 movss 20(%rsp), %xmm1 + movss %xmm0, 0(%rsp) + movss 4(%rsp), %xmm0 call JUMPTARGET(\callee) - movss %xmm0, 36(%rsp) - movss 8(%rsp), %xmm0 movss 24(%rsp), %xmm1 + movss %xmm0, 4(%rsp) + movss 8(%rsp), %xmm0 call JUMPTARGET(\callee) - movss %xmm0, 40(%rsp) - movss 12(%rsp), %xmm0 movss 28(%rsp), %xmm1 + movd %xmm0, %ebx + movss 12(%rsp), %xmm0 call JUMPTARGET(\callee) - movss 32(%rsp), %xmm3 - movss 36(%rsp), %xmm2 - movss 40(%rsp), %xmm1 - movss %xmm0, 44(%rsp) - unpcklps %xmm1, %xmm3 - unpcklps %xmm0, %xmm2 - unpcklps %xmm2, %xmm3 - movaps %xmm3, %xmm0 - addq $56, %rsp - cfi_adjust_cfa_offset (-56) + /* merge 4x results into xmm0. 
*/ + movd %ebx, %xmm1 + unpcklps %xmm0, %xmm1 + movsd (%rsp), %xmm0 + unpcklpd %xmm1, %xmm0 + addq $32, %rsp + cfi_adjust_cfa_offset (-32) + popq %rbx + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbx) ret .endm @@ -86,48 +91,24 @@ cfi_rel_offset (%rbx, 0) movq %rdi, %rbp movq %rsi, %rbx - subq $40, %rsp - cfi_adjust_cfa_offset (40) - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi + subq $24, %rsp + cfi_adjust_cfa_offset (24) movaps %xmm0, (%rsp) call JUMPTARGET(\callee) - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi - movss 28(%rsp), %xmm0 - movss %xmm0, 0(%rbp) - movaps (%rsp), %xmm1 - movss 24(%rsp), %xmm0 - movss %xmm0, (%rbx) - movaps %xmm1, %xmm0 - shufps $85, %xmm1, %xmm0 + movss 4(%rsp), %xmm0 + leaq 4(%rbp), %rdi + leaq 4(%rbx), %rsi call JUMPTARGET(\callee) - movss 28(%rsp), %xmm0 - leaq 24(%rsp), %rsi - movss %xmm0, 4(%rbp) - leaq 28(%rsp), %rdi - movaps (%rsp), %xmm1 - movss 24(%rsp), %xmm0 - movss %xmm0, 4(%rbx) - movaps %xmm1, %xmm0 - unpckhps %xmm1, %xmm0 + movss 8(%rsp), %xmm0 + leaq 8(%rbp), %rdi + leaq 8(%rbx), %rsi call JUMPTARGET(\callee) - movaps (%rsp), %xmm1 - leaq 24(%rsp), %rsi - leaq 28(%rsp), %rdi - movss 28(%rsp), %xmm0 - shufps $255, %xmm1, %xmm1 - movss %xmm0, 8(%rbp) - movss 24(%rsp), %xmm0 - movss %xmm0, 8(%rbx) - movaps %xmm1, %xmm0 + movss 12(%rsp), %xmm0 + leaq 12(%rbp), %rdi + leaq 12(%rbx), %rsi call JUMPTARGET(\callee) - movss 28(%rsp), %xmm0 - movss %xmm0, 12(%rbp) - movss 24(%rsp), %xmm0 - movss %xmm0, 12(%rbx) - addq $40, %rsp - cfi_adjust_cfa_offset (-40) + addq $24, %rsp + cfi_adjust_cfa_offset (-24) popq %rbx cfi_adjust_cfa_offset (-8) cfi_restore (%rbx) @@ -146,15 +127,17 @@ cfi_def_cfa_register (%rbp) andq $-32, %rsp subq $32, %rsp - vextractf128 $1, %ymm0, (%rsp) + vmovaps %ymm0, (%rsp) vzeroupper call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 16(%rsp) - vmovaps (%rsp), %xmm0 - call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 + vmovaps %xmm0, (%rsp) vmovaps 16(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 + call HIDDEN_JUMPTARGET(\callee) + /* combine xmm0 (return of second call) with result of first + call (saved on stack). Might be worth exploring logic that + uses `vpblend` and reads in ymm1 using -16(rsp). */ + vmovaps (%rsp), %xmm1 + vinsertf128 $1, %xmm0, %ymm1, %ymm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -172,17 +155,19 @@ cfi_def_cfa_register (%rbp) andq $-32, %rsp subq $64, %rsp - vextractf128 $1, %ymm0, 16(%rsp) - vextractf128 $1, %ymm1, (%rsp) + vmovaps %ymm0, (%rsp) + vmovaps %ymm1, 32(%rsp) vzeroupper call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, 32(%rsp) + vmovaps 48(%rsp), %xmm1 + vmovaps %xmm0, (%rsp) vmovaps 16(%rsp), %xmm0 - vmovaps (%rsp), %xmm1 call HIDDEN_JUMPTARGET(\callee) - vmovaps %xmm0, %xmm1 - vmovaps 32(%rsp), %xmm0 - vinsertf128 $1, %xmm1, %ymm0, %ymm0 + /* combine xmm0 (return of second call) with result of first + call (saved on stack). Might be worth exploring logic that + uses `vpblend` and reads in ymm1 using -16(rsp). 
*/ + vmovaps (%rsp), %xmm1 + vinsertf128 $1, %xmm0, %ymm1, %ymm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -197,38 +182,21 @@ cfi_adjust_cfa_offset (8) cfi_rel_offset (%rbp, 0) movq %rsp, %rbp - cfi_def_cfa_register (%rbp) andq $-32, %rsp - pushq %r13 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r13, 0) + subq $32, %rsp + vmovaps %ymm0, (%rsp) + pushq %rbx pushq %r14 - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%r14, 0) - subq $48, %rsp + movq %rdi, %rbx movq %rsi, %r14 - vmovaps %ymm0, (%rsp) - movq %rdi, %r13 - vmovaps 16(%rsp), %xmm1 - vmovaps %xmm1, 32(%rsp) vzeroupper - vmovaps (%rsp), %xmm0 call HIDDEN_JUMPTARGET(\callee) vmovaps 32(%rsp), %xmm0 - lea (%rsp), %rdi - lea 16(%rsp), %rsi + leaq 16(%rbx), %rdi + leaq 16(%r14), %rsi call HIDDEN_JUMPTARGET(\callee) - vmovaps (%rsp), %xmm0 - vmovaps 16(%rsp), %xmm1 - vmovaps %xmm0, 16(%r13) - vmovaps %xmm1, 16(%r14) - addq $48, %rsp popq %r14 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r14) - popq %r13 - cfi_adjust_cfa_offset (-8) - cfi_restore (%r13) + popq %rbx movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -245,15 +213,16 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - subq $128, %rsp + subq $64, %rsp vmovups %zmm0, (%rsp) - vmovupd (%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 64(%rsp) + vmovupd %ymm0, (%rsp) vmovupd 32(%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, 96(%rsp) - vmovups 64(%rsp), %zmm0 + /* combine ymm0 (return of second call) with result of first + call (saved on stack). */ + vmovaps (%rsp), %ymm1 + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -270,18 +239,19 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - subq $192, %rsp + addq $-128, %rsp vmovups %zmm0, (%rsp) vmovups %zmm1, 64(%rsp) - vmovups (%rsp), %ymm0 - vmovups 64(%rsp), %ymm1 + /* ymm0 and ymm1 are already set. */ call HIDDEN_JUMPTARGET(\callee) - vmovups %ymm0, 128(%rsp) - vmovups 32(%rsp), %ymm0 vmovups 96(%rsp), %ymm1 + vmovaps %ymm0, (%rsp) + vmovups 32(%rsp), %ymm0 call HIDDEN_JUMPTARGET(\callee) - vmovups %ymm0, 160(%rsp) - vmovups 128(%rsp), %zmm0 + /* combine ymm0 (return of second call) with result of first + call (saved on stack). */ + vmovaps (%rsp), %ymm1 + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 movq %rbp, %rsp cfi_def_cfa_register (%rsp) popq %rbp @@ -298,25 +268,20 @@ movq %rsp, %rbp cfi_def_cfa_register (%rbp) andq $-64, %rsp - pushq %r12 - pushq %r13 - subq $176, %rsp - movq %rsi, %r13 + subq $64, %rsp vmovaps %zmm0, (%rsp) - movq %rdi, %r12 - vmovaps (%rsp), %ymm0 + pushq %rbx + pushq %r14 + movq %rdi, %rbx + movq %rsi, %r14 + /* ymm0 is already set. 
 */
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps 32(%rsp), %ymm0
-	lea 64(%rsp), %rdi
-	lea 96(%rsp), %rsi
+	vmovaps 48(%rsp), %ymm0
+	leaq 32(%rbx), %rdi
+	leaq 32(%r14), %rsi
 	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps 64(%rsp), %ymm0
-	vmovaps 96(%rsp), %ymm1
-	vmovaps %ymm0, 32(%r12)
-	vmovaps %ymm1, 32(%r13)
-	addq $176, %rsp
-	popq %r13
-	popq %r12
+	popq %r14
+	popq %rbx
 	movq %rbp, %rsp
 	cfi_def_cfa_register (%rsp)
 	popq %rbp

From patchwork Sat Nov 19 00:13:32 2022
X-Patchwork-Submitter: Noah Goldstein
X-Patchwork-Id: 60871
From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com, andrey.kolesov@intel.com, carlos@systemhalted.org
Subject: [PATCH v5 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
Date: Fri, 18 Nov 2022 16:13:32 -0800
Message-Id: <20221119001332.1428330-3-goldstein.w.n@gmail.com>
In-Reply-To: <20221119001332.1428330-1-goldstein.w.n@gmail.com>
References: <20221118190835.1033248-1-goldstein.w.n@gmail.com> <20221119001332.1428330-1-goldstein.w.n@gmail.com>

The code is exactly the same for the float and double versions, so
it is better to maintain only one copy.

All math and mathvec tests pass on x86.
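A note on why a single copy can work at all: after the previous
cleanup, the AVX/AVX2 and AVX512 wrappers only move whole 128- or
256-bit halves between registers and the stack and never touch
individual lanes, so the instruction sequences no longer depend on
the element type.  A rough C model of the shared AVX pattern, with
illustrative names (not glibc code):

#include <immintrin.h>

/* Rough C model of the shared WRAPPER_IMPL_AVX: split a 256-bit
   vector into 128-bit halves, run the SSE-width kernel on each half,
   and reassemble, mirroring the vextractf128/vinsertf128 pair in the
   assembly.  Nothing here depends on whether the lanes hold floats
   or doubles, which is why one copy can serve both svml_s_* and
   svml_d_*.  */
typedef __m128 (*sse_kernel_t) (__m128);

static __m256
wrapper_avx_model (sse_kernel_t kernel, __m256 x)
{
  __m128 lo = _mm256_castps256_ps128 (x);	/* low 128 bits */
  __m128 hi = _mm256_extractf128_ps (x, 1);	/* high 128 bits */
  lo = kernel (lo);
  hi = kernel (hi);
  return _mm256_insertf128_ps (_mm256_castps128_ps256 (lo), hi, 1);
}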
---
 sysdeps/x86_64/fpu/svml_d_wrapper_impl.h  | 172 +-------------------
 sysdeps/x86_64/fpu/svml_s_wrapper_impl.h  | 172 +-------------------
 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h | 190 ++++++++++++++++++++++
 3 files changed, 192 insertions(+), 342 deletions(-)
 create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h

diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 9900f85a55..f63b49f4b8 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -82,174 +82,4 @@
 	ret
 .endm

-/* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
-.macro WRAPPER_IMPL_AVX callee
-	pushq %rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq %rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq $-32, %rsp
-	subq $32, %rsp
-	vmovaps %ymm0, (%rsp)
-	vzeroupper
-	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps %xmm0, (%rsp)
-	vmovaps 16(%rsp), %xmm0
-	call HIDDEN_JUMPTARGET(\callee)
-	/* combine xmm0 (return of second call) with result of first
-	   call (saved on stack).  Might be worth exploring logic that
-	   uses `vpblend` and reads in ymm1 using -16(rsp).  */
-	vmovaps (%rsp), %xmm1
-	vinsertf128 $1, %xmm0, %ymm1, %ymm0
-	movq %rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq %rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
-.macro WRAPPER_IMPL_AVX_ff callee
-	pushq %rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq %rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq $-32, %rsp
-	subq $64, %rsp
-	vmovaps %ymm0, (%rsp)
-	vmovaps %ymm1, 32(%rsp)
-	vzeroupper
-	call HIDDEN_JUMPTARGET(\callee)
-	vmovaps 48(%rsp), %xmm1
-	vmovaps %xmm0, (%rsp)
-	vmovaps 16(%rsp), %xmm0
-	call HIDDEN_JUMPTARGET(\callee)
-	/* combine xmm0 (return of second call) with result of first
-	   call (saved on stack).
Might be worth exploring logic that - uses `vpblend` and reads in ymm1 using -16(rsp). */ - vmovaps (%rsp), %xmm1 - vinsertf128 $1, %xmm0, %ymm1, %ymm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret -.endm - -/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */ -.macro WRAPPER_IMPL_AVX_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - andq $-32, %rsp - subq $32, %rsp - vmovaps %ymm0, (%rsp) - pushq %rbx - pushq %r14 - movq %rdi, %rbx - movq %rsi, %r14 - vzeroupper - call HIDDEN_JUMPTARGET(\callee) - vmovaps 32(%rsp), %xmm0 - leaq 16(%rbx), %rdi - leaq 16(%r14), %rsi - call HIDDEN_JUMPTARGET(\callee) - popq %r14 - popq %rbx - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret -.endm - -/* AVX512 ISA version as wrapper to AVX2 ISA version. */ -.macro WRAPPER_IMPL_AVX512 callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - subq $64, %rsp - vmovups %zmm0, (%rsp) - call HIDDEN_JUMPTARGET(\callee) - vmovupd %ymm0, (%rsp) - vmovupd 32(%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - /* combine ymm0 (return of second call) with result of first - call (saved on stack). */ - vmovaps (%rsp), %ymm1 - vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret -.endm - -/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */ -.macro WRAPPER_IMPL_AVX512_ff callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - addq $-128, %rsp - vmovups %zmm0, (%rsp) - vmovups %zmm1, 64(%rsp) - /* ymm0 and ymm1 are already set. */ - call HIDDEN_JUMPTARGET(\callee) - vmovups 96(%rsp), %ymm1 - vmovaps %ymm0, (%rsp) - vmovups 32(%rsp), %ymm0 - call HIDDEN_JUMPTARGET(\callee) - /* combine ymm0 (return of second call) with result of first - call (saved on stack). */ - vmovaps (%rsp), %ymm1 - vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0 - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret -.endm - -/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */ -.macro WRAPPER_IMPL_AVX512_fFF callee - pushq %rbp - cfi_adjust_cfa_offset (8) - cfi_rel_offset (%rbp, 0) - movq %rsp, %rbp - cfi_def_cfa_register (%rbp) - andq $-64, %rsp - subq $64, %rsp - vmovaps %zmm0, (%rsp) - pushq %rbx - pushq %r14 - movq %rdi, %rbx - movq %rsi, %r14 - /* ymm0 is already set. */ - call HIDDEN_JUMPTARGET(\callee) - vmovaps 48(%rsp), %ymm0 - leaq 32(%rbx), %rdi - leaq 32(%r14), %rsi - call HIDDEN_JUMPTARGET(\callee) - popq %r14 - popq %rbx - movq %rbp, %rsp - cfi_def_cfa_register (%rsp) - popq %rbp - cfi_adjust_cfa_offset (-8) - cfi_restore (%rbp) - ret -.endm +#include "svml_sd_wrapper_impl.h" diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h index fd9b363045..8d8e5ef7ec 100644 --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h @@ -118,174 +118,4 @@ ret .endm -/* AVX/AVX2 ISA version as wrapper to SSE ISA version. 
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index fd9b363045..8d8e5ef7ec 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -118,174 +118,4 @@
 	ret
 .endm
 
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
-.macro WRAPPER_IMPL_AVX callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq	$-32, %rsp
-	subq	$32, %rsp
-	vmovaps	%ymm0, (%rsp)
-	vzeroupper
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovaps	%xmm0, (%rsp)
-	vmovaps	16(%rsp), %xmm0
-	call	HIDDEN_JUMPTARGET(\callee)
-	/* combine xmm0 (return of second call) with result of first
-	   call (saved on stack).  Might be worth exploring logic that
-	   uses `vpblend` and reads in ymm1 using -16(rsp).  */
-	vmovaps	(%rsp), %xmm1
-	vinsertf128 $1, %xmm0, %ymm1, %ymm0
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
-.macro WRAPPER_IMPL_AVX_ff callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq	$-32, %rsp
-	subq	$64, %rsp
-	vmovaps	%ymm0, (%rsp)
-	vmovaps	%ymm1, 32(%rsp)
-	vzeroupper
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovaps	48(%rsp), %xmm1
-	vmovaps	%xmm0, (%rsp)
-	vmovaps	16(%rsp), %xmm0
-	call	HIDDEN_JUMPTARGET(\callee)
-	/* combine xmm0 (return of second call) with result of first
-	   call (saved on stack).  Might be worth exploring logic that
-	   uses `vpblend` and reads in ymm1 using -16(rsp).  */
-	vmovaps	(%rsp), %xmm1
-	vinsertf128 $1, %xmm0, %ymm1, %ymm0
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
-.macro WRAPPER_IMPL_AVX_fFF callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	andq	$-32, %rsp
-	subq	$32, %rsp
-	vmovaps	%ymm0, (%rsp)
-	pushq	%rbx
-	pushq	%r14
-	movq	%rdi, %rbx
-	movq	%rsi, %r14
-	vzeroupper
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovaps	32(%rsp), %xmm0
-	leaq	16(%rbx), %rdi
-	leaq	16(%r14), %rsi
-	call	HIDDEN_JUMPTARGET(\callee)
-	popq	%r14
-	popq	%rbx
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version.  */
-.macro WRAPPER_IMPL_AVX512 callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq	$-64, %rsp
-	subq	$64, %rsp
-	vmovups	%zmm0, (%rsp)
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovupd	%ymm0, (%rsp)
-	vmovupd	32(%rsp), %ymm0
-	call	HIDDEN_JUMPTARGET(\callee)
-	/* combine ymm0 (return of second call) with result of first
-	   call (saved on stack).  */
-	vmovaps	(%rsp), %ymm1
-	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
-.macro WRAPPER_IMPL_AVX512_ff callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq	$-64, %rsp
-	addq	$-128, %rsp
-	vmovups	%zmm0, (%rsp)
-	vmovups	%zmm1, 64(%rsp)
-	/* ymm0 and ymm1 are already set.  */
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovups	96(%rsp), %ymm1
-	vmovaps	%ymm0, (%rsp)
-	vmovups	32(%rsp), %ymm0
-	call	HIDDEN_JUMPTARGET(\callee)
-	/* combine ymm0 (return of second call) with result of first
-	   call (saved on stack).  */
-	vmovaps	(%rsp), %ymm1
-	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
-.macro WRAPPER_IMPL_AVX512_fFF callee
-	pushq	%rbp
-	cfi_adjust_cfa_offset (8)
-	cfi_rel_offset (%rbp, 0)
-	movq	%rsp, %rbp
-	cfi_def_cfa_register (%rbp)
-	andq	$-64, %rsp
-	subq	$64, %rsp
-	vmovaps	%zmm0, (%rsp)
-	pushq	%rbx
-	pushq	%r14
-	movq	%rdi, %rbx
-	movq	%rsi, %r14
-	/* ymm0 is already set.  */
-	call	HIDDEN_JUMPTARGET(\callee)
-	vmovaps	48(%rsp), %ymm0
-	leaq	32(%rbx), %rdi
-	leaq	32(%r14), %rsi
-	call	HIDDEN_JUMPTARGET(\callee)
-	popq	%r14
-	popq	%rbx
-	movq	%rbp, %rsp
-	cfi_def_cfa_register (%rsp)
-	popq	%rbp
-	cfi_adjust_cfa_offset (-8)
-	cfi_restore (%rbp)
-	ret
-.endm
+#include "svml_sd_wrapper_impl.h"
diff --git a/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
new file mode 100644
index 0000000000..bd934ad578
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
@@ -0,0 +1,190 @@
+/* Common float/double wrapper implementations of vector math
+   functions.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq	$-32, %rsp
+	subq	$32, %rsp
+	vmovaps	%ymm0, (%rsp)
+	vzeroupper
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovaps	%xmm0, (%rsp)
+	vmovaps	16(%rsp), %xmm0
+	call	HIDDEN_JUMPTARGET(\callee)
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack).  Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp).  */
+	vmovaps	(%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
+
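+/* A note on the vzeroupper in the AVX wrappers above and below (an
+   explanatory comment, not a functional change): the SSE callee may
+   execute legacy-encoded SSE instructions, and entering it with dirty
+   upper ymm state would incur AVX/SSE transition penalties on some
+   microarchitectures.  The live input is always spilled to the stack
+   before the upper halves are zeroed.  */
+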
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX_ff callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq	$-32, %rsp
+	subq	$64, %rsp
+	vmovaps	%ymm0, (%rsp)
+	vmovaps	%ymm1, 32(%rsp)
+	vzeroupper
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovaps	48(%rsp), %xmm1
+	vmovaps	%xmm0, (%rsp)
+	vmovaps	16(%rsp), %xmm0
+	call	HIDDEN_JUMPTARGET(\callee)
+	/* combine xmm0 (return of second call) with result of first
+	   call (saved on stack).  Might be worth exploring logic that
+	   uses `vpblend` and reads in ymm1 using -16(rsp).  */
+	vmovaps	(%rsp), %xmm1
+	vinsertf128 $1, %xmm0, %ymm1, %ymm0
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
+
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX_fFF callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	andq	$-32, %rsp
+	subq	$32, %rsp
+	vmovaps	%ymm0, (%rsp)
+	pushq	%rbx
+	pushq	%r14
+	movq	%rdi, %rbx
+	movq	%rsi, %r14
+	vzeroupper
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovaps	32(%rsp), %xmm0
+	leaq	16(%rbx), %rdi
+	leaq	16(%r14), %rsi
+	call	HIDDEN_JUMPTARGET(\callee)
+	popq	%r14
+	popq	%rbx
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
+
+/* AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512 callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq	$-64, %rsp
+	subq	$64, %rsp
+	vmovups	%zmm0, (%rsp)
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovupd	%ymm0, (%rsp)
+	vmovupd	32(%rsp), %ymm0
+	call	HIDDEN_JUMPTARGET(\callee)
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack).  */
+	vmovaps	(%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512_ff callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq	$-64, %rsp
+	addq	$-128, %rsp
+	vmovups	%zmm0, (%rsp)
+	vmovups	%zmm1, 64(%rsp)
+	/* ymm0 and ymm1 are already set.  */
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovups	96(%rsp), %ymm1
+	vmovaps	%ymm0, (%rsp)
+	vmovups	32(%rsp), %ymm0
+	call	HIDDEN_JUMPTARGET(\callee)
+	/* combine ymm0 (return of second call) with result of first
+	   call (saved on stack).  */
+	vmovaps	(%rsp), %ymm1
+	vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+	pushq	%rbp
+	cfi_adjust_cfa_offset (8)
+	cfi_rel_offset (%rbp, 0)
+	movq	%rsp, %rbp
+	cfi_def_cfa_register (%rbp)
+	andq	$-64, %rsp
+	subq	$64, %rsp
+	vmovaps	%zmm0, (%rsp)
+	pushq	%rbx
+	pushq	%r14
+	movq	%rdi, %rbx
+	movq	%rsi, %r14
+	/* ymm0 is already set.  */
+	call	HIDDEN_JUMPTARGET(\callee)
+	vmovaps	48(%rsp), %ymm0
+	leaq	32(%rbx), %rdi
+	leaq	32(%r14), %rsi
+	call	HIDDEN_JUMPTARGET(\callee)
+	popq	%r14
+	popq	%rbx
+	movq	%rbp, %rsp
+	cfi_def_cfa_register (%rsp)
+	popq	%rbp
+	cfi_adjust_cfa_offset (-8)
+	cfi_restore (%rbp)
+	ret
+.endm
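A minimal usage sketch of the three-argument form, with made-up vector
symbols (`_ZGVdN4vl8l8_foo` and `_ZGVbN2vl8l8_foo` are placeholders for
illustration, not functions this patch or glibc defines):

	.text
ENTRY (_ZGVdN4vl8l8_foo)
WRAPPER_IMPL_AVX_fFF _ZGVbN2vl8l8_foo
END (_ZGVdN4vl8l8_foo)

The fFF wrappers assume the two destination pointers arrive in %rdi and
%rsi, stash them in %rbx/%r14 across the first call, and advance both by
one half-vector (16 bytes for AVX, 32 for AVX512) before the second
call, so each callee invocation writes its own half of the output
arrays.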