On 25.06.2024 11:56, Cui, Lili wrote:
> Changes in V2
> 1. Added EVEX_NF to operandconstraint to indicate setting EVEX.NF to 1.
> 2. Refined test cases to cover all cc formats.
> 3. Added invalid test case `{nf} cfcmovb %dx,%ax,%r31w` and reported error for it.
> 4. Used CFCMOV_Fixup_op instead of CFCMOV_Fixup_op0 and CFCMOV_Fixup_op1 to handle both operands as NOP_Fixup().
Just like NOP_Fixup() (and others), the new one would better be just
CFCMOV_Fixup() (without the _op suffix).
> --- a/gas/config/tc-i386.c
> +++ b/gas/config/tc-i386.c
> @@ -4426,7 +4426,9 @@ build_apx_evex_prefix (void)
> }
>
> /* Encode the NF bit. */
> - if (i.has_nf)
> + /* For CFCMOV, when the insn template supports EVEX_NF, it means that it
> + requires EVEX.NF to be 1. */
> + if (i.has_nf || i.tm.opcode_modifier.operandconstraint == EVEX_NF)
> i.vex.bytes[3] |= 0x04;
> }
Especially with it now being an operand constraint, would you mind leaving
the original comment alone? It would only be at risk of going stale when
another EVexNF insn is added.
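I.e. simply (a sketch only; the condition itself unchanged from your patch, just
without the added comment):

  /* Encode the NF bit. */
  if (i.has_nf || i.tm.opcode_modifier.operandconstraint == EVEX_NF)
    i.vex.bytes[3] |= 0x04;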
> --- /dev/null
> +++ b/gas/testsuite/gas/i386/x86-64-apx-cfcmov.s
> @@ -0,0 +1,74 @@
> +# Check 64bit EVEX-promoted CMOVcc instructions
> +
> + .text
> +_start:
> + cfcmovb %dx,%ax
> + cfcmovb %dx,%ax,%r31w
> + cfcmovb %dx,291(%r8,%rax,4)
> + cfcmovb %ecx,%edx
> + cfcmovb %ecx,%edx,%r10d
> + cfcmovb %ecx,291(%r8,%rax,4)
> + cfcmovb %r31,%r15
> + cfcmovb %r31,%r15,%r11
> + cfcmovb %r31,291(%r8,%rax,4)
> + cfcmovb 291(%r8,%rax,4),%dx
> + cfcmovb 291(%r8,%rax,4),%dx,%ax
> + cfcmovb 291(%r8,%rax,4),%ecx
> + cfcmovb 291(%r8,%rax,4),%ecx,%edx
> + cfcmovb 291(%r8,%rax,4),%r31
> + cfcmovb 291(%r8,%rax,4),%r31,%r15
> + cmovb %dx,%ax,%r31w
> + cmovb %ecx,%edx,%r10d
> + cmovb %r31,%r15,%r11
> + cmovb 291(%r8,%rax,4),%dx,%ax
> + cmovb 291(%r8,%rax,4),%ecx,%edx
> + cmovb 291(%r8,%rax,4),%r31,%r15
> +
> + .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
> + \m %dx,%ax
> + .endr
To keep line length under control and to help readability:
.irp cc, be, l, le, nb, nbe, nl, nle, no, np, ns, nz, o, p, s, z, ae, e, ne, a, ge, g
cfcmov\cc %dx,%ax
.endr
This way it's also much easier to see that some forms are still missing:
na, nae, c, nc, pe, and po come to mind. Not sure whether that's then an
exhaustive list. It may also help if the list was sorted by some suitable
criteria, such that it's easier to locate individual items.
> + .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
> + \m %dx,%ax,%r31w
> + .endr
This then also makes noticeable that you don't need two .irp-s here
(which would always need keeping in sync):
.irp cc, be, l, le, nb, nbe, nl, nle, no, np, ns, nz, o, p, s, z, ae, e, ne, a, ge, g
cfcmov\cc %dx,%ax
cmov\cc %dx,%ax,%r31w
.endr
I'm even inclined to ask that ...
> + cfcmovb %dx,%ax
> + cfcmovb.s %dx,%ax
> + {load} cfcmovb %dx,%ax
> + {store} cfcmovb %dx,%ax
> +
> + .intel_syntax noprefix
> + cfcmovb ax,dx
> + cfcmovb r31w,ax,dx
> + cfcmovb WORD PTR [r8+rax*4+291],dx
> + cfcmovb edx,ecx
> + cfcmovb r10d,edx,ecx
> + cfcmovb DWORD PTR [r8+rax*4+291],ecx
> + cfcmovb r15,r31
> + cfcmovb r11,r15,r31
> + cfcmovb QWORD PTR [r8+rax*4+291],r31
> + cfcmovb dx,WORD PTR [r8+rax*4+291]
> + cfcmovb ax,dx,WORD PTR [r8+rax*4+291]
> + cfcmovb ecx,DWORD PTR [r8+rax*4+291]
> + cfcmovb edx,ecx,DWORD PTR [r8+rax*4+291]
> + cfcmovb r31,QWORD PTR [r8+rax*4+291]
> + cfcmovb r15,r31,QWORD PTR [r8+rax*4+291]
> + cmovb r31w,ax,dx
> + cmovb r10d,edx,ecx
> + cmovb r11,r15,r31
> + cmovb ax,dx,WORD PTR [r8+rax*4+291]
> + cmovb edx,ecx,DWORD PTR [r8+rax*4+291]
> + cmovb r15,r31,QWORD PTR [r8+rax*4+291]
> +
> + .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
> + \m ax,dx
> + .endr
> +
> + .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
> + \m r31w,ax,dx
> + .endr
... these be folded with the earlier ones, too.
> @@ -14039,3 +14047,26 @@ JMPABS_Fixup (instr_info *ins, int bytemode, int sizeflag)
> return OP_IMREG (ins, bytemode, sizeflag);
> return OP_OFF64 (ins, bytemode, sizeflag);
> }
> +
> +static bool
> +CFCMOV_Fixup_op (instr_info *ins, int opnd, int sizeflag)
> +{
> + /* EVEX.NF is used as a direction bit in the 2-operand case to reverse the
> + source and destination operands. */
> + if (!ins->vex.nd && ins->vex.nf)
> + {
> + if (opnd == 0)
> + return OP_E (ins, v_swap_mode, sizeflag);
There's still no testing of this use of v_swap_mode afaics.
> + /* These bits have been consumed and should be cleared. */
> + ins->vex.nf = false;
> + ins->vex.mask_register_specifier = 0;
Hmm, I thought I had asked for this and ...
> + return OP_G (ins, v_mode, sizeflag);
> + }
> +
> + if (opnd == 0)
> + return OP_G (ins, v_mode, sizeflag);
> + /* These bits have been consumed and should be cleared. */
> + ins->vex.nf = false;
> + ins->vex.mask_register_specifier = 0;
... this state update to be folded; I may be misremembering though. Any
future updating shouldn't require touching two entirely identical places
in the same (small) function. Plus when taking the first OP_E() path,
you fail to clear ->vex.nf right now anyway (i.e. another reason to do
it once uniformly).
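For illustration, one way such a folding could look; just a sketch, not a
drop-in replacement (it latches the swap decision up front so the state can be
cleared in a single place, and uses the CFCMOV_Fixup name suggested above):

static bool
CFCMOV_Fixup (instr_info *ins, int opnd, int sizeflag)
{
  /* EVEX.NF is used as a direction bit in the 2-operand case to reverse the
     source and destination operands. */
  bool swapped = !ins->vex.nd && ins->vex.nf;

  if (opnd != 0)
    {
      /* These bits have been consumed and should be cleared; doing it here
         covers both of the paths below. */
      ins->vex.nf = false;
      ins->vex.mask_register_specifier = 0;
      return swapped ? OP_G (ins, v_mode, sizeflag)
                     : OP_E (ins, v_mode, sizeflag);
    }

  return swapped ? OP_E (ins, v_swap_mode, sizeflag)
                 : OP_G (ins, v_mode, sizeflag);
}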
Jan
> On 25.06.2024 11:56, Cui, Lili wrote:
> > Changes in V2
> > 1. Added EVEX_NF to operandconstraint to indicate setting EVEX.NF to 1.
> > 2. Refined test cases to cover all cc formats.
> > 3. Added invalid test case `{nf} cfcmovb %dx,%ax,%r31w` and reported error for it.
> > 4. Used CFCMOV_Fixup_op instead of CFCMOV_Fixup_op0 and CFCMOV_Fixup_op1 to handle both operands as NOP_Fixup().
>
> Just like NOP_Fixup() (and others), the new one would better be just
> CFCMOV_Fixup() (without the _op suffix).
>
Your suggestion is better.
> > --- a/gas/config/tc-i386.c
> > +++ b/gas/config/tc-i386.c
> > @@ -4426,7 +4426,9 @@ build_apx_evex_prefix (void)
> > }
> >
> > /* Encode the NF bit. */
> > - if (i.has_nf)
> > + /* For CFCMOV, when the insn template supports EVEX_NF, it means that it
> > + requires EVEX.NF to be 1. */
> > + if (i.has_nf || i.tm.opcode_modifier.operandconstraint == EVEX_NF)
> > i.vex.bytes[3] |= 0x04;
> > }
>
> Especially with it now being an operand constraint, would you mind leaving
> the original comment alone? It would only be at risk of going stale when
> another EVexNF insn is added.
>
Removed the additional comment.
> > --- /dev/null
> > +++ b/gas/testsuite/gas/i386/x86-64-apx-cfcmov.s
> > @@ -0,0 +1,74 @@
> > +# Check 64bit EVEX-promoted CMOVcc instructions
> > +
> > + .text
> > +_start:
> > + cfcmovb %dx,%ax
> > + cfcmovb %dx,%ax,%r31w
> > + cfcmovb %dx,291(%r8,%rax,4)
> > + cfcmovb %ecx,%edx
> > + cfcmovb %ecx,%edx,%r10d
> > + cfcmovb %ecx,291(%r8,%rax,4)
> > + cfcmovb %r31,%r15
> > + cfcmovb %r31,%r15,%r11
> > + cfcmovb %r31,291(%r8,%rax,4)
> > + cfcmovb 291(%r8,%rax,4),%dx
> > + cfcmovb 291(%r8,%rax,4),%dx,%ax
> > + cfcmovb 291(%r8,%rax,4),%ecx
> > + cfcmovb 291(%r8,%rax,4),%ecx,%edx
> > + cfcmovb 291(%r8,%rax,4),%r31
> > + cfcmovb 291(%r8,%rax,4),%r31,%r15
> > + cmovb %dx,%ax,%r31w
> > + cmovb %ecx,%edx,%r10d
> > + cmovb %r31,%r15,%r11
> > + cmovb 291(%r8,%rax,4),%dx,%ax
> > + cmovb 291(%r8,%rax,4),%ecx,%edx
> > + cmovb 291(%r8,%rax,4),%r31,%r15
> > +
> > + .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
> > + \m %dx,%ax
> > + .endr
>
> To keep line length under control and to help readability:
>
> .irp cc, be, l, le, nb, nbe, nl, nle, no, np, ns, nz, o, p, s, z, ae, e, ne, a, ge, g
> cfcmov\cc %dx,%ax
> .endr
>
> This way it's also much easier to see that some forms are still missing:
> na, nae, c, nc, pe, and po come to mind. Not sure whether that's then an
> exhaustive list. It may also help if the list was sorted by some suitable criteria,
> such that it's easier to locate individual items.
>
> > + .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
> > + \m %dx,%ax,%r31w
> > + .endr
>
> This then also makes noticeable that you don't need two .irp-s here (which
> would always need keeping in sync):
>
> .irp cc, be, l, le, nb, nbe, nl, nle, no, np, ns, nz, o, p, s, z, ae, e, ne, a, ge, g
> cfcmov\cc %dx,%ax
> cmov\cc %dx,%ax,%r31w
> .endr
>
Your suggestion is tidier; I added all the cc forms in SDM order (in V2 I had only added the jcc part).
> I'm even inclined to ask that ...
>
> > + cfcmovb %dx,%ax
> > + cfcmovb.s %dx,%ax
> > + {load} cfcmovb %dx,%ax
> > + {store} cfcmovb %dx,%ax
> > +
> > + .intel_syntax noprefix
> > + cfcmovb ax,dx
> > + cfcmovb r31w,ax,dx
> > + cfcmovb WORD PTR [r8+rax*4+291],dx
> > + cfcmovb edx,ecx
> > + cfcmovb r10d,edx,ecx
> > + cfcmovb DWORD PTR [r8+rax*4+291],ecx
> > + cfcmovb r15,r31
> > + cfcmovb r11,r15,r31
> > + cfcmovb QWORD PTR [r8+rax*4+291],r31
> > + cfcmovb dx,WORD PTR [r8+rax*4+291]
> > + cfcmovb ax,dx,WORD PTR [r8+rax*4+291]
> > + cfcmovb ecx,DWORD PTR [r8+rax*4+291]
> > + cfcmovb edx,ecx,DWORD PTR [r8+rax*4+291]
> > + cfcmovb r31,QWORD PTR [r8+rax*4+291]
> > + cfcmovb r15,r31,QWORD PTR [r8+rax*4+291]
> > + cmovb r31w,ax,dx
> > + cmovb r10d,edx,ecx
> > + cmovb r11,r15,r31
> > + cmovb ax,dx,WORD PTR [r8+rax*4+291]
> > + cmovb edx,ecx,DWORD PTR [r8+rax*4+291]
> > + cmovb r15,r31,QWORD PTR [r8+rax*4+291]
> > +
> > + .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
> > + \m ax,dx
> > + .endr
> > +
> > + .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
> > + \m r31w,ax,dx
> > + .endr
>
> ... these be folded with the earlier ones, too.
>
I prefer to keep the ".irp" blocks under the .intel_syntax noprefix part as well; that is more symmetrical.
> > @@ -14039,3 +14047,26 @@ JMPABS_Fixup (instr_info *ins, int bytemode, int sizeflag)
> > return OP_IMREG (ins, bytemode, sizeflag);
> > return OP_OFF64 (ins, bytemode, sizeflag);
> > }
> > +
> > +static bool
> > +CFCMOV_Fixup_op (instr_info *ins, int opnd, int sizeflag)
> > +{
> > + /* EVEX.NF is used as a direction bit in the 2-operand case to reverse the
> > + source and destination operands. */
> > + if (!ins->vex.nd && ins->vex.nf)
> > + {
> > + if (opnd == 0)
> > + return OP_E (ins, v_swap_mode, sizeflag);
>
> There's still no testing of this use of v_swap_mode afaics.
>
I thought adding -Msuffix to x86-64-apx-cfcmov-intel.d together with the tests below was what you wanted; it now seems I am missing something. Could you give some more detail? Thanks.
[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
> > + /* These bits have been consumed and should be cleared. */
> > + ins->vex.nf = false;
> > + ins->vex.mask_register_specifier = 0;
>
> Hmm, I thought I had asked for this and ...
>
> > + return OP_G (ins, v_mode, sizeflag);
> > + }
> > +
> > + if (opnd == 0)
> > + return OP_G (ins, v_mode, sizeflag);
> > + /* These bits have been consumed and should be cleared. */
> > + ins->vex.nf = false;
> > + ins->vex.mask_register_specifier = 0;
>
> ... this state update to be folded; I may be misremembering though. Any future
> updating shouldn't require touching two entirely identical places in the same
> (small) function. Plus when taking the first OP_E() path, you fail to clear
> ->vex.nf right now anyway (i.e. another reason to do it once uniformly).
>
I think you mean clearing vex.nf and vex.mask_register_specifier in only one place.
I checked the logic and I don't think the two sites can be folded: we want to clear vex.nf right before handling the last operand, and the last operand is reached in two cases:
Case 1: (!ins->vex.nd && ins->vex.nf) is true, and vex.nf needs to be cleared after that check.
Case 2: ins->vex.nf is true, and vex.nf needs to be cleared. (I added a condition so the bits are only cleared when actually set.)
if (opnd == 0)
return OP_G (ins, v_mode, sizeflag);
- /* These bits have been consumed and should be cleared. */
- ins->vex.nf = false;
- ins->vex.mask_register_specifier = 0;
+ if (ins->vex.nf)
+ {
+ /* These bits have been consumed and should be cleared. */
+ ins->vex.nf = false;
+ ins->vex.mask_register_specifier = 0;
+ }
return OP_E (ins, v_mode, sizeflag);
}
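For reference, with the change above applied the whole function would read as
follows (assembled from the quoted hunks; still using the CFCMOV_Fixup_op name
from the posted patch, the rename to CFCMOV_Fixup() discussed above being a
separate change):

static bool
CFCMOV_Fixup_op (instr_info *ins, int opnd, int sizeflag)
{
  /* EVEX.NF is used as a direction bit in the 2-operand case to reverse the
     source and destination operands. */
  if (!ins->vex.nd && ins->vex.nf)
    {
      if (opnd == 0)
        return OP_E (ins, v_swap_mode, sizeflag);
      /* These bits have been consumed and should be cleared. */
      ins->vex.nf = false;
      ins->vex.mask_register_specifier = 0;
      return OP_G (ins, v_mode, sizeflag);
    }

  if (opnd == 0)
    return OP_G (ins, v_mode, sizeflag);
  if (ins->vex.nf)
    {
      /* These bits have been consumed and should be cleared. */
      ins->vex.nf = false;
      ins->vex.mask_register_specifier = 0;
    }
  return OP_E (ins, v_mode, sizeflag);
}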
Thanks,
Lili.
@@ -4426,7 +4426,9 @@ build_apx_evex_prefix (void)
}
/* Encode the NF bit. */
- if (i.has_nf)
+ /* For CFCMOV, when the insn template supports EVEX_NF, it means that it
+ requires EVEX.NF to be 1. */
+ if (i.has_nf || i.tm.opcode_modifier.operandconstraint == EVEX_NF)
i.vex.bytes[3] |= 0x04;
}
new file mode 100644
@@ -0,0 +1,145 @@
+#as:
+#objdump: -dw -Mintel -Msuffix
+#name: x86_64 APX_F insns (Intel disassembly)
+#source: x86-64-apx-cfcmov.s
+
+.*: +file format .*
+
+Disassembly of section \.text:
+
+0+ <_start>:
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 14 42 c2[ ]+cfcmovb r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 0c 42 94 80 23 01 00 00[ ]+cfcmovb WORD PTR \[r8\+rax\*4\+0x123\],dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7c 08 42 d1[ ]+cfcmovb edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 1c 42 d1[ ]+cfcmovb r10d,edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 0c 42 8c 80 23 01 00 00[ ]+cfcmovb DWORD PTR \[r8\+rax\*4\+0x123\],ecx
+[ ]*[a-f0-9]+:[ ]*62 5c fc 08 42 ff[ ]+cfcmovb r15,r31
+[ ]*[a-f0-9]+:[ ]*62 5c a4 1c 42 ff[ ]+cfcmovb r11,r15,r31
+[ ]*[a-f0-9]+:[ ]*62 44 fc 0c 42 bc 80 23 01 00 00[ ]+cfcmovb QWORD PTR \[r8\+rax\*4\+0x123\],r31
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 08 42 94 80 23 01 00 00[ ]+cfcmovb dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 1c 42 94 80 23 01 00 00[ ]+cfcmovb ax,dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 08 42 8c 80 23 01 00 00[ ]+cfcmovb ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 1c 42 8c 80 23 01 00 00[ ]+cfcmovb edx,ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 fc 08 42 bc 80 23 01 00 00[ ]+cfcmovb r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 84 1c 42 bc 80 23 01 00 00[ ]+cfcmovb r15,r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 42 c2[ ]+cmovb r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 18 42 d1[ ]+cmovb r10d,edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 5c a4 18 42 ff[ ]+cmovb r11,r15,r31
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 18 42 94 80 23 01 00 00[ ]+cmovb ax,dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 18 42 8c 80 23 01 00 00[ ]+cmovb edx,ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 84 18 42 bc 80 23 01 00 00[ ]+cmovb r15,r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 46 c2[ ]+cfcmovbe ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4c c2[ ]+cfcmovl ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4e c2[ ]+cfcmovle ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 41 c2[ ]+cfcmovno ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4b c2[ ]+cfcmovnp ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 49 c2[ ]+cfcmovns ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 40 c2[ ]+cfcmovo ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4a c2[ ]+cfcmovp ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 48 c2[ ]+cfcmovs ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 46 c2[ ]+cmovbe r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4c c2[ ]+cmovl r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4e c2[ ]+cmovle r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 41 c2[ ]+cmovno r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4b c2[ ]+cmovnp r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 49 c2[ ]+cmovns r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 40 c2[ ]+cmovo r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4a c2[ ]+cmovp r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 48 c2[ ]+cmovs r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 14 42 c2[ ]+cfcmovb r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 0c 42 94 80 23 01 00 00[ ]+cfcmovb WORD PTR \[r8\+rax\*4\+0x123\],dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7c 08 42 d1[ ]+cfcmovb edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 1c 42 d1[ ]+cfcmovb r10d,edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 0c 42 8c 80 23 01 00 00[ ]+cfcmovb DWORD PTR \[r8\+rax\*4\+0x123\],ecx
+[ ]*[a-f0-9]+:[ ]*62 5c fc 08 42 ff[ ]+cfcmovb r15,r31
+[ ]*[a-f0-9]+:[ ]*62 5c a4 1c 42 ff[ ]+cfcmovb r11,r15,r31
+[ ]*[a-f0-9]+:[ ]*62 44 fc 0c 42 bc 80 23 01 00 00[ ]+cfcmovb QWORD PTR \[r8\+rax\*4\+0x123\],r31
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 08 42 94 80 23 01 00 00[ ]+cfcmovb dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 1c 42 94 80 23 01 00 00[ ]+cfcmovb ax,dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 08 42 8c 80 23 01 00 00[ ]+cfcmovb ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 1c 42 8c 80 23 01 00 00[ ]+cfcmovb edx,ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 fc 08 42 bc 80 23 01 00 00[ ]+cfcmovb r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 84 1c 42 bc 80 23 01 00 00[ ]+cfcmovb r15,r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 42 c2[ ]+cmovb r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 18 42 d1[ ]+cmovb r10d,edx,ecx
+[ ]*[a-f0-9]+:[ ]*62 5c a4 18 42 ff[ ]+cmovb r11,r15,r31
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 18 42 94 80 23 01 00 00[ ]+cmovb ax,dx,WORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 18 42 8c 80 23 01 00 00[ ]+cmovb edx,ecx,DWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 44 84 18 42 bc 80 23 01 00 00[ ]+cmovb r15,r31,QWORD PTR \[r8\+rax\*4\+0x123\]
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 46 c2[ ]+cfcmovbe ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4c c2[ ]+cfcmovl ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4e c2[ ]+cfcmovle ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 41 c2[ ]+cfcmovno ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4b c2[ ]+cfcmovnp ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 49 c2[ ]+cfcmovns ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 40 c2[ ]+cfcmovo ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4a c2[ ]+cfcmovp ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 48 c2[ ]+cfcmovs ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 46 c2[ ]+cmovbe r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4c c2[ ]+cmovl r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4e c2[ ]+cmovle r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 41 c2[ ]+cmovno r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4b c2[ ]+cmovnp r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 49 c2[ ]+cmovns r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 40 c2[ ]+cmovo r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4a c2[ ]+cmovp r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 48 c2[ ]+cmovs r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg r31w,ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb ax,dx
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb.s ax,dx
+#pass
new file mode 100644
@@ -0,0 +1,145 @@
+#as:
+#objdump: -dw
+#name: x86_64 APX_F insns
+#source: x86-64-apx-cfcmov.s
+
+.*: +file format .*
+
+Disassembly of section \.text:
+
+0+ <_start>:
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 05 14 42 c2[ ]+cfcmovb %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 0c 42 94 80 23 01 00 00[ ]+cfcmovb %dx,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 f4 7c 08 42 d1[ ]+cfcmovb %ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 1c 42 d1[ ]+cfcmovb %ecx,%edx,%r10d
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 0c 42 8c 80 23 01 00 00[ ]+cfcmovb %ecx,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 5c fc 08 42 ff[ ]+cfcmovb %r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 5c a4 1c 42 ff[ ]+cfcmovb %r31,%r15,%r11
+[ ]*[a-f0-9]+:[ ]*62 44 fc 0c 42 bc 80 23 01 00 00[ ]+cfcmovb %r31,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 08 42 94 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%dx
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 1c 42 94 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 08 42 8c 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%ecx
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 1c 42 8c 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 44 fc 08 42 bc 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%r31
+[ ]*[a-f0-9]+:[ ]*62 44 84 1c 42 bc 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 42 c2[ ]+cmovb %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 18 42 d1[ ]+cmovb %ecx,%edx,%r10d
+[ ]*[a-f0-9]+:[ ]*62 5c a4 18 42 ff[ ]+cmovb %r31,%r15,%r11
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 18 42 94 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 18 42 8c 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 44 84 18 42 bc 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 46 c2[ ]+cfcmovbe %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4c c2[ ]+cfcmovl %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4e c2[ ]+cfcmovle %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 41 c2[ ]+cfcmovno %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4b c2[ ]+cfcmovnp %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 49 c2[ ]+cfcmovns %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 40 c2[ ]+cfcmovo %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4a c2[ ]+cfcmovp %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 48 c2[ ]+cfcmovs %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 46 c2[ ]+cmovbe %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4c c2[ ]+cmovl %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4e c2[ ]+cmovle %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 41 c2[ ]+cmovno %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4b c2[ ]+cmovnp %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 49 c2[ ]+cmovns %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 40 c2[ ]+cmovo %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4a c2[ ]+cmovp %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 48 c2[ ]+cmovs %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 05 14 42 c2[ ]+cfcmovb %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 0c 42 94 80 23 01 00 00[ ]+cfcmovb %dx,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 f4 7c 08 42 d1[ ]+cfcmovb %ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 1c 42 d1[ ]+cfcmovb %ecx,%edx,%r10d
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 0c 42 8c 80 23 01 00 00[ ]+cfcmovb %ecx,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 5c fc 08 42 ff[ ]+cfcmovb %r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 5c a4 1c 42 ff[ ]+cfcmovb %r31,%r15,%r11
+[ ]*[a-f0-9]+:[ ]*62 44 fc 0c 42 bc 80 23 01 00 00[ ]+cfcmovb %r31,0x123\(%r8,%rax,4\)
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 08 42 94 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%dx
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 1c 42 94 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 d4 7c 08 42 8c 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%ecx
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 1c 42 8c 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 44 fc 08 42 bc 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%r31
+[ ]*[a-f0-9]+:[ ]*62 44 84 1c 42 bc 80 23 01 00 00[ ]+cfcmovb 0x123\(%r8,%rax,4\),%r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 42 c2[ ]+cmovb %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 2c 18 42 d1[ ]+cmovb %ecx,%edx,%r10d
+[ ]*[a-f0-9]+:[ ]*62 5c a4 18 42 ff[ ]+cmovb %r31,%r15,%r11
+[ ]*[a-f0-9]+:[ ]*62 d4 7d 18 42 94 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 d4 6c 18 42 8c 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%ecx,%edx
+[ ]*[a-f0-9]+:[ ]*62 44 84 18 42 bc 80 23 01 00 00[ ]+cmovb 0x123\(%r8,%rax,4\),%r31,%r15
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 46 c2[ ]+cfcmovbe %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4c c2[ ]+cfcmovl %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4e c2[ ]+cfcmovle %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 41 c2[ ]+cfcmovno %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4b c2[ ]+cfcmovnp %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 49 c2[ ]+cfcmovns %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 40 c2[ ]+cfcmovo %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4a c2[ ]+cfcmovp %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 48 c2[ ]+cfcmovs %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 43 c2[ ]+cfcmovae %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 44 c2[ ]+cfcmove %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 45 c2[ ]+cfcmovne %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 47 c2[ ]+cfcmova %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4d c2[ ]+cfcmovge %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 4f c2[ ]+cfcmovg %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 46 c2[ ]+cmovbe %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4c c2[ ]+cmovl %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4e c2[ ]+cmovle %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 41 c2[ ]+cmovno %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4b c2[ ]+cmovnp %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 49 c2[ ]+cmovns %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 40 c2[ ]+cmovo %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4a c2[ ]+cmovp %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 48 c2[ ]+cmovs %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 43 c2[ ]+cmovae %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 44 c2[ ]+cmove %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 45 c2[ ]+cmovne %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 47 c2[ ]+cmova %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4d c2[ ]+cmovge %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 05 10 4f c2[ ]+cmovg %dx,%ax,%r31w
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 08 42 c2[ ]+cfcmovb %dx,%ax
+[ ]*[a-f0-9]+:[ ]*62 f4 7d 0c 42 d0[ ]+cfcmovb %dx,%ax
+#pass
new file mode 100644
@@ -0,0 +1,74 @@
+# Check 64bit EVEX-promoted CMOVcc instructions
+
+ .text
+_start:
+ cfcmovb %dx,%ax
+ cfcmovb %dx,%ax,%r31w
+ cfcmovb %dx,291(%r8,%rax,4)
+ cfcmovb %ecx,%edx
+ cfcmovb %ecx,%edx,%r10d
+ cfcmovb %ecx,291(%r8,%rax,4)
+ cfcmovb %r31,%r15
+ cfcmovb %r31,%r15,%r11
+ cfcmovb %r31,291(%r8,%rax,4)
+ cfcmovb 291(%r8,%rax,4),%dx
+ cfcmovb 291(%r8,%rax,4),%dx,%ax
+ cfcmovb 291(%r8,%rax,4),%ecx
+ cfcmovb 291(%r8,%rax,4),%ecx,%edx
+ cfcmovb 291(%r8,%rax,4),%r31
+ cfcmovb 291(%r8,%rax,4),%r31,%r15
+ cmovb %dx,%ax,%r31w
+ cmovb %ecx,%edx,%r10d
+ cmovb %r31,%r15,%r11
+ cmovb 291(%r8,%rax,4),%dx,%ax
+ cmovb 291(%r8,%rax,4),%ecx,%edx
+ cmovb 291(%r8,%rax,4),%r31,%r15
+
+ .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
+ \m %dx,%ax
+ .endr
+
+ .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
+ \m %dx,%ax,%r31w
+ .endr
+
+ cfcmovb %dx,%ax
+ cfcmovb.s %dx,%ax
+ {load} cfcmovb %dx,%ax
+ {store} cfcmovb %dx,%ax
+
+ .intel_syntax noprefix
+ cfcmovb ax,dx
+ cfcmovb r31w,ax,dx
+ cfcmovb WORD PTR [r8+rax*4+291],dx
+ cfcmovb edx,ecx
+ cfcmovb r10d,edx,ecx
+ cfcmovb DWORD PTR [r8+rax*4+291],ecx
+ cfcmovb r15,r31
+ cfcmovb r11,r15,r31
+ cfcmovb QWORD PTR [r8+rax*4+291],r31
+ cfcmovb dx,WORD PTR [r8+rax*4+291]
+ cfcmovb ax,dx,WORD PTR [r8+rax*4+291]
+ cfcmovb ecx,DWORD PTR [r8+rax*4+291]
+ cfcmovb edx,ecx,DWORD PTR [r8+rax*4+291]
+ cfcmovb r31,QWORD PTR [r8+rax*4+291]
+ cfcmovb r15,r31,QWORD PTR [r8+rax*4+291]
+ cmovb r31w,ax,dx
+ cmovb r10d,edx,ecx
+ cmovb r11,r15,r31
+ cmovb ax,dx,WORD PTR [r8+rax*4+291]
+ cmovb edx,ecx,DWORD PTR [r8+rax*4+291]
+ cmovb r15,r31,QWORD PTR [r8+rax*4+291]
+
+ .irp m, cfcmovbe, cfcmovl, cfcmovle, cfcmovnb, cfcmovnbe, cfcmovnl, cfcmovnle, cfcmovno, cfcmovnp, cfcmovns, cfcmovnz, cfcmovo, cfcmovp, cfcmovs, cfcmovz, cfcmovae, cfcmove, cfcmovne, cfcmova, cfcmovge, cfcmovg
+ \m ax,dx
+ .endr
+
+ .irp m, cmovbe, cmovl, cmovle, cmovnb, cmovnbe, cmovnl, cmovnle, cmovno, cmovnp, cmovns, cmovnz, cmovo, cmovp, cmovs, cmovz, cmovae, cmove, cmovne, cmova, cmovge, cmovg
+ \m r31w,ax,dx
+ .endr
+
+ cfcmovb ax,dx
+ cfcmovb.s ax,dx
+ {load} cfcmovb ax,dx
+ {store} cfcmovb ax,dx
@@ -10,8 +10,11 @@
.*:11: Error: \{nf\} unsupported for `push2'
.*:12: Error: \{nf\} unsupported for `adcx'
.*:13: Error: \{nf\} unsupported for `mulx'
-.*:14: Error: \{nf\} cannot be combined with \{vex\}/\{vex3\}
-.*:15: Error: \{nf\} cannot be combined with \{vex\}/\{vex3\}
-.*:16: Error: can't encode register '%ah' in an instruction requiring EVEX prefix
-.*:17: Error: can't encode register '%ah' in an instruction requiring EVEX prefix
+.*:14: Error: \{nf\} unsupported for `cfcmovl'
+.*:15: Error: \{nf\} unsupported for `cfcmovl'
+.*:16: Error: \{nf\} unsupported for `cfcmovb'
+.*:17: Error: \{nf\} cannot be combined with \{vex\}/\{vex3\}
+.*:18: Error: \{nf\} cannot be combined with \{vex\}/\{vex3\}
+.*:19: Error: can't encode register '%ah' in an instruction requiring EVEX prefix
+.*:20: Error: can't encode register '%ah' in an instruction requiring EVEX prefix
#pass
@@ -11,6 +11,9 @@
{nf} push2 %rbx, %rax
{nf} adcx %r15,%r15
{nf} mulx %r15,%r15,%r11
+ {nf} cfcmovl %dx,291(%r8,%rax,4)
+ {nf} cfcmovl 291(%r8,%rax,4),%dx
+ {nf} cfcmovb %dx,%ax,%r31w
{nf} {vex} bextr %ecx, %edx, %r10d
{vex} {nf} bextr %ecx, %edx, %r10d
{nf} add %dl,%ah
@@ -356,6 +356,8 @@ run_list_test "x86-64-apx-push2pop2-inval"
run_dump_test "x86-64-apx-ccmp-ctest"
run_dump_test "x86-64-apx-ccmp-ctest-intel"
run_list_test "x86-64-apx-ccmp-ctest-inval"
+run_dump_test "x86-64-apx-cfcmov"
+run_dump_test "x86-64-apx-cfcmov-intel"
run_dump_test "x86-64-apx-pushp-popp"
run_dump_test "x86-64-apx-pushp-popp-intel"
run_list_test "x86-64-apx-pushp-popp-inval"
@@ -340,9 +340,9 @@
},
/* PREFIX_EVEX_MAP4_4x */
{
- { "%CFcmov%CCS", { VexGv, Gv, Ev }, 0 },
+ { "%CFcmov%CCS", { VexGv, { CFCMOV_Fixup_op, 0 }, { CFCMOV_Fixup_op, 1 } }, 0 },
{ Bad_Opcode },
- { "%CFcmov%CCS", { VexGv, Gv, Ev }, 0 },
+ { "%CFcmov%CCS", { VexGv, { CFCMOV_Fixup_op, 0 }, { CFCMOV_Fixup_op, 1 } }, 0 },
{ "set%ZU%CC", { Eb }, 0 },
},
/* PREFIX_EVEX_MAP4_F0 */
@@ -107,6 +107,7 @@ static bool DistinctDest_Fixup (instr_info *, int, int);
static bool PREFETCHI_Fixup (instr_info *, int, int);
static bool PUSH2_POP2_Fixup (instr_info *, int, int);
static bool JMPABS_Fixup (instr_info *, int, int);
+static bool CFCMOV_Fixup_op (instr_info *, int, int);
static void ATTRIBUTE_PRINTF_3 i386_dis_printf (const disassemble_info *,
enum disassembler_style,
@@ -4041,7 +4042,7 @@ static const struct dis386 prefix_table[][4] = {
{ "vbcstnebf162ps", { XM, Mw }, 0 },
{ "vbcstnesh2ps", { XM, Mw }, 0 },
},
-
+
/* PREFIX_VEX_0F38D2_W_0 */
{
{ "vpdpwuud", { XM, Vex, EXx }, 0 },
@@ -10559,7 +10560,14 @@ putop (instr_info *ins, const char *in_template, int sizeflag)
}
}
else if (l == 1 && last[0] == 'C')
- break;
+ {
+ if (ins->vex.nd && !ins->vex.nf)
+ break;
+ *ins->obufp++ = 'c';
+ *ins->obufp++ = 'f';
+ /* Skip printing {evex} */
+ evex_printed = true;
+ }
else if (l == 1 && last[0] == 'N')
{
if (ins->vex.nf)
@@ -14039,3 +14047,26 @@ JMPABS_Fixup (instr_info *ins, int bytemode, int sizeflag)
return OP_IMREG (ins, bytemode, sizeflag);
return OP_OFF64 (ins, bytemode, sizeflag);
}
+
+static bool
+CFCMOV_Fixup_op (instr_info *ins, int opnd, int sizeflag)
+{
+ /* EVEX.NF is used as a direction bit in the 2-operand case to reverse the
+ source and destination operands. */
+ if (!ins->vex.nd && ins->vex.nf)
+ {
+ if (opnd == 0)
+ return OP_E (ins, v_swap_mode, sizeflag);
+ /* These bits have been consumed and should be cleared. */
+ ins->vex.nf = false;
+ ins->vex.mask_register_specifier = 0;
+ return OP_G (ins, v_mode, sizeflag);
+ }
+
+ if (opnd == 0)
+ return OP_G (ins, v_mode, sizeflag);
+ /* These bits have been consumed and should be cleared. */
+ ins->vex.nf = false;
+ ins->vex.mask_register_specifier = 0;
+ return OP_E (ins, v_mode, sizeflag);
+}
@@ -583,6 +583,8 @@ enum
#define ZERO_UPPER 10
/* Instruction support SCC. */
#define SCC 11
+ /* Instruction requires EVEX.NF to be 1. */
+#define EVEX_NF 12
OperandConstraint,
/* instruction ignores operand size prefix and in Intel mode ignores
mnemonic size suffix check. */
@@ -87,6 +87,7 @@
#define ImplicitStackOp OperandConstraint=IMPLICIT_STACK_OP
#define ZU OperandConstraint=ZERO_UPPER
#define Scc OperandConstraint=SCC
+#define EVexNF OperandConstraint=EVEX_NF
#define ATTSyntax Dialect=ATT_SYNTAX
#define ATTMnemonic Dialect=ATT_MNEMONIC
@@ -989,6 +990,10 @@ ud0, 0xfff, i186, Modrm|CheckOperandSize|No_bSuf|No_sSuf, { Reg16|Reg32|Reg64|Un
cmov<cc>, 0x4<cc:opc>, CMOV&APX_F, Modrm|CheckOperandSize|No_bSuf|No_sSuf|DstVVVV|EVexMap4, { Reg16|Reg32|Reg64|Unspecified|BaseIndex, Reg16|Reg32|Reg64, Reg16|Reg32|Reg64 }
cmov<cc>, 0xf4<cc:opc>, CMOV, Modrm|CheckOperandSize|No_bSuf|No_sSuf, { Reg16|Reg32|Reg64|Unspecified|BaseIndex, Reg16|Reg32|Reg64 }
+cfcmov<cc>, 0x4<cc:opc>, CMOV&APX_F, Modrm|CheckOperandSize|No_bSuf|No_sSuf|DstVVVV|EVexMap4|EVexNF, { Reg16|Reg32|Reg64|Unspecified|BaseIndex, Reg16|Reg32|Reg64, Reg16|Reg32|Reg64 }
+cfcmov<cc>, 0x4<cc:opc>, CMOV&APX_F, Load|Modrm|CheckOperandSize|No_bSuf|No_sSuf|EVexMap4, { Reg16|Reg32|Reg64|Unspecified|BaseIndex, Reg16|Reg32|Reg64 }
+cfcmov<cc>, 0x4<cc:opc>, CMOV&APX_F, Modrm|CheckOperandSize|No_bSuf|No_sSuf|EVexMap4|EVexNF, { Reg16|Reg32|Reg64, Reg16|Reg32|Reg64|Unspecified|BaseIndex }
+
fcmovb, 0xda/0, i687, Modrm|NoSuf, { FloatReg, FloatAcc }
fcmovnae, 0xda/0, i687, Modrm|NoSuf, { FloatReg, FloatAcc }
fcmove, 0xda/1, i687, Modrm|NoSuf, { FloatReg, FloatAcc }