//===-- X86InstrArithmetic.td - Integer Arithmetic Instrs --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the integer arithmetic instructions in the X86
// architecture.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// LEA - Load Effective Address
let SchedRW = [WriteLEA] in {
let hasSideEffects = 0 in
def LEA16r : I<0x8D, MRMSrcMem,
               (outs GR16:$dst), (ins anymem:$src),
               "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize16;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
               (outs GR32:$dst), (ins anymem:$src),
               "lea{l}\t{$src|$dst}, {$dst|$src}",
               [(set GR32:$dst, lea32addr:$src)]>,
               OpSize32, Requires<[Not64BitMode]>;
def LEA64_32r : I<0x8D, MRMSrcMem,
                  (outs GR32:$dst), (ins lea64_32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea64_32addr:$src)]>,
                  OpSize32, Requires<[In64BitMode]>;
let isReMaterializable = 1 in
def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
                "lea{q}\t{$src|$dst}, {$dst|$src}",
                [(set GR64:$dst, lea64addr:$src)]>;
} // SchedRW

// Pseudo instructions for LEA that prevent the optimizer from eliminating
// the instruction.
let SchedRW = [WriteLEA], isPseudo = true, hasSideEffects = 1 in {
def PLEA32r : PseudoI<(outs GR32:$dst), (ins anymem:$src), []>;
def PLEA64r : PseudoI<(outs GR64:$dst), (ins anymem:$src), []>;
}

//===----------------------------------------------------------------------===//
// MUL/IMUL and DIV/IDIV Instructions
//
class MulDivOpR<bits<8> o, Format f, string m, X86TypeInfo t,
                X86FoldableSchedWrite sched, list<dag> p>
  : UnaryOpR {
  let SchedRW = [sched];
}

class MulDivOpM<bits<8> o, Format f, string m, X86TypeInfo t,
                X86FoldableSchedWrite sched, list<dag> p>
  : UnaryOpM {
  let SchedRW = [sched.Folded,
                 // Memory operand.
                 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
                 ReadDefault,
                 // Register reads (implicit or explicit).
                 sched.ReadAfterFold, sched.ReadAfterFold];
}

multiclass Mul<bits<8> o, string m, Format RegMRM, Format MemMRM,
               SDPatternOperator node> {
  // AL is really implied by AX, but the registers in Defs must match the
  // SDNode results (i8, i32).
  //
  // FIXME: Used for 8-bit mul, ignore result upper 8 bits.
  // This probably ought to be moved to a def : Pat<> if the
  // syntax can be accepted.
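  // For reference: these one-operand widening forms use implicit registers.
  // For example, the 16-bit "mul cx" multiplies AX by CX and leaves the
  // double-width product in DX:AX, which is why DX appears in Defs below.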
let Defs = [AL, EFLAGS, AX], Uses = [AL] in def 8r : MulDivOpR; let Defs = [AX, DX, EFLAGS], Uses = [AX] in def 16r : MulDivOpR, OpSize16; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX] in def 32r : MulDivOpR, OpSize32; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX] in def 64r : MulDivOpR; let Defs = [AL, EFLAGS, AX], Uses = [AL] in def 8m : MulDivOpM; let Defs = [AX, DX, EFLAGS], Uses = [AX] in def 16m : MulDivOpM, OpSize16; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX] in def 32m : MulDivOpM, OpSize32; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX] in def 64m : MulDivOpM, Requires<[In64BitMode]>; let Predicates = [In64BitMode] in { let Defs = [AL, AX], Uses = [AL] in def 8r_NF : MulDivOpR, NF; let Defs = [AX, DX], Uses = [AX] in def 16r_NF : MulDivOpR, NF, PD; let Defs = [EAX, EDX], Uses = [EAX] in def 32r_NF : MulDivOpR, NF; let Defs = [RAX, RDX], Uses = [RAX] in def 64r_NF : MulDivOpR, NF; let Defs = [AL, AX], Uses = [AL] in def 8m_NF : MulDivOpM, NF; let Defs = [AX, DX], Uses = [AX] in def 16m_NF : MulDivOpM, NF, PD; let Defs = [EAX, EDX], Uses = [EAX] in def 32m_NF : MulDivOpM, NF; let Defs = [RAX, RDX], Uses = [RAX] in def 64m_NF : MulDivOpM, NF; let Defs = [AL, EFLAGS, AX], Uses = [AL] in def 8r_EVEX : MulDivOpR, PL; let Defs = [AX, DX, EFLAGS], Uses = [AX] in def 16r_EVEX : MulDivOpR, PL, PD; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX] in def 32r_EVEX : MulDivOpR, PL; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX] in def 64r_EVEX : MulDivOpR, PL; let Defs = [AL, EFLAGS, AX], Uses = [AL] in def 8m_EVEX : MulDivOpM, PL; let Defs = [AX, DX, EFLAGS], Uses = [AX] in def 16m_EVEX : MulDivOpM, PL, PD; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX] in def 32m_EVEX : MulDivOpM, PL; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX] in def 64m_EVEX : MulDivOpM, PL; } } defm MUL : Mul<0xF7, "mul", MRM4r, MRM4m, mul>; defm IMUL : Mul<0xF7, "imul", MRM5r, MRM5m, null_frag>; multiclass Div o, string m, Format RegMRM, Format MemMRM> { defvar sched8 = !if(!eq(m, "div"), WriteDiv8, WriteIDiv8); defvar sched16 = !if(!eq(m, "div"), WriteDiv16, WriteIDiv16); defvar sched32 = !if(!eq(m, "div"), WriteDiv32, WriteIDiv32); defvar sched64 = !if(!eq(m, "div"), WriteDiv64, WriteIDiv64); let Defs = [AL, AH, EFLAGS], Uses = [AX] in def 8r : MulDivOpR; let Defs = [AX, DX, EFLAGS], Uses = [AX, DX] in def 16r : MulDivOpR, OpSize16; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EDX] in def 32r : MulDivOpR, OpSize32; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RDX] in def 64r : MulDivOpR; let Defs = [AL, AH, EFLAGS], Uses = [AX] in def 8m : MulDivOpM; let Defs = [AX, DX, EFLAGS], Uses = [AX, DX] in def 16m : MulDivOpM, OpSize16; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EDX] in def 32m : MulDivOpM, OpSize32; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RDX] in def 64m : MulDivOpM, Requires<[In64BitMode]>; let Predicates = [In64BitMode] in { let Defs = [AL, AH], Uses = [AX] in def 8r_NF : MulDivOpR, NF; let Defs = [AX, DX], Uses = [AX, DX] in def 16r_NF : MulDivOpR, NF, PD; let Defs = [EAX, EDX], Uses = [EAX, EDX] in def 32r_NF : MulDivOpR, NF; let Defs = [RAX, RDX], Uses = [RAX, RDX] in def 64r_NF : MulDivOpR, NF; let Defs = [AL, AH], Uses = [AX] in def 8m_NF : MulDivOpM, NF; let Defs = [AX, DX], Uses = [AX, DX] in def 16m_NF : MulDivOpM, NF, PD; let Defs = [EAX, EDX], Uses = [EAX, EDX] in def 32m_NF : MulDivOpM, NF; let Defs = [RAX, RDX], Uses = [RAX, RDX] in def 64m_NF : MulDivOpM, NF; let Defs = [AL, AH, EFLAGS], Uses = [AX] in def 8r_EVEX : MulDivOpR, PL; let Defs = [AX, DX, EFLAGS], Uses = [AX, DX] in def 16r_EVEX : MulDivOpR, PL, 
PD; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EDX] in def 32r_EVEX : MulDivOpR, PL; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RDX] in def 64r_EVEX : MulDivOpR, PL; let Defs = [AL, AH, EFLAGS], Uses = [AX] in def 8m_EVEX : MulDivOpM, PL; let Defs = [AX, DX, EFLAGS], Uses = [AX, DX] in def 16m_EVEX : MulDivOpM, PL, PD; let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EDX] in def 32m_EVEX : MulDivOpM, PL; let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RDX] in def 64m_EVEX : MulDivOpM, PL; } } let hasSideEffects = 1 in { // so that we don't speculatively execute defm DIV: Div<0xF7, "div", MRM6r, MRM6m>; defm IDIV: Div<0xF7, "idiv", MRM7r, MRM7m>; } class IMulOpRR_R : BinOpRR_R<0xAF, "imul", t, ndd> { let Form = MRMSrcReg; let SchedRW = [sched]; // X = IMUL Y, Z --> X = IMUL Z, Y let isCommutable = 1; } class IMulOpRR_RF : BinOpRR_RF<0xAF, "imul", t, X86smul_flag, ndd> { let Form = MRMSrcReg; let SchedRW = [sched]; // X = IMUL Y, Z --> X = IMUL Z, Y let isCommutable = 1; } class IMulOpRM_R : BinOpRM_R<0xAF, "imul", t, ndd> { let Form = MRMSrcMem; let SchedRW = [sched.Folded, sched.ReadAfterFold]; } class IMulOpRM_RF : BinOpRM_RF<0xAF, "imul", t, X86smul_flag, ndd> { let Form = MRMSrcMem; let SchedRW = [sched.Folded, sched.ReadAfterFold]; } let Predicates = [NoNDD] in { def IMUL16rr : IMulOpRR_RF, TB, OpSize16; def IMUL32rr : IMulOpRR_RF, TB, OpSize32; def IMUL64rr : IMulOpRR_RF, TB; def IMUL16rm : IMulOpRM_RF, TB, OpSize16; def IMUL32rm : IMulOpRM_RF, TB, OpSize32; def IMUL64rm : IMulOpRM_RF, TB; } let Predicates = [HasNDD, In64BitMode] in { def IMUL16rr_ND : IMulOpRR_RF, PD; def IMUL32rr_ND : IMulOpRR_RF; def IMUL64rr_ND : IMulOpRR_RF; def IMUL16rm_ND : IMulOpRM_RF, PD; def IMUL32rm_ND : IMulOpRM_RF; def IMUL64rm_ND : IMulOpRM_RF; } let Predicates = [In64BitMode], Pattern = [(null_frag)] in { def IMUL16rr_NF : IMulOpRR_R, NF, PD; def IMUL32rr_NF : IMulOpRR_R, NF; def IMUL64rr_NF : IMulOpRR_R, NF; def IMUL16rm_NF : IMulOpRM_R, NF, PD; def IMUL32rm_NF : IMulOpRM_R, NF; def IMUL64rm_NF : IMulOpRM_R, NF; def IMUL16rr_NF_ND : IMulOpRR_R, EVEX_NF, PD; def IMUL32rr_NF_ND : IMulOpRR_R, EVEX_NF; def IMUL64rr_NF_ND : IMulOpRR_R, EVEX_NF; def IMUL16rm_NF_ND : IMulOpRM_R, EVEX_NF, PD; def IMUL32rm_NF_ND : IMulOpRM_R, EVEX_NF; def IMUL64rm_NF_ND : IMulOpRM_R, EVEX_NF; def IMUL16rr_EVEX : IMulOpRR_RF, PL, PD; def IMUL32rr_EVEX : IMulOpRR_RF, PL; def IMUL64rr_EVEX : IMulOpRR_RF, PL; def IMUL16rm_EVEX : IMulOpRM_RF, PL, PD; def IMUL32rm_EVEX : IMulOpRM_RF, PL; def IMUL64rm_EVEX : IMulOpRM_RF, PL; } class IMulOpRI8_R : BinOpRI8<0x6B, "imul", binop_ndd_args, t, MRMSrcReg, (outs t.RegClass:$dst)> { let SchedRW = [sched]; } class IMulOpRI_R : BinOpRI<0x69, "imul", binop_ndd_args, t, MRMSrcReg, (outs t.RegClass:$dst), []> { let SchedRW = [sched]; } class IMulOpRI_RF : BinOpRI<0x69, "imul", binop_ndd_args, t, MRMSrcReg, (outs t.RegClass:$dst), [(set t.RegClass:$dst, EFLAGS, (X86smul_flag t.RegClass:$src1, t.ImmNoSuOperator:$src2))]>, DefEFLAGS { let SchedRW = [sched]; } class IMulOpMI8_R : BinOpMI8<"imul", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst)> { let Opcode = 0x6B; let SchedRW = [sched.Folded]; } class IMulOpMI_R : BinOpMI<0x69, "imul", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst), []> { let SchedRW = [sched.Folded]; } class IMulOpMI_RF : BinOpMI<0x69, "imul", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst), [(set t.RegClass:$dst, EFLAGS, (X86smul_flag (t.LoadNode addr:$src1), t.ImmNoSuOperator:$src2))]>, DefEFLAGS { let SchedRW = [sched.Folded]; } def IMUL16rri8 : IMulOpRI8_R, DefEFLAGS, 
OpSize16; def IMUL32rri8 : IMulOpRI8_R, DefEFLAGS, OpSize32; def IMUL64rri8 : IMulOpRI8_R, DefEFLAGS; def IMUL16rri : IMulOpRI_RF, OpSize16; def IMUL32rri : IMulOpRI_RF, OpSize32; def IMUL64rri32 : IMulOpRI_RF; def IMUL16rmi8 : IMulOpMI8_R, DefEFLAGS, OpSize16; def IMUL32rmi8 : IMulOpMI8_R, DefEFLAGS, OpSize32; def IMUL64rmi8 : IMulOpMI8_R, DefEFLAGS; def IMUL16rmi : IMulOpMI_RF, OpSize16; def IMUL32rmi : IMulOpMI_RF, OpSize32; def IMUL64rmi32 : IMulOpMI_RF; let Predicates = [In64BitMode] in { def IMUL16rri8_NF : IMulOpRI8_R, NF, PD; def IMUL32rri8_NF : IMulOpRI8_R, NF; def IMUL64rri8_NF : IMulOpRI8_R, NF; def IMUL16rri_NF : IMulOpRI_R, NF, PD; def IMUL32rri_NF : IMulOpRI_R, NF; def IMUL64rri32_NF : IMulOpRI_R, NF; def IMUL16rmi8_NF : IMulOpMI8_R, NF, PD; def IMUL32rmi8_NF : IMulOpMI8_R, NF; def IMUL64rmi8_NF : IMulOpMI8_R, NF; def IMUL16rmi_NF : IMulOpMI_R, NF, PD; def IMUL32rmi_NF : IMulOpMI_R, NF; def IMUL64rmi32_NF : IMulOpMI_R, NF; def IMUL16rri8_EVEX : IMulOpRI8_R, DefEFLAGS, PL, PD; def IMUL32rri8_EVEX : IMulOpRI8_R, DefEFLAGS, PL; def IMUL64rri8_EVEX : IMulOpRI8_R, DefEFLAGS, PL; def IMUL16rri_EVEX : IMulOpRI_RF, PL, PD; def IMUL32rri_EVEX : IMulOpRI_RF, PL; def IMUL64rri32_EVEX : IMulOpRI_RF, PL; def IMUL16rmi8_EVEX : IMulOpMI8_R, DefEFLAGS, PL, PD; def IMUL32rmi8_EVEX : IMulOpMI8_R, DefEFLAGS, PL; def IMUL64rmi8_EVEX : IMulOpMI8_R, DefEFLAGS, PL; def IMUL16rmi_EVEX : IMulOpMI_RF, PL, PD; def IMUL32rmi_EVEX : IMulOpMI_RF, PL; def IMUL64rmi32_EVEX : IMulOpMI_RF, PL; } // IMULZU instructions class IMulZUOpRI8_R : BinOpRI8<0x6B, "imulzu", binop_ndd_args, t, MRMSrcReg, (outs t.RegClass:$dst)> { let SchedRW = [sched]; } class IMulZUOpRI_R : BinOpRI<0x69, "imulzu", binop_ndd_args, t, MRMSrcReg, (outs t.RegClass:$dst), []> { let SchedRW = [sched]; } class IMulZUOpMI8_R : BinOpMI8<"imulzu", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst)> { let Opcode = 0x6B; let SchedRW = [sched.Folded]; } class IMulZUOpMI_R : BinOpMI<0x69, "imulzu", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst), []> { let SchedRW = [sched.Folded]; } let Defs = [EFLAGS], Predicates = [HasEGPR, In64BitMode] in { def IMULZU16rri8 : IMulZUOpRI8_R, ZU, PD; def IMULZU16rmi8 : IMulZUOpMI8_R, ZU, PD; def IMULZU16rri : IMulZUOpRI_R, ZU, PD; def IMULZU16rmi : IMulZUOpMI_R, ZU, PD; def IMULZU32rri8 : IMulZUOpRI8_R, ZU; def IMULZU32rmi8 : IMulZUOpMI8_R, ZU; def IMULZU32rri : IMulZUOpRI_R, ZU; def IMULZU32rmi : IMulZUOpMI_R, ZU; def IMULZU64rri8 : IMulZUOpRI8_R, ZU; def IMULZU64rmi8 : IMulZUOpMI8_R, ZU; def IMULZU64rri32 : IMulZUOpRI_R, ZU; def IMULZU64rmi32 : IMulZUOpMI_R, ZU; } //===----------------------------------------------------------------------===// // INC and DEC Instructions // class IncOpR_RF : UnaryOpR_RF<0xFF, MRM0r, "inc", t, null_frag, ndd> { let Pattern = [(set t.RegClass:$dst, EFLAGS, (X86add_flag_nocf t.RegClass:$src1, 1))]; } class DecOpR_RF : UnaryOpR_RF<0xFF, MRM1r, "dec", t, null_frag, ndd> { let Pattern = [(set t.RegClass:$dst, EFLAGS, (X86sub_flag_nocf t.RegClass:$src1, 1))]; } class IncOpR_R : UnaryOpR_R<0xFF, MRM0r, "inc", t, null_frag, ndd>; class DecOpR_R : UnaryOpR_R<0xFF, MRM1r, "dec", t, null_frag, ndd>; class IncOpM_MF : UnaryOpM_MF<0xFF, MRM0m, "inc", t, null_frag> { let Pattern = [(store (add (t.LoadNode addr:$src1), 1), addr:$src1), (implicit EFLAGS)]; } class DecOpM_MF : UnaryOpM_MF<0xFF, MRM1m, "dec", t, null_frag> { let Pattern = [(store (add (t.LoadNode addr:$src1), -1), addr:$src1), (implicit EFLAGS)]; } class IncOpM_RF : UnaryOpM_RF<0xFF, MRM0m, "inc", t, null_frag> { let 
Pattern = [(set t.RegClass:$dst, EFLAGS, (add (t.LoadNode addr:$src1), 1))]; } class DecOpM_RF : UnaryOpM_RF<0xFF, MRM1m, "dec", t, null_frag> { let Pattern = [(set t.RegClass:$dst, EFLAGS, (add (t.LoadNode addr:$src1), -1))]; } class IncOpM_M : UnaryOpM_M<0xFF, MRM0m, "inc", t, null_frag>; class DecOpM_M : UnaryOpM_M<0xFF, MRM1m, "dec", t, null_frag>; class IncOpM_R : UnaryOpM_R<0xFF, MRM0m, "inc", t, null_frag>; class DecOpM_R : UnaryOpM_R<0xFF, MRM1m, "dec", t, null_frag>; // IncDec_Alt - Instructions like "inc reg" short forms. // Short forms only valid in 32-bit mode. Selected during MCInst lowering. class IncDec_Alt o, string m, X86TypeInfo t> : UnaryOpR_RF, Requires<[Not64BitMode]>; let isConvertibleToThreeAddress = 1 in { def INC16r_alt : IncDec_Alt<0x40, "inc", Xi16>, OpSize16; def INC32r_alt : IncDec_Alt<0x40, "inc", Xi32>, OpSize32; def DEC16r_alt : IncDec_Alt<0x48, "dec", Xi16>, OpSize16; def DEC32r_alt : IncDec_Alt<0x48, "dec", Xi32>, OpSize32; let Predicates = [NoNDD] in { def INC8r : IncOpR_RF; def INC16r : IncOpR_RF, OpSize16; def INC32r : IncOpR_RF, OpSize32; def INC64r : IncOpR_RF; def DEC8r : DecOpR_RF; def DEC16r : DecOpR_RF, OpSize16; def DEC32r : DecOpR_RF, OpSize32; def DEC64r : DecOpR_RF; } let Predicates = [HasNDD, In64BitMode] in { def INC8r_ND : IncOpR_RF; def INC16r_ND : IncOpR_RF, PD; def INC32r_ND : IncOpR_RF; def INC64r_ND : IncOpR_RF; def DEC8r_ND : DecOpR_RF; def DEC16r_ND : DecOpR_RF, PD; def DEC32r_ND : DecOpR_RF; def DEC64r_ND : DecOpR_RF; } let Predicates = [In64BitMode], Pattern = [(null_frag)] in { def INC8r_NF : IncOpR_R, NF; def INC16r_NF : IncOpR_R, NF, PD; def INC32r_NF : IncOpR_R, NF; def INC64r_NF : IncOpR_R, NF; def DEC8r_NF : DecOpR_R, NF; def DEC16r_NF : DecOpR_R, NF, PD; def DEC32r_NF : DecOpR_R, NF; def DEC64r_NF : DecOpR_R, NF; def INC8r_NF_ND : IncOpR_R, NF; def INC16r_NF_ND : IncOpR_R, NF, PD; def INC32r_NF_ND : IncOpR_R, NF; def INC64r_NF_ND : IncOpR_R, NF; def DEC8r_NF_ND : DecOpR_R, NF; def DEC16r_NF_ND : DecOpR_R, NF, PD; def DEC32r_NF_ND : DecOpR_R, NF; def DEC64r_NF_ND : DecOpR_R, NF; def INC8r_EVEX : IncOpR_RF, PL; def INC16r_EVEX : IncOpR_RF, PL, PD; def INC32r_EVEX : IncOpR_RF, PL; def INC64r_EVEX : IncOpR_RF, PL; def DEC8r_EVEX : DecOpR_RF, PL; def DEC16r_EVEX : DecOpR_RF, PL, PD; def DEC32r_EVEX : DecOpR_RF, PL; def DEC64r_EVEX : DecOpR_RF, PL; } } let Predicates = [UseIncDec] in { def INC8m : IncOpM_MF; def INC16m : IncOpM_MF, OpSize16; def INC32m : IncOpM_MF, OpSize32; def DEC8m : DecOpM_MF; def DEC16m : DecOpM_MF, OpSize16; def DEC32m : DecOpM_MF, OpSize32; } let Predicates = [UseIncDec, In64BitMode] in { def INC64m : IncOpM_MF; def DEC64m : DecOpM_MF; } let Predicates = [HasNDD, In64BitMode, UseIncDec] in { def INC8m_ND : IncOpM_RF; def INC16m_ND : IncOpM_RF, PD; def INC32m_ND : IncOpM_RF; def DEC8m_ND : DecOpM_RF; def DEC16m_ND : DecOpM_RF, PD; def DEC32m_ND : DecOpM_RF; def INC64m_ND : IncOpM_RF; def DEC64m_ND : DecOpM_RF; } let Predicates = [In64BitMode], Pattern = [(null_frag)] in { def INC8m_NF : IncOpM_M, NF; def INC16m_NF : IncOpM_M, NF, PD; def INC32m_NF : IncOpM_M, NF; def INC64m_NF : IncOpM_M, NF; def DEC8m_NF : DecOpM_M, NF; def DEC16m_NF : DecOpM_M, NF, PD; def DEC32m_NF : DecOpM_M, NF; def DEC64m_NF : DecOpM_M, NF; def INC8m_NF_ND : IncOpM_R, NF; def INC16m_NF_ND : IncOpM_R, NF, PD; def INC32m_NF_ND : IncOpM_R, NF; def INC64m_NF_ND : IncOpM_R, NF; def DEC8m_NF_ND : DecOpM_R, NF; def DEC16m_NF_ND : DecOpM_R, NF, PD; def DEC32m_NF_ND : DecOpM_R, NF; def DEC64m_NF_ND : DecOpM_R, NF; def INC8m_EVEX : IncOpM_MF, 
PL; def INC16m_EVEX : IncOpM_MF, PL, PD; def INC32m_EVEX : IncOpM_MF, PL; def INC64m_EVEX : IncOpM_MF, PL; def DEC8m_EVEX : DecOpM_MF, PL; def DEC16m_EVEX : DecOpM_MF, PL, PD; def DEC32m_EVEX : DecOpM_MF, PL; def DEC64m_EVEX : DecOpM_MF, PL; } //===----------------------------------------------------------------------===// // NEG and NOT Instructions // class NegOpR_R : UnaryOpR_R<0xF7, MRM3r, "neg", t, ineg, ndd>; class NegOpR_RF : UnaryOpR_RF<0xF7, MRM3r, "neg", t, ineg, ndd>; class NegOpM_M : UnaryOpM_M<0xF7, MRM3m, "neg", t, null_frag>; class NegOpM_MF : UnaryOpM_MF<0xF7, MRM3m, "neg", t, ineg>; class NegOpM_R : UnaryOpM_R<0xF7, MRM3m, "neg", t, null_frag>; class NegOpM_RF : UnaryOpM_RF<0xF7, MRM3m, "neg", t, ineg>; class NotOpR_R : UnaryOpR_R<0xF7, MRM2r, "not", t, not, ndd>; class NotOpM_M : UnaryOpM_M<0xF7, MRM2m, "not", t, not>; class NotOpM_R : UnaryOpM_R<0xF7, MRM2m, "not", t, not>; let Predicates = [NoNDD] in { def NEG8r : NegOpR_RF; def NEG16r : NegOpR_RF, OpSize16; def NEG32r : NegOpR_RF, OpSize32; def NEG64r : NegOpR_RF; def NOT8r : NotOpR_R; def NOT16r : NotOpR_R, OpSize16; def NOT32r : NotOpR_R, OpSize32; def NOT64r : NotOpR_R; } let Predicates = [HasNDD, In64BitMode] in { def NEG8r_ND : NegOpR_RF; def NEG16r_ND : NegOpR_RF, PD; def NEG32r_ND : NegOpR_RF; def NEG64r_ND : NegOpR_RF; def NOT8r_ND : NotOpR_R; def NOT16r_ND : NotOpR_R, PD; def NOT32r_ND : NotOpR_R; def NOT64r_ND : NotOpR_R; def NEG8r_NF_ND : NegOpR_R, EVEX_NF; def NEG16r_NF_ND : NegOpR_R, EVEX_NF, PD; def NEG32r_NF_ND : NegOpR_R, EVEX_NF; def NEG64r_NF_ND : NegOpR_R, EVEX_NF; } def NEG8m : NegOpM_MF; def NEG16m : NegOpM_MF, OpSize16; def NEG32m : NegOpM_MF, OpSize32; def NEG64m : NegOpM_MF, Requires<[In64BitMode]>; let Predicates = [HasNDD, In64BitMode] in { def NEG8m_ND : NegOpM_RF; def NEG16m_ND : NegOpM_RF, PD; def NEG32m_ND : NegOpM_RF; def NEG64m_ND : NegOpM_RF; def NEG8m_NF_ND : NegOpM_R, EVEX_NF; def NEG16m_NF_ND : NegOpM_R, EVEX_NF, PD; def NEG32m_NF_ND : NegOpM_R, EVEX_NF; def NEG64m_NF_ND : NegOpM_R, EVEX_NF; } def NOT8m : NotOpM_M; def NOT16m : NotOpM_M, OpSize16; def NOT32m : NotOpM_M, OpSize32; def NOT64m : NotOpM_M, Requires<[In64BitMode]>; let Predicates = [HasNDD, In64BitMode] in { def NOT8m_ND : NotOpM_R; def NOT16m_ND : NotOpM_R, PD; def NOT32m_ND : NotOpM_R; def NOT64m_ND : NotOpM_R; } let Predicates = [In64BitMode], Pattern = [(null_frag)] in { def NEG8r_NF : NegOpR_R, NF; def NEG16r_NF : NegOpR_R, NF, PD; def NEG32r_NF : NegOpR_R, NF; def NEG64r_NF : NegOpR_R, NF; def NEG8m_NF : NegOpM_M, NF; def NEG16m_NF : NegOpM_M, NF, PD; def NEG32m_NF : NegOpM_M, NF; def NEG64m_NF : NegOpM_M, NF; def NEG8r_EVEX : NegOpR_RF, PL; def NEG16r_EVEX : NegOpR_RF, PL, PD; def NEG32r_EVEX : NegOpR_RF, PL; def NEG64r_EVEX : NegOpR_RF, PL; def NOT8r_EVEX : NotOpR_R, PL; def NOT16r_EVEX : NotOpR_R, PL, PD; def NOT32r_EVEX : NotOpR_R, PL; def NOT64r_EVEX : NotOpR_R, PL; def NEG8m_EVEX : NegOpM_MF, PL; def NEG16m_EVEX : NegOpM_MF, PL, PD; def NEG32m_EVEX : NegOpM_MF, PL; def NEG64m_EVEX : NegOpM_MF, PL; def NOT8m_EVEX : NotOpM_M, PL; def NOT16m_EVEX : NotOpM_M, PL, PD; def NOT32m_EVEX : NotOpM_M, PL; def NOT64m_EVEX : NotOpM_M, PL; } /// ArithBinOp_RF - This is an arithmetic binary operator where the pattern is /// defined with "(set GPR:$dst, EFLAGS, (...". 
/// /// It would be nice to get rid of the second and third argument here, but /// tblgen can't handle dependent type references aggressively enough: PR8330 multiclass ArithBinOp_RF BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, string mnemonic, Format RegMRM, Format MemMRM, SDNode opnodeflag, SDNode opnode, bit CommutableRR, bit ConvertibleToThreeAddress, bit ConvertibleToThreeAddressRR> { let isCommutable = CommutableRR, isConvertibleToThreeAddress = ConvertibleToThreeAddressRR in { let Predicates = [NoNDD] in { def 8rr : BinOpRR_RF; def 16rr : BinOpRR_RF, OpSize16; def 32rr : BinOpRR_RF, OpSize32; def 64rr : BinOpRR_RF; } let Predicates = [HasNDD, In64BitMode] in { def 8rr_ND : BinOpRR_RF; def 16rr_ND : BinOpRR_RF, PD; def 32rr_ND : BinOpRR_RF; def 64rr_ND : BinOpRR_RF; def 8rr_NF_ND : BinOpRR_R, EVEX_NF; def 16rr_NF_ND : BinOpRR_R, EVEX_NF, PD; def 32rr_NF_ND : BinOpRR_R, EVEX_NF; def 64rr_NF_ND : BinOpRR_R, EVEX_NF; } let Predicates = [In64BitMode] in { def 8rr_NF : BinOpRR_R, NF; def 16rr_NF : BinOpRR_R, NF, PD; def 32rr_NF : BinOpRR_R, NF; def 64rr_NF : BinOpRR_R, NF; def 8rr_EVEX : BinOpRR_RF, PL; def 16rr_EVEX : BinOpRR_RF, PL, PD; def 32rr_EVEX : BinOpRR_RF, PL; def 64rr_EVEX : BinOpRR_RF, PL; } } def 8rr_REV : BinOpRR_RF_Rev; def 16rr_REV : BinOpRR_RF_Rev, OpSize16; def 32rr_REV : BinOpRR_RF_Rev, OpSize32; def 64rr_REV : BinOpRR_RF_Rev; let Predicates = [In64BitMode] in { def 8rr_EVEX_REV : BinOpRR_RF_Rev, PL; def 16rr_EVEX_REV : BinOpRR_RF_Rev, PL, PD; def 32rr_EVEX_REV : BinOpRR_RF_Rev, PL; def 64rr_EVEX_REV : BinOpRR_RF_Rev, PL; def 8rr_ND_REV : BinOpRR_RF_Rev; def 16rr_ND_REV : BinOpRR_RF_Rev, PD; def 32rr_ND_REV : BinOpRR_RF_Rev; def 64rr_ND_REV : BinOpRR_RF_Rev; def 8rr_NF_REV : BinOpRR_R_Rev, NF; def 16rr_NF_REV : BinOpRR_R_Rev, NF, PD; def 32rr_NF_REV : BinOpRR_R_Rev, NF; def 64rr_NF_REV : BinOpRR_R_Rev, NF; def 8rr_NF_ND_REV : BinOpRR_R_Rev, EVEX_NF; def 16rr_NF_ND_REV : BinOpRR_R_Rev, EVEX_NF, PD; def 32rr_NF_ND_REV : BinOpRR_R_Rev, EVEX_NF; def 64rr_NF_ND_REV : BinOpRR_R_Rev, EVEX_NF; } let Predicates = [NoNDD] in { def 8rm : BinOpRM_RF; def 16rm : BinOpRM_RF, OpSize16; def 32rm : BinOpRM_RF, OpSize32; def 64rm : BinOpRM_RF; } let Predicates = [HasNDD, In64BitMode] in { def 8rm_ND : BinOpRM_RF; def 16rm_ND : BinOpRM_RF, PD; def 32rm_ND : BinOpRM_RF; def 64rm_ND : BinOpRM_RF; def 8rm_NF_ND : BinOpRM_R, EVEX_NF; def 16rm_NF_ND : BinOpRM_R, EVEX_NF, PD; def 32rm_NF_ND : BinOpRM_R, EVEX_NF; def 64rm_NF_ND : BinOpRM_R, EVEX_NF; } let Predicates = [In64BitMode] in { def 8rm_NF : BinOpRM_R, NF; def 16rm_NF : BinOpRM_R, NF, PD; def 32rm_NF : BinOpRM_R, NF; def 64rm_NF : BinOpRM_R, NF; def 8rm_EVEX : BinOpRM_RF, PL; def 16rm_EVEX : BinOpRM_RF, PL, PD; def 32rm_EVEX : BinOpRM_RF, PL; def 64rm_EVEX : BinOpRM_RF, PL; } let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { let Predicates = [NoNDD] in { // NOTE: These are order specific, we want the ri8 forms to be listed // first so that they are slightly preferred to the ri forms. 
def 16ri8 : BinOpRI8_RF<0x83, mnemonic, Xi16, RegMRM>, OpSize16; def 32ri8 : BinOpRI8_RF<0x83, mnemonic, Xi32, RegMRM>, OpSize32; def 64ri8 : BinOpRI8_RF<0x83, mnemonic, Xi64, RegMRM>; def 8ri : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM>; def 16ri : BinOpRI_RF<0x81, mnemonic, Xi16, opnodeflag, RegMRM>, OpSize16; def 32ri : BinOpRI_RF<0x81, mnemonic, Xi32, opnodeflag, RegMRM>, OpSize32; def 64ri32: BinOpRI_RF<0x81, mnemonic, Xi64, opnodeflag, RegMRM>; } let Predicates = [HasNDD, In64BitMode] in { def 16ri8_ND : BinOpRI8_RF<0x83, mnemonic, Xi16, RegMRM, 1>, PD; def 32ri8_ND : BinOpRI8_RF<0x83, mnemonic, Xi32, RegMRM, 1>; def 64ri8_ND : BinOpRI8_RF<0x83, mnemonic, Xi64, RegMRM, 1>; def 8ri_ND : BinOpRI_RF<0x80, mnemonic, Xi8 , opnodeflag, RegMRM, 1>; def 16ri_ND : BinOpRI_RF<0x81, mnemonic, Xi16, opnodeflag, RegMRM, 1>, PD; def 32ri_ND : BinOpRI_RF<0x81, mnemonic, Xi32, opnodeflag, RegMRM, 1>; def 64ri32_ND: BinOpRI_RF<0x81, mnemonic, Xi64, opnodeflag, RegMRM, 1>; def 16ri8_NF_ND : BinOpRI8_R<0x83, mnemonic, Xi16, RegMRM, 1>, EVEX_NF, PD; def 32ri8_NF_ND : BinOpRI8_R<0x83, mnemonic, Xi32, RegMRM, 1>, EVEX_NF; def 64ri8_NF_ND : BinOpRI8_R<0x83, mnemonic, Xi64, RegMRM, 1>, EVEX_NF; def 8ri_NF_ND : BinOpRI_R<0x80, mnemonic, Xi8, RegMRM, 1>, EVEX_NF; def 16ri_NF_ND : BinOpRI_R<0x81, mnemonic, Xi16, RegMRM, 1>, EVEX_NF, PD; def 32ri_NF_ND : BinOpRI_R<0x81, mnemonic, Xi32, RegMRM, 1>, EVEX_NF; def 64ri32_NF_ND : BinOpRI_R<0x81, mnemonic, Xi64, RegMRM, 1>, EVEX_NF; } let Predicates = [In64BitMode] in { def 16ri8_NF : BinOpRI8_R<0x83, mnemonic, Xi16, RegMRM>, NF, PD; def 32ri8_NF : BinOpRI8_R<0x83, mnemonic, Xi32, RegMRM>, NF; def 64ri8_NF : BinOpRI8_R<0x83, mnemonic, Xi64, RegMRM>, NF; def 8ri_NF : BinOpRI_R<0x80, mnemonic, Xi8, RegMRM>, NF; def 16ri_NF : BinOpRI_R<0x81, mnemonic, Xi16, RegMRM>, NF, PD; def 32ri_NF : BinOpRI_R<0x81, mnemonic, Xi32, RegMRM>, NF; def 64ri32_NF : BinOpRI_R<0x81, mnemonic, Xi64, RegMRM>, NF; def 16ri8_EVEX : BinOpRI8_RF<0x83, mnemonic, Xi16, RegMRM>, PL, PD; def 32ri8_EVEX : BinOpRI8_RF<0x83, mnemonic, Xi32, RegMRM>, PL; def 64ri8_EVEX : BinOpRI8_RF<0x83, mnemonic, Xi64, RegMRM>, PL; def 8ri_EVEX : BinOpRI_RF<0x80, mnemonic, Xi8 , null_frag, RegMRM>, PL; def 16ri_EVEX : BinOpRI_RF<0x81, mnemonic, Xi16, null_frag, RegMRM>, PL, PD; def 32ri_EVEX : BinOpRI_RF<0x81, mnemonic, Xi32, null_frag, RegMRM>, PL; def 64ri32_EVEX: BinOpRI_RF<0x81, mnemonic, Xi64, null_frag, RegMRM>, PL; } } def 8mr : BinOpMR_MF; def 16mr : BinOpMR_MF, OpSize16; def 32mr : BinOpMR_MF, OpSize32; def 64mr : BinOpMR_MF; let Predicates = [HasNDD, In64BitMode] in { defvar node = !if(!eq(CommutableRR, 0), opnode, null_frag); def 8mr_ND : BinOpMR_RF; def 16mr_ND : BinOpMR_RF, PD; def 32mr_ND : BinOpMR_RF; def 64mr_ND : BinOpMR_RF; def 8mr_NF_ND : BinOpMR_R, EVEX_NF; def 16mr_NF_ND : BinOpMR_R, EVEX_NF, PD; def 32mr_NF_ND : BinOpMR_R, EVEX_NF; def 64mr_NF_ND : BinOpMR_R, EVEX_NF; } let Predicates = [In64BitMode] in { def 8mr_NF : BinOpMR_M, NF; def 16mr_NF : BinOpMR_M, NF, PD; def 32mr_NF : BinOpMR_M, NF; def 64mr_NF : BinOpMR_M, NF; def 8mr_EVEX : BinOpMR_MF, PL; def 16mr_EVEX : BinOpMR_MF, PL, PD; def 32mr_EVEX : BinOpMR_MF, PL; def 64mr_EVEX : BinOpMR_MF, PL; } // NOTE: These are order specific, we want the mi8 forms to be listed // first so that they are slightly preferred to the mi forms. 
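// (The ri8/mi8 forms use opcode 0x83 with a sign-extended 8-bit immediate, so
// whenever both encodings are legal the imm8 form is the shorter encoding.)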
def 16mi8 : BinOpMI8_MF, OpSize16; def 32mi8 : BinOpMI8_MF, OpSize32; let Predicates = [In64BitMode] in def 64mi8 : BinOpMI8_MF; def 8mi : BinOpMI_MF<0x80, mnemonic, Xi8 , opnode, MemMRM>; def 16mi : BinOpMI_MF<0x81, mnemonic, Xi16, opnode, MemMRM>, OpSize16; def 32mi : BinOpMI_MF<0x81, mnemonic, Xi32, opnode, MemMRM>, OpSize32; let Predicates = [In64BitMode] in def 64mi32 : BinOpMI_MF<0x81, mnemonic, Xi64, opnode, MemMRM>; let Predicates = [HasNDD, In64BitMode] in { def 16mi8_ND : BinOpMI8_RF, PD; def 32mi8_ND : BinOpMI8_RF; def 64mi8_ND : BinOpMI8_RF; def 8mi_ND : BinOpMI_RF<0x80, mnemonic, Xi8 , opnode, MemMRM>; def 16mi_ND : BinOpMI_RF<0x81, mnemonic, Xi16, opnode, MemMRM>, PD; def 32mi_ND : BinOpMI_RF<0x81, mnemonic, Xi32, opnode, MemMRM>; def 64mi32_ND : BinOpMI_RF<0x81, mnemonic, Xi64, opnode, MemMRM>; def 16mi8_NF_ND : BinOpMI8_R, NF, PD; def 32mi8_NF_ND : BinOpMI8_R, NF; def 64mi8_NF_ND : BinOpMI8_R, NF; def 8mi_NF_ND : BinOpMI_R<0x80, mnemonic, Xi8, MemMRM>, NF; def 16mi_NF_ND : BinOpMI_R<0x81, mnemonic, Xi16, MemMRM>, NF, PD; def 32mi_NF_ND : BinOpMI_R<0x81, mnemonic, Xi32, MemMRM>, NF; def 64mi32_NF_ND : BinOpMI_R<0x81, mnemonic, Xi64, MemMRM>, NF; } let Predicates = [In64BitMode] in { def 16mi8_NF : BinOpMI8_M, NF, PD; def 32mi8_NF : BinOpMI8_M, NF; def 64mi8_NF : BinOpMI8_M, NF; def 8mi_NF : BinOpMI_M<0x80, mnemonic, Xi8, MemMRM>, NF; def 16mi_NF : BinOpMI_M<0x81, mnemonic, Xi16, MemMRM>, NF, PD; def 32mi_NF : BinOpMI_M<0x81, mnemonic, Xi32, MemMRM>, NF; def 64mi32_NF : BinOpMI_M<0x81, mnemonic, Xi64, MemMRM>, NF; def 16mi8_EVEX : BinOpMI8_MF, PL, PD; def 32mi8_EVEX : BinOpMI8_MF, PL; def 64mi8_EVEX : BinOpMI8_MF, PL; def 8mi_EVEX : BinOpMI_MF<0x80, mnemonic, Xi8 , null_frag, MemMRM>, PL; def 16mi_EVEX : BinOpMI_MF<0x81, mnemonic, Xi16, null_frag, MemMRM>, PL, PD; def 32mi_EVEX : BinOpMI_MF<0x81, mnemonic, Xi32, null_frag, MemMRM>, PL; def 64mi32_EVEX : BinOpMI_MF<0x81, mnemonic, Xi64, null_frag, MemMRM>, PL; } // These are for the disassembler since 0x82 opcode behaves like 0x80, but // not in 64-bit mode. let Predicates = [Not64BitMode] in { def 8ri8 : BinOpRI8_RF<0x82, mnemonic, Xi8, RegMRM>, DisassembleOnly; def 8mi8 : BinOpMI8_MF, DisassembleOnly; } def 8i8 : BinOpAI_AF; def 16i16 : BinOpAI_AF, OpSize16; def 32i32 : BinOpAI_AF, OpSize32; def 64i32 : BinOpAI_AF; } /// ArithBinOp_RFF - This is an arithmetic binary operator where the pattern is /// defined with "(set GPR:$dst, EFLAGS, (node LHS, RHS, EFLAGS))" like ADC and /// SBB. 
/// /// It would be nice to get rid of the second and third argument here, but /// tblgen can't handle dependent type references aggressively enough: PR8330 multiclass ArithBinOp_RFF BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, string mnemonic, Format RegMRM, Format MemMRM, SDNode opnode, bit CommutableRR, bit ConvertibleToThreeAddress> { let isCommutable = CommutableRR in { let Predicates = [NoNDD] in { def 8rr : BinOpRRF_RF; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { def 16rr : BinOpRRF_RF, OpSize16; def 32rr : BinOpRRF_RF, OpSize32; def 64rr : BinOpRRF_RF; } } let Predicates = [HasNDD, In64BitMode] in { def 8rr_ND : BinOpRRF_RF; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { def 16rr_ND : BinOpRRF_RF, PD; def 32rr_ND : BinOpRRF_RF; def 64rr_ND : BinOpRRF_RF; } } } // isCommutable let Predicates = [In64BitMode] in { def 8rr_EVEX : BinOpRRF_RF, PL; def 16rr_EVEX : BinOpRRF_RF, PL, PD; def 32rr_EVEX : BinOpRRF_RF, PL; def 64rr_EVEX : BinOpRRF_RF, PL; } def 8rr_REV : BinOpRRF_RF_Rev; def 16rr_REV : BinOpRRF_RF_Rev, OpSize16; def 32rr_REV : BinOpRRF_RF_Rev, OpSize32; def 64rr_REV : BinOpRRF_RF_Rev; let Predicates = [In64BitMode] in { def 8rr_ND_REV : BinOpRRF_RF_Rev; def 16rr_ND_REV : BinOpRRF_RF_Rev, PD; def 32rr_ND_REV : BinOpRRF_RF_Rev; def 64rr_ND_REV : BinOpRRF_RF_Rev; def 8rr_EVEX_REV : BinOpRRF_RF_Rev, PL; def 16rr_EVEX_REV : BinOpRRF_RF_Rev, PL, PD; def 32rr_EVEX_REV : BinOpRRF_RF_Rev, PL; def 64rr_EVEX_REV : BinOpRRF_RF_Rev, PL; } let Predicates = [NoNDD] in { def 8rm : BinOpRMF_RF; def 16rm : BinOpRMF_RF, OpSize16; def 32rm : BinOpRMF_RF, OpSize32; def 64rm : BinOpRMF_RF; } let Predicates = [HasNDD, In64BitMode] in { def 8rm_ND : BinOpRMF_RF; def 16rm_ND : BinOpRMF_RF, PD; def 32rm_ND : BinOpRMF_RF; def 64rm_ND : BinOpRMF_RF; } let Predicates = [In64BitMode] in { def 8rm_EVEX : BinOpRMF_RF, PL; def 16rm_EVEX : BinOpRMF_RF, PL, PD; def 32rm_EVEX : BinOpRMF_RF, PL; def 64rm_EVEX : BinOpRMF_RF, PL; } let Predicates = [NoNDD] in { def 8ri : BinOpRIF_RF<0x80, mnemonic, Xi8 , opnode, RegMRM>; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { // NOTE: These are order specific, we want the ri8 forms to be listed // first so that they are slightly preferred to the ri forms. 
def 16ri8 : BinOpRI8F_RF<0x83, mnemonic, Xi16, RegMRM>, OpSize16; def 32ri8 : BinOpRI8F_RF<0x83, mnemonic, Xi32, RegMRM>, OpSize32; def 64ri8 : BinOpRI8F_RF<0x83, mnemonic, Xi64, RegMRM>; def 16ri : BinOpRIF_RF<0x81, mnemonic, Xi16, opnode, RegMRM>, OpSize16; def 32ri : BinOpRIF_RF<0x81, mnemonic, Xi32, opnode, RegMRM>, OpSize32; def 64ri32: BinOpRIF_RF<0x81, mnemonic, Xi64, opnode, RegMRM>; } } let Predicates = [HasNDD, In64BitMode] in { def 8ri_ND : BinOpRIF_RF<0x80, mnemonic, Xi8 , opnode, RegMRM, 1>; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { def 16ri8_ND : BinOpRI8F_RF<0x83, mnemonic, Xi16, RegMRM, 1>, PD; def 32ri8_ND : BinOpRI8F_RF<0x83, mnemonic, Xi32, RegMRM, 1>; def 64ri8_ND : BinOpRI8F_RF<0x83, mnemonic, Xi64, RegMRM, 1>; def 16ri_ND : BinOpRIF_RF<0x81, mnemonic, Xi16, opnode, RegMRM, 1>, PD; def 32ri_ND : BinOpRIF_RF<0x81, mnemonic, Xi32, opnode, RegMRM, 1>; def 64ri32_ND: BinOpRIF_RF<0x81, mnemonic, Xi64, opnode, RegMRM, 1>; } } let Predicates = [In64BitMode] in { def 8ri_EVEX : BinOpRIF_RF<0x80, mnemonic, Xi8 , opnode, RegMRM>, PL; def 16ri8_EVEX : BinOpRI8F_RF<0x83, mnemonic, Xi16, RegMRM>, PL, PD; def 32ri8_EVEX : BinOpRI8F_RF<0x83, mnemonic, Xi32, RegMRM>, PL; def 64ri8_EVEX : BinOpRI8F_RF<0x83, mnemonic, Xi64, RegMRM>, PL; def 16ri_EVEX : BinOpRIF_RF<0x81, mnemonic, Xi16, opnode, RegMRM>, PL, PD; def 32ri_EVEX : BinOpRIF_RF<0x81, mnemonic, Xi32, opnode, RegMRM>, PL; def 64ri32_EVEX: BinOpRIF_RF<0x81, mnemonic, Xi64, opnode, RegMRM>, PL; } def 8mr : BinOpMRF_MF; def 16mr : BinOpMRF_MF, OpSize16; def 32mr : BinOpMRF_MF, OpSize32; def 64mr : BinOpMRF_MF; let Predicates = [HasNDD, In64BitMode] in { defvar node = !if(!eq(CommutableRR, 0), opnode, null_frag); def 8mr_ND : BinOpMRF_RF; def 16mr_ND : BinOpMRF_RF, PD; def 32mr_ND : BinOpMRF_RF; def 64mr_ND : BinOpMRF_RF; } let Predicates = [In64BitMode] in { def 8mr_EVEX : BinOpMRF_MF, PL; def 16mr_EVEX : BinOpMRF_MF, PL, PD; def 32mr_EVEX : BinOpMRF_MF, PL; def 64mr_EVEX : BinOpMRF_MF, PL; } // NOTE: These are order specific, we want the mi8 forms to be listed // first so that they are slightly preferred to the mi forms. def 8mi : BinOpMIF_MF<0x80, mnemonic, Xi8 , opnode, MemMRM>; def 16mi8 : BinOpMI8F_MF, OpSize16; def 32mi8 : BinOpMI8F_MF, OpSize32; let Predicates = [In64BitMode] in def 64mi8 : BinOpMI8F_MF; def 16mi : BinOpMIF_MF<0x81, mnemonic, Xi16, opnode, MemMRM>, OpSize16; def 32mi : BinOpMIF_MF<0x81, mnemonic, Xi32, opnode, MemMRM>, OpSize32; let Predicates = [In64BitMode] in def 64mi32 : BinOpMIF_MF<0x81, mnemonic, Xi64, opnode, MemMRM>; let Predicates = [HasNDD, In64BitMode] in { def 8mi_ND : BinOpMIF_RF<0x80, mnemonic, Xi8 , opnode, MemMRM>; def 16mi8_ND : BinOpMI8F_RF, PD; def 32mi8_ND : BinOpMI8F_RF; def 64mi8_ND : BinOpMI8F_RF; def 16mi_ND : BinOpMIF_RF<0x81, mnemonic, Xi16, opnode, MemMRM>, PD; def 32mi_ND : BinOpMIF_RF<0x81, mnemonic, Xi32, opnode, MemMRM>; def 64mi32_ND : BinOpMIF_RF<0x81, mnemonic, Xi64, opnode, MemMRM>; } let Predicates = [In64BitMode] in { def 8mi_EVEX : BinOpMIF_MF<0x80, mnemonic, Xi8 , opnode, MemMRM>, PL; def 16mi8_EVEX : BinOpMI8F_MF, PL, PD; def 32mi8_EVEX : BinOpMI8F_MF, PL; def 64mi8_EVEX : BinOpMI8F_MF, PL; def 16mi_EVEX : BinOpMIF_MF<0x81, mnemonic, Xi16, opnode, MemMRM>, PL, PD; def 32mi_EVEX : BinOpMIF_MF<0x81, mnemonic, Xi32, opnode, MemMRM>, PL; def 64mi32_EVEX : BinOpMIF_MF<0x81, mnemonic, Xi64, opnode, MemMRM>, PL; } // These are for the disassembler since 0x82 opcode behaves like 0x80, but // not in 64-bit mode. 
let Predicates = [Not64BitMode] in { def 8ri8 : BinOpRI8F_RF<0x82, mnemonic, Xi8, RegMRM>, DisassembleOnly; def 8mi8 : BinOpMI8F_MF, DisassembleOnly; } def 8i8 : BinOpAIF_AF; def 16i16 : BinOpAIF_AF, OpSize16; def 32i32 : BinOpAIF_AF, OpSize32; def 64i32 : BinOpAIF_AF; } /// ArithBinOp_F - This is an arithmetic binary operator where the pattern is /// defined with "(set EFLAGS, (...". It would be really nice to find a way /// to factor this with the other ArithBinOp_*. /// multiclass ArithBinOp_F BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4, string mnemonic, Format RegMRM, Format MemMRM, SDNode opnode, bit CommutableRR, bit ConvertibleToThreeAddress> { let isCommutable = CommutableRR in { def 8rr : BinOpRR_F; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { def 16rr : BinOpRR_F, OpSize16; def 32rr : BinOpRR_F, OpSize32; def 64rr : BinOpRR_F; } // isConvertibleToThreeAddress } // isCommutable def 8rr_REV : BinOpRR_F_Rev; def 16rr_REV : BinOpRR_F_Rev, OpSize16; def 32rr_REV : BinOpRR_F_Rev, OpSize32; def 64rr_REV : BinOpRR_F_Rev; def 8rm : BinOpRM_F; def 16rm : BinOpRM_F, OpSize16; def 32rm : BinOpRM_F, OpSize32; def 64rm : BinOpRM_F; def 8ri : BinOpRI_F<0x80, mnemonic, Xi8 , opnode, RegMRM>; let isConvertibleToThreeAddress = ConvertibleToThreeAddress in { // NOTE: These are order specific, we want the ri8 forms to be listed // first so that they are slightly preferred to the ri forms. def 16ri8 : BinOpRI8_F<0x83, mnemonic, Xi16, RegMRM>, OpSize16; def 32ri8 : BinOpRI8_F<0x83, mnemonic, Xi32, RegMRM>, OpSize32; def 64ri8 : BinOpRI8_F<0x83, mnemonic, Xi64, RegMRM>; def 16ri : BinOpRI_F<0x81, mnemonic, Xi16, opnode, RegMRM>, OpSize16; def 32ri : BinOpRI_F<0x81, mnemonic, Xi32, opnode, RegMRM>, OpSize32; def 64ri32: BinOpRI_F<0x81, mnemonic, Xi64, opnode, RegMRM>; } def 8mr : BinOpMR_F; def 16mr : BinOpMR_F, OpSize16; def 32mr : BinOpMR_F, OpSize32; def 64mr : BinOpMR_F; // NOTE: These are order specific, we want the mi8 forms to be listed // first so that they are slightly preferred to the mi forms. def 16mi8 : BinOpMI8_F, OpSize16; def 32mi8 : BinOpMI8_F, OpSize32; let Predicates = [In64BitMode] in def 64mi8 : BinOpMI8_F; def 8mi : BinOpMI_F<0x80, mnemonic, Xi8 , opnode, MemMRM>; def 16mi : BinOpMI_F<0x81, mnemonic, Xi16, opnode, MemMRM>, OpSize16; def 32mi : BinOpMI_F<0x81, mnemonic, Xi32, opnode, MemMRM>, OpSize32; let Predicates = [In64BitMode] in def 64mi32 : BinOpMI_F<0x81, mnemonic, Xi64, opnode, MemMRM>; // These are for the disassembler since 0x82 opcode behaves like 0x80, but // not in 64-bit mode. let Predicates = [Not64BitMode] in { def 8ri8 : BinOpRI8_F<0x82, mnemonic, Xi8, RegMRM>, DisassembleOnly; let mayLoad = 1 in def 8mi8 : BinOpMI8_F; } def 8i8 : BinOpAI_F; def 16i16 : BinOpAI_F, OpSize16; def 32i32 : BinOpAI_F, OpSize32; def 64i32 : BinOpAI_F; } defm AND : ArithBinOp_RF<0x21, 0x23, 0x25, "and", MRM4r, MRM4m, X86and_flag, and, 1, 0, 0>; defm OR : ArithBinOp_RF<0x09, 0x0B, 0x0D, "or", MRM1r, MRM1m, X86or_flag, or, 1, 0, 0>; defm XOR : ArithBinOp_RF<0x31, 0x33, 0x35, "xor", MRM6r, MRM6m, X86xor_flag, xor, 1, 0, 0>; defm ADD : ArithBinOp_RF<0x01, 0x03, 0x05, "add", MRM0r, MRM0m, X86add_flag, add, 1, 1, 1>; let isCompare = 1 in { defm SUB : ArithBinOp_RF<0x29, 0x2B, 0x2D, "sub", MRM5r, MRM5m, X86sub_flag, sub, 0, 1, 0>; } // Version of XOR8rr_NOREX that use GR8_NOREX. This is used by the handling of // __builtin_parity where the last step xors an h-register with an l-register. 
let isCodeGenOnly = 1, hasSideEffects = 0, Constraints = "$src1 = $dst",
    Defs = [EFLAGS], isCommutable = 1 in
def XOR8rr_NOREX : I<0x30, MRMDestReg, (outs GR8_NOREX:$dst),
                     (ins GR8_NOREX:$src1, GR8_NOREX:$src2),
                     "xor{b}\t{$src2, $dst|$dst, $src2}", []>,
                   Sched<[WriteALU]>;

// Arithmetic.
defm ADC : ArithBinOp_RFF<0x11, 0x13, 0x15, "adc", MRM2r, MRM2m, X86adc_flag,
                          1, 0>;
defm SBB : ArithBinOp_RFF<0x19, 0x1B, 0x1D, "sbb", MRM3r, MRM3m, X86sbb_flag,
                          0, 0>;

let isCompare = 1 in {
defm CMP : ArithBinOp_F<0x39, 0x3B, 0x3D, "cmp", MRM7r, MRM7m, X86cmp, 0, 0>;
}

// Patterns to recognize loads on the LHS of an ADC. We can't make X86adc_flag
// commutable since it has EFLAGS as an input.
let Predicates = [NoNDD] in {
def : Pat<(X86adc_flag (loadi8 addr:$src2), GR8:$src1, EFLAGS),
          (ADC8rm GR8:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi16 addr:$src2), GR16:$src1, EFLAGS),
          (ADC16rm GR16:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi32 addr:$src2), GR32:$src1, EFLAGS),
          (ADC32rm GR32:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi64 addr:$src2), GR64:$src1, EFLAGS),
          (ADC64rm GR64:$src1, addr:$src2)>;
}
let Predicates = [HasNDD] in {
def : Pat<(X86adc_flag (loadi8 addr:$src2), GR8:$src1, EFLAGS),
          (ADC8rm_ND GR8:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi16 addr:$src2), GR16:$src1, EFLAGS),
          (ADC16rm_ND GR16:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi32 addr:$src2), GR32:$src1, EFLAGS),
          (ADC32rm_ND GR32:$src1, addr:$src2)>;
def : Pat<(X86adc_flag (loadi64 addr:$src2), GR64:$src1, EFLAGS),
          (ADC64rm_ND GR64:$src1, addr:$src2)>;
}

// Patterns to recognize RMW ADC with loads in operand 1.
def : Pat<(store (X86adc_flag GR8:$src, (loadi8 addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC8mr addr:$dst, GR8:$src)>;
def : Pat<(store (X86adc_flag GR16:$src, (loadi16 addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC16mr addr:$dst, GR16:$src)>;
def : Pat<(store (X86adc_flag GR32:$src, (loadi32 addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC32mr addr:$dst, GR32:$src)>;
def : Pat<(store (X86adc_flag GR64:$src, (loadi64 addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC64mr addr:$dst, GR64:$src)>;

// Patterns for basic arithmetic ops with relocImm for the immediate field.
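// (relocImm covers immediates that may still require a relocation, such as a
// symbolic constant, so they must stay in the instruction's immediate field.)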
multiclass ArithBinOp_RF_relocImm_Pats { let Predicates = [NoNDD] in { def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2), (!cast(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2), (!cast(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2), (!cast(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>; def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2), (!cast(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(store (OpNode (load addr:$dst), relocImm8_su:$src), addr:$dst), (!cast(NAME#"8mi") addr:$dst, relocImm8_su:$src)>; def : Pat<(store (OpNode (load addr:$dst), relocImm16_su:$src), addr:$dst), (!cast(NAME#"16mi") addr:$dst, relocImm16_su:$src)>; def : Pat<(store (OpNode (load addr:$dst), relocImm32_su:$src), addr:$dst), (!cast(NAME#"32mi") addr:$dst, relocImm32_su:$src)>; def : Pat<(store (OpNode (load addr:$dst), i64relocImmSExt32_su:$src), addr:$dst), (!cast(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>; } let Predicates = [HasNDD] in { def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2), (!cast(NAME#"8ri_ND") GR8:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2), (!cast(NAME#"16ri_ND") GR16:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2), (!cast(NAME#"32ri_ND") GR32:$src1, relocImm32_su:$src2)>; def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2), (!cast(NAME#"64ri32_ND") GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(OpNode (load addr:$dst), relocImm8_su:$src), (!cast(NAME#"8mi_ND") addr:$dst, relocImm8_su:$src)>; def : Pat<(OpNode (load addr:$dst), relocImm16_su:$src), (!cast(NAME#"16mi_ND") addr:$dst, relocImm16_su:$src)>; def : Pat<(OpNode (load addr:$dst), relocImm32_su:$src), (!cast(NAME#"32mi_ND") addr:$dst, relocImm32_su:$src)>; def : Pat<(OpNode (load addr:$dst), i64relocImmSExt32_su:$src), (!cast(NAME#"64mi32_ND") addr:$dst, i64relocImmSExt32_su:$src)>; } } multiclass ArithBinOp_RFF_relocImm_Pats { let Predicates = [NoNDD] in { def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS), (!cast(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS), (!cast(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS), (!cast(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>; def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS), (!cast(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS), addr:$dst), (!cast(NAME#"8mi") addr:$dst, relocImm8_su:$src)>; def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS), addr:$dst), (!cast(NAME#"16mi") addr:$dst, relocImm16_su:$src)>; def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS), addr:$dst), (!cast(NAME#"32mi") addr:$dst, relocImm32_su:$src)>; def : Pat<(store (OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS), addr:$dst), (!cast(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>; } let Predicates = [HasNDD] in { def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS), (!cast(NAME#"8ri_ND") GR8:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS), (!cast(NAME#"16ri_ND") GR16:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS), (!cast(NAME#"32ri_ND") GR32:$src1, relocImm32_su:$src2)>; 
def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS), (!cast(NAME#"64ri32_ND") GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS), (!cast(NAME#"8mi_ND") addr:$dst, relocImm8_su:$src)>; def : Pat<(OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS), (!cast(NAME#"16mi_ND") addr:$dst, relocImm16_su:$src)>; def : Pat<(OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS), (!cast(NAME#"32mi_ND") addr:$dst, relocImm32_su:$src)>; def : Pat<(OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS), (!cast(NAME#"64mi32_ND") addr:$dst, i64relocImmSExt32_su:$src)>; } } multiclass ArithBinOp_F_relocImm_Pats { def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2), (!cast(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2), (!cast(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2), (!cast(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>; def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2), (!cast(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(OpNodeFlag (loadi8 addr:$src1), relocImm8_su:$src2), (!cast(NAME#"8mi") addr:$src1, relocImm8_su:$src2)>; def : Pat<(OpNodeFlag (loadi16 addr:$src1), relocImm16_su:$src2), (!cast(NAME#"16mi") addr:$src1, relocImm16_su:$src2)>; def : Pat<(OpNodeFlag (loadi32 addr:$src1), relocImm32_su:$src2), (!cast(NAME#"32mi") addr:$src1, relocImm32_su:$src2)>; def : Pat<(OpNodeFlag (loadi64 addr:$src1), i64relocImmSExt32_su:$src2), (!cast(NAME#"64mi32") addr:$src1, i64relocImmSExt32_su:$src2)>; } defm AND : ArithBinOp_RF_relocImm_Pats; defm OR : ArithBinOp_RF_relocImm_Pats; defm XOR : ArithBinOp_RF_relocImm_Pats; defm ADD : ArithBinOp_RF_relocImm_Pats; defm SUB : ArithBinOp_RF_relocImm_Pats; defm ADC : ArithBinOp_RFF_relocImm_Pats; defm SBB : ArithBinOp_RFF_relocImm_Pats; defm CMP : ArithBinOp_F_relocImm_Pats; // ADC is commutable, but we can't indicate that to tablegen. So manually // reverse the operands. 
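// (As noted above for the rm patterns, X86adc_flag cannot be marked
// commutable because it takes EFLAGS as an input, hence the hand-written
// reversed forms below.)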
def : Pat<(X86adc_flag GR8:$src1, relocImm8_su:$src2, EFLAGS),
          (ADC8ri relocImm8_su:$src2, GR8:$src1)>;
def : Pat<(X86adc_flag i16relocImmSExt8_su:$src2, GR16:$src1, EFLAGS),
          (ADC16ri8 GR16:$src1, i16relocImmSExt8_su:$src2)>;
def : Pat<(X86adc_flag relocImm16_su:$src2, GR16:$src1, EFLAGS),
          (ADC16ri GR16:$src1, relocImm16_su:$src2)>;
def : Pat<(X86adc_flag i32relocImmSExt8_su:$src2, GR32:$src1, EFLAGS),
          (ADC32ri8 GR32:$src1, i32relocImmSExt8_su:$src2)>;
def : Pat<(X86adc_flag relocImm32_su:$src2, GR32:$src1, EFLAGS),
          (ADC32ri GR32:$src1, relocImm32_su:$src2)>;
def : Pat<(X86adc_flag i64relocImmSExt8_su:$src2, GR64:$src1, EFLAGS),
          (ADC64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(X86adc_flag i64relocImmSExt32_su:$src2, GR64:$src1, EFLAGS),
          (ADC64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>;

def : Pat<(store (X86adc_flag relocImm8_su:$src, (load addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC8mi addr:$dst, relocImm8_su:$src)>;
def : Pat<(store (X86adc_flag i16relocImmSExt8_su:$src, (load addr:$dst),
                  EFLAGS), addr:$dst),
          (ADC16mi8 addr:$dst, i16relocImmSExt8_su:$src)>;
def : Pat<(store (X86adc_flag relocImm16_su:$src, (load addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC16mi addr:$dst, relocImm16_su:$src)>;
def : Pat<(store (X86adc_flag i32relocImmSExt8_su:$src, (load addr:$dst),
                  EFLAGS), addr:$dst),
          (ADC32mi8 addr:$dst, i32relocImmSExt8_su:$src)>;
def : Pat<(store (X86adc_flag relocImm32_su:$src, (load addr:$dst), EFLAGS),
                 addr:$dst),
          (ADC32mi addr:$dst, relocImm32_su:$src)>;
def : Pat<(store (X86adc_flag i64relocImmSExt8_su:$src, (load addr:$dst),
                  EFLAGS), addr:$dst),
          (ADC64mi8 addr:$dst, i64relocImmSExt8_su:$src)>;
def : Pat<(store (X86adc_flag i64relocImmSExt32_su:$src, (load addr:$dst),
                  EFLAGS), addr:$dst),
          (ADC64mi32 addr:$dst, i64relocImmSExt32_su:$src)>;

//===----------------------------------------------------------------------===//
// Semantically, test instructions are similar to AND, except they don't
// generate a result. From an encoding perspective, they are very different:
// they don't have all the usual imm8 and REV forms, and are encoded into a
// different space.
let isCompare = 1 in {
let isCommutable = 1 in {
// Avoid selecting these and instead use a test+and. Post processing will
// combine them. This gives a bunch of other patterns that start with
// an `and` a chance to match.
def TEST8rr : BinOpRR_F<0x84, "test", Xi8 , null_frag>; def TEST16rr : BinOpRR_F<0x85, "test", Xi16, null_frag>, OpSize16; def TEST32rr : BinOpRR_F<0x85, "test", Xi32, null_frag>, OpSize32; def TEST64rr : BinOpRR_F<0x85, "test", Xi64, null_frag>; } // isCommutable def TEST8mr : BinOpMR_F<0x84, "test", Xi8 , null_frag>; def TEST16mr : BinOpMR_F<0x85, "test", Xi16, null_frag>, OpSize16; def TEST32mr : BinOpMR_F<0x85, "test", Xi32, null_frag>, OpSize32; def TEST64mr : BinOpMR_F<0x85, "test", Xi64, null_frag>; def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>; def TEST16ri : BinOpRI_F<0xF7, "test", Xi16, X86testpat, MRM0r>, OpSize16; def TEST32ri : BinOpRI_F<0xF7, "test", Xi32, X86testpat, MRM0r>, OpSize32; def TEST64ri32 : BinOpRI_F<0xF7, "test", Xi64, X86testpat, MRM0r>; def TEST8mi : BinOpMI_F<0xF6, "test", Xi8 , X86testpat, MRM0m>; def TEST16mi : BinOpMI_F<0xF7, "test", Xi16, X86testpat, MRM0m>, OpSize16; def TEST32mi : BinOpMI_F<0xF7, "test", Xi32, X86testpat, MRM0m>, OpSize32; let Predicates = [In64BitMode] in def TEST64mi32 : BinOpMI_F<0xF7, "test", Xi64, X86testpat, MRM0m>; def TEST8i8 : BinOpAI_F<0xA8, "test", Xi8 , AL, "{$src, %al|al, $src}">; def TEST16i16 : BinOpAI_F<0xA9, "test", Xi16, AX, "{$src, %ax|ax, $src}">, OpSize16; def TEST32i32 : BinOpAI_F<0xA9, "test", Xi32, EAX, "{$src, %eax|eax, $src}">, OpSize32; def TEST64i32 : BinOpAI_F<0xA9, "test", Xi64, RAX, "{$src, %rax|rax, $src}">; } // isCompare // Patterns to match a relocImm into the immediate field. def : Pat<(X86testpat GR8:$src1, relocImm8_su:$src2), (TEST8ri GR8:$src1, relocImm8_su:$src2)>; def : Pat<(X86testpat GR16:$src1, relocImm16_su:$src2), (TEST16ri GR16:$src1, relocImm16_su:$src2)>; def : Pat<(X86testpat GR32:$src1, relocImm32_su:$src2), (TEST32ri GR32:$src1, relocImm32_su:$src2)>; def : Pat<(X86testpat GR64:$src1, i64relocImmSExt32_su:$src2), (TEST64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>; def : Pat<(X86testpat (loadi8 addr:$src1), relocImm8_su:$src2), (TEST8mi addr:$src1, relocImm8_su:$src2)>; def : Pat<(X86testpat (loadi16 addr:$src1), relocImm16_su:$src2), (TEST16mi addr:$src1, relocImm16_su:$src2)>; def : Pat<(X86testpat (loadi32 addr:$src1), relocImm32_su:$src2), (TEST32mi addr:$src1, relocImm32_su:$src2)>; def : Pat<(X86testpat (loadi64 addr:$src1), i64relocImmSExt32_su:$src2), (TEST64mi32 addr:$src1, i64relocImmSExt32_su:$src2)>; //===----------------------------------------------------------------------===// // ANDN Instruction // multiclass AndN { defvar andn_rr_p = [(set t.RegClass:$dst, EFLAGS, (node (not t.RegClass:$src1), t.RegClass:$src2))]; defvar andn_rm_p = [(set t.RegClass:$dst, EFLAGS, (node (not t.RegClass:$src1), (t.LoadNode addr:$src2)))]; def rr#suffix : ITy<0xF2, MRMSrcReg, t, (outs t.RegClass:$dst), (ins t.RegClass:$src1, t.RegClass:$src2), "andn", binop_ndd_args, andn_rr_p>, VVVV, Sched<[WriteALU]>, T8; def rm#suffix : ITy<0xF2, MRMSrcMem, t, (outs t.RegClass:$dst), (ins t.RegClass:$src1, t.MemOperand:$src2), "andn", binop_ndd_args, andn_rm_p>, VVVV, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>, T8; } // Complexity is reduced to give and with immediate a chance to match first. 
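// For reference: BMI ANDN is a three-operand instruction; "andn eax, ebx, ecx"
// computes EAX = ~EBX & ECX and sets EFLAGS, which is what the
// (and (not ...)) patterns above describe.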
let AddedComplexity = -6 in { defm ANDN32 : AndN, VEX, Requires<[HasBMI, NoEGPR]>, DefEFLAGS; defm ANDN64 : AndN, VEX, Requires<[HasBMI, NoEGPR]>, DefEFLAGS; defm ANDN32 : AndN, EVEX, Requires<[HasBMI, HasEGPR, In64BitMode]>, DefEFLAGS; defm ANDN64 : AndN, EVEX, Requires<[HasBMI, HasEGPR, In64BitMode]>, DefEFLAGS; defm ANDN32 : AndN, EVEX, EVEX_NF, Requires<[In64BitMode]>; defm ANDN64 : AndN, EVEX, EVEX_NF, Requires<[In64BitMode]>; } multiclass Andn_Pats { def : Pat<(and (not GR32:$src1), GR32:$src2), (!cast(ANDN32rr#suffix) GR32:$src1, GR32:$src2)>; def : Pat<(and (not GR64:$src1), GR64:$src2), (!cast(ANDN64rr#suffix) GR64:$src1, GR64:$src2)>; def : Pat<(and (not GR32:$src1), (loadi32 addr:$src2)), (!cast(ANDN32rm#suffix) GR32:$src1, addr:$src2)>; def : Pat<(and (not GR64:$src1), (loadi64 addr:$src2)), (!cast(ANDN64rm#suffix) GR64:$src1, addr:$src2)>; } let Predicates = [HasBMI, NoEGPR], AddedComplexity = -6 in defm : Andn_Pats<"">; let Predicates = [HasBMI, HasEGPR], AddedComplexity = -6 in defm : Andn_Pats<"_EVEX">; //===----------------------------------------------------------------------===// // MULX Instruction // multiclass MulX { defvar mulx_args = "{$src, $dst2, $dst1|$dst1, $dst2, $src}"; defvar mulx_rm_sched = [WriteIMulHLd, sched.Folded, // Memory operand. ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault, // Implicit read of EDX/RDX sched.ReadAfterFold]; def rr : ITy<0xF6, MRMSrcReg, t, (outs t.RegClass:$dst1, t.RegClass:$dst2), (ins t.RegClass:$src), "mulx", mulx_args, []>, T8, XD, VEX, VVVV, Sched<[WriteIMulH, sched]>; let mayLoad = 1 in def rm : ITy<0xF6, MRMSrcMem, t, (outs t.RegClass:$dst1, t.RegClass:$dst2), (ins t.MemOperand:$src), "mulx", mulx_args, []>, T8, XD, VEX, VVVV, Sched; let Predicates = [In64BitMode] in { def rr_EVEX : ITy<0xF6, MRMSrcReg, t, (outs t.RegClass:$dst1, t.RegClass:$dst2), (ins t.RegClass:$src), "mulx", mulx_args, []>, T8, XD, EVEX, VVVV, Sched<[WriteIMulH, sched]>; let mayLoad = 1 in def rm_EVEX : ITy<0xF6, MRMSrcMem, t, (outs t.RegClass:$dst1, t.RegClass:$dst2), (ins t.MemOperand:$src), "mulx", mulx_args, []>, T8, XD, EVEX, VVVV, Sched; } // Pseudo instructions to be used when the low result isn't used. The // instruction is defined to keep the high if both destinations are the same. def Hrr : PseudoI<(outs t.RegClass:$dst), (ins t.RegClass:$src), []>, Sched<[sched]>; let mayLoad = 1 in def Hrm : PseudoI<(outs t.RegClass:$dst), (ins t.MemOperand:$src), []>, Sched<[sched.Folded]>; } let Uses = [EDX] in defm MULX32 : MulX; let Uses = [RDX] in defm MULX64 : MulX, REX_W; //===----------------------------------------------------------------------===// // ADCX and ADOX Instructions // // We don't have patterns for these as there is no advantage over ADC for // most code. 
let Form = MRMSrcReg in {
  def ADCX32rr : BinOpRRF_RF<0xF6, "adcx", Xi32>, T8, PD;
  def ADCX64rr : BinOpRRF_RF<0xF6, "adcx", Xi64>, T8, PD;
  def ADOX32rr : BinOpRRF_RF<0xF6, "adox", Xi32>, T8, XS;
  def ADOX64rr : BinOpRRF_RF<0xF6, "adox", Xi64>, T8, XS;
  let Predicates = [In64BitMode] in {
    def ADCX32rr_EVEX : BinOpRRF_RF<0x66, "adcx", Xi32>, EVEX, T_MAP4, PD;
    def ADCX64rr_EVEX : BinOpRRF_RF<0x66, "adcx", Xi64>, EVEX, T_MAP4, PD;
    def ADOX32rr_EVEX : BinOpRRF_RF<0x66, "adox", Xi32>, EVEX, T_MAP4, XS;
    def ADOX64rr_EVEX : BinOpRRF_RF<0x66, "adox", Xi64>, EVEX, T_MAP4, XS;
    def ADCX32rr_ND : BinOpRRF_RF<0x66, "adcx", Xi32, null_frag, 1>, PD;
    def ADCX64rr_ND : BinOpRRF_RF<0x66, "adcx", Xi64, null_frag, 1>, PD;
    def ADOX32rr_ND : BinOpRRF_RF<0x66, "adox", Xi32, null_frag, 1>, XS;
    def ADOX64rr_ND : BinOpRRF_RF<0x66, "adox", Xi64, null_frag, 1>, XS;
  }
}

let Form = MRMSrcMem in {
  def ADCX32rm : BinOpRMF_RF<0xF6, "adcx", Xi32>, T8, PD;
  def ADCX64rm : BinOpRMF_RF<0xF6, "adcx", Xi64>, T8, PD;
  def ADOX32rm : BinOpRMF_RF<0xF6, "adox", Xi32>, T8, XS;
  def ADOX64rm : BinOpRMF_RF<0xF6, "adox", Xi64>, T8, XS;
  let Predicates = [In64BitMode] in {
    def ADCX32rm_EVEX : BinOpRMF_RF<0x66, "adcx", Xi32>, EVEX, T_MAP4, PD;
    def ADCX64rm_EVEX : BinOpRMF_RF<0x66, "adcx", Xi64>, EVEX, T_MAP4, PD;
    def ADOX32rm_EVEX : BinOpRMF_RF<0x66, "adox", Xi32>, EVEX, T_MAP4, XS;
    def ADOX64rm_EVEX : BinOpRMF_RF<0x66, "adox", Xi64>, EVEX, T_MAP4, XS;
    def ADCX32rm_ND : BinOpRMF_RF<0x66, "adcx", Xi32, null_frag, 1>, PD;
    def ADCX64rm_ND : BinOpRMF_RF<0x66, "adcx", Xi64, null_frag, 1>, PD;
    def ADOX32rm_ND : BinOpRMF_RF<0x66, "adox", Xi32, null_frag, 1>, XS;
    def ADOX64rm_ND : BinOpRMF_RF<0x66, "adox", Xi64, null_frag, 1>, XS;
  }
}
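// Note: unlike ADC, ADCX reads and writes only CF while ADOX reads and writes
// only OF, so the two can be interleaved to keep two independent carry chains
// live (e.g. in multi-precision multiplication), which is why they exist here
// even without ISel patterns.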