//===-- BPFInstrInfo.td - Target Description for BPF Target ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the BPF instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

include "BPFInstrFormats.td"

// Instruction Operands and Patterns

// These are target-independent nodes, but have target-specific formats.
def SDT_BPFCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>,
                                          SDTCisVT<1, iPTR>]>;
def SDT_BPFCallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_BPFCall         : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_BPFSetFlag      : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>]>;
def SDT_BPFSelectCC     : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 4>,
                                               SDTCisSameAs<4, 5>]>;
def SDT_BPFBrCC         : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisVT<3, OtherVT>]>;
def SDT_BPFWrapper      : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                               SDTCisPtrTy<0>]>;
def SDT_BPFMEMCPY       : SDTypeProfile<0, 4, [SDTCisVT<0, i64>,
                                               SDTCisVT<1, i64>,
                                               SDTCisVT<2, i64>,
                                               SDTCisVT<3, i64>]>;

def BPFcall          : SDNode<"BPFISD::CALL", SDT_BPFCall,
                              [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                               SDNPVariadic]>;
def BPFretglue       : SDNode<"BPFISD::RET_GLUE", SDTNone,
                              [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def BPFcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_BPFCallSeqStart,
                              [SDNPHasChain, SDNPOutGlue]>;
def BPFcallseq_end   : SDNode<"ISD::CALLSEQ_END", SDT_BPFCallSeqEnd,
                              [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def BPFbrcc          : SDNode<"BPFISD::BR_CC", SDT_BPFBrCC,
                              [SDNPHasChain, SDNPOutGlue, SDNPInGlue]>;

def BPFselectcc      : SDNode<"BPFISD::SELECT_CC", SDT_BPFSelectCC, [SDNPInGlue]>;
def BPFWrapper       : SDNode<"BPFISD::Wrapper", SDT_BPFWrapper>;
def BPFmemcpy        : SDNode<"BPFISD::MEMCPY", SDT_BPFMEMCPY,
                              [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                               SDNPMayStore, SDNPMayLoad]>;

def BPFIsLittleEndian : Predicate<"Subtarget->isLittleEndian()">;
def BPFIsBigEndian    : Predicate<"!Subtarget->isLittleEndian()">;
def BPFHasALU32       : Predicate<"Subtarget->getHasAlu32()">;
def BPFNoALU32        : Predicate<"!Subtarget->getHasAlu32()">;
def BPFHasLdsx        : Predicate<"Subtarget->hasLdsx()">;
def BPFHasMovsx       : Predicate<"Subtarget->hasMovsx()">;
def BPFHasBswap       : Predicate<"Subtarget->hasBswap()">;
def BPFHasSdivSmod    : Predicate<"Subtarget->hasSdivSmod()">;
def BPFNoMovsx        : Predicate<"!Subtarget->hasMovsx()">;
def BPFNoBswap        : Predicate<"!Subtarget->hasBswap()">;
def BPFHasStoreImm    : Predicate<"Subtarget->hasStoreImm()">;

class ImmediateAsmOperand<string name> : AsmOperandClass {
  let Name = name;
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", name);
}

def SImm16AsmOperand : ImmediateAsmOperand<"SImm16">;

def brtarget : Operand<OtherVT> {
  let PrintMethod = "printBrTargetOperand";
  let ParserMatchClass = ImmediateAsmOperand<"BrTarget">;
}
def calltarget : Operand<i64>;

def u64imm : Operand<i64> {
  let PrintMethod = "printImm64Operand";
}

def s16imm : Operand<i16> {
  let ParserMatchClass = SImm16AsmOperand;
}

def gpr_or_imm : Operand<i64>;

def i64immSExt32 : PatLeaf<(i64 imm),
                           [{return isInt<32>(N->getSExtValue()); }]>;
def i32immSExt32 : PatLeaf<(i32 imm),
                           [{return isInt<32>(N->getSExtValue()); }]>;
def i64immZExt32 : PatLeaf<(i64 imm),
                           [{return isUInt<32>(N->getZExtValue()); }]>;

def imm_to_i64 : SDNodeXForm<timm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
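// Note on the immediate predicates above: i64immSExt32 accepts a 64-bit
// constant only if it is representable as a sign-extended 32-bit value
// (e.g. -1 or 0x7fffffff qualify, 0xffffffff does not), matching the
// sign-extended imm32 field of ALU64/JMP instructions; i64immZExt32 is the
// zero-extended counterpart. imm_to_i64 simply rewraps a matched immediate
// as an i64 target constant for use in output patterns (see the
// BPFHasStoreImm patterns below).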
// Addressing modes.
def ADDRri : ComplexPattern<i64, 2, "SelectAddr", [], []>;
def FIri : ComplexPattern<i64, 2, "SelectFIAddr", [add, or], []>;

// Address operands
def MEMri : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "getMemoryOpValue";
  let DecoderMethod = "decodeMemoryOpValue";
  let MIOperandInfo = (ops GPR, s16imm);
}

// Conditional code predicates - used for pattern matching for jump instructions
def BPF_CC_EQ  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT  : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU : PatLeaf<(i64 imm), [{return (N->getZExtValue() == ISD::SETULE);}]>;
def BPF_CC_EQ_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU_32 : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU_32 : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT_32  : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU_32 : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU_32 : PatLeaf<(i32 imm), [{return (N->getZExtValue() == ISD::SETULE);}]>;
def NoCond : PatLeaf<(vt)> {}

// For arithmetic and jump instructions the 8-bit 'code'
// field is divided into three parts:
//
//   +----------------+--------+--------------------+
//   |   4 bits       |  1 bit |   3 bits           |
//   | operation code | source | instruction class  |
//   +----------------+--------+--------------------+
//  (MSB)                                      (LSB)
class TYPE_ALU_JMP<bits<4> op, bits<1> srctype,
                   dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {
  let Inst{63-60} = op;
  let Inst{59} = srctype;
}

// For load and store instructions the 8-bit 'code' field is divided as:
//
//   +--------+--------+-------------------+
//   | 3 bits | 2 bits |   3 bits          |
//   |  mode  |  size  | instruction class |
//   +--------+--------+-------------------+
//  (MSB)                             (LSB)
class TYPE_LD_ST<bits<3> mode, bits<2> size,
                 dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {
  let Inst{63-61} = mode;
  let Inst{60-59} = size;
}
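// Worked examples of the 'code' byte layouts above (field values per the
// eBPF ISA encodings in BPFInstrFormats.td; listed here only as a reader aid):
//   "r1 += r2"                -> BPF_ALU64 (0x07) | BPF_X (0x08) | BPF_ADD (0x00) = 0x0f
//   "if r1 > r2 goto +off"    -> BPF_JMP   (0x05) | BPF_X (0x08) | BPF_JGT (0x20) = 0x2d
//   "*(u64 *)(r1 + off) = r2" -> BPF_STX   (0x03) | BPF_DW (0x18) | BPF_MEM (0x60) = 0x7b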
// jump instructions
class JMP_RR<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR:$dst, GPR:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i64:$dst, i64:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class JMP_RI<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR:$dst, i64imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i64:$dst, i64immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP;
}

class JMP_JCOND<BPFJumpOp Opc, string OpcodeStr, list<dag> Pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins brtarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   Pattern> {
  bits<16> BrDst;

  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class JMP_RR_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR32:$dst, GPR32:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i32:$dst, i32:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP32;
}

class JMP_RI_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR32:$dst, i32imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i32:$dst, i32immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP32;
}

multiclass J<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond, PatLeaf Cond32> {
  def _rr : JMP_RR<Opc, OpcodeStr, Cond>;
  def _ri : JMP_RI<Opc, OpcodeStr, Cond>;
  def _rr_32 : JMP_RR_32<Opc, OpcodeStr, Cond32>;
  def _ri_32 : JMP_RI_32<Opc, OpcodeStr, Cond32>;
}

let isBranch = 1, isTerminator = 1, hasDelaySlot=0 in {
// cmp+goto instructions
defm JEQ  : J<BPF_JEQ,  "==",  BPF_CC_EQ,  BPF_CC_EQ_32>;
defm JUGT : J<BPF_JGT,  ">",   BPF_CC_GTU, BPF_CC_GTU_32>;
defm JUGE : J<BPF_JGE,  ">=",  BPF_CC_GEU, BPF_CC_GEU_32>;
defm JNE  : J<BPF_JNE,  "!=",  BPF_CC_NE,  BPF_CC_NE_32>;
defm JSGT : J<BPF_JSGT, "s>",  BPF_CC_GT,  BPF_CC_GT_32>;
defm JSGE : J<BPF_JSGE, "s>=", BPF_CC_GE,  BPF_CC_GE_32>;
defm JULT : J<BPF_JLT,  "<",   BPF_CC_LTU, BPF_CC_LTU_32>;
defm JULE : J<BPF_JLE,  "<=",  BPF_CC_LEU, BPF_CC_LEU_32>;
defm JSLT : J<BPF_JSLT, "s<",  BPF_CC_LT,  BPF_CC_LT_32>;
defm JSLE : J<BPF_JSLE, "s<=", BPF_CC_LE,  BPF_CC_LE_32>;
def JCOND : JMP_JCOND<BPF_JCOND, "may_goto", []>;
}

// ALU instructions
class ALU_RI<BPFOpClass Class, BPFArithOp Opc, int off,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = off;
  let Inst{31-0} = imm;
  let BPFClass = Class;
}

class ALU_RR<BPFOpClass Class, BPFArithOp Opc, int off,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<4> src;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = off;
  let BPFClass = Class;
}

multiclass ALU<BPFArithOp Opc, int off, string OpcodeStr, SDNode OpNode> {
  def _rr : ALU_RR<BPF_ALU64, Opc, off,
                   (outs GPR:$dst), (ins GPR:$src2, GPR:$src),
                   "$dst "#OpcodeStr#" $src",
                   [(set GPR:$dst, (OpNode i64:$src2, i64:$src))]>;
  def _ri : ALU_RI<BPF_ALU64, Opc, off,
                   (outs GPR:$dst), (ins GPR:$src2, i64imm:$imm),
                   "$dst "#OpcodeStr#" $imm",
                   [(set GPR:$dst, (OpNode GPR:$src2, i64immSExt32:$imm))]>;
  def _rr_32 : ALU_RR<BPF_ALU, Opc, off,
                      (outs GPR32:$dst), (ins GPR32:$src2, GPR32:$src),
                      "$dst "#OpcodeStr#" $src",
                      [(set GPR32:$dst, (OpNode i32:$src2, i32:$src))]>;
  def _ri_32 : ALU_RI<BPF_ALU, Opc, off,
                      (outs GPR32:$dst), (ins GPR32:$src2, i32imm:$imm),
                      "$dst "#OpcodeStr#" $imm",
                      [(set GPR32:$dst, (OpNode GPR32:$src2, i32immSExt32:$imm))]>;
}

let Constraints = "$dst = $src2" in {
let isAsCheapAsAMove = 1 in {
  defm ADD : ALU<BPF_ADD, 0, "+=", add>;
  defm SUB : ALU<BPF_SUB, 0, "-=", sub>;
  defm OR  : ALU<BPF_OR,  0, "|=", or>;
  defm AND : ALU<BPF_AND, 0, "&=", and>;
  defm SLL : ALU<BPF_LSH, 0, "<<=", shl>;
  defm SRL : ALU<BPF_RSH, 0, ">>=", srl>;
  defm XOR : ALU<BPF_XOR, 0, "^=", xor>;
  defm SRA : ALU<BPF_ARSH, 0, "s>>=", sra>;
}
  defm MUL : ALU<BPF_MUL, 0, "*=", mul>;
  defm DIV : ALU<BPF_DIV, 0, "/=", udiv>;
  defm MOD : ALU<BPF_MOD, 0, "%=", urem>;

  let Predicates = [BPFHasSdivSmod] in {
    defm SDIV : ALU<BPF_DIV, 1, "s/=", sdiv>;
    defm SMOD : ALU<BPF_MOD, 1, "s%=", srem>;
  }
}

class NEG_RR<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, 0, outs, ins, asmstr, pattern> {
  bits<4> dst;

  let Inst{51-48} = dst;
  let BPFClass = Class;
}

let Constraints = "$dst = $src", isAsCheapAsAMove = 1 in {
  def NEG_64 : NEG_RR<BPF_ALU64, BPF_NEG, (outs GPR:$dst), (ins GPR:$src),
                      "$dst = -$src",
                      [(set GPR:$dst, (ineg i64:$src))]>;
  def NEG_32 : NEG_RR<BPF_ALU, BPF_NEG, (outs GPR32:$dst), (ins GPR32:$src),
                      "$dst = -$src",
                      [(set GPR32:$dst, (ineg i32:$src))]>;
}

class LD_IMM64<bits<4> Pseudo, string OpcodeStr>
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst), (ins u64imm:$imm),
                 "$dst "#OpcodeStr#" ${imm} ll",
                 [(set GPR:$dst, (i64 imm:$imm))]> {
  bits<4> dst;
  bits<64> imm;

  let Inst{51-48} = dst;
  let Inst{55-52} = Pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def LD_imm64 : LD_IMM64<0, "=">;
def MOV_rr : ALU_RR<BPF_ALU64, BPF_MOV, 0,
                    (outs GPR:$dst), (ins GPR:$src),
                    "$dst = $src", []>;
def MOV_ri : ALU_RI<BPF_ALU64, BPF_MOV, 0,
                    (outs GPR:$dst), (ins i64imm:$imm),
                    "$dst = $imm",
                    [(set GPR:$dst, (i64 i64immSExt32:$imm))]>;
def MOV_rr_32 : ALU_RR<BPF_ALU, BPF_MOV, 0,
                       (outs GPR32:$dst), (ins GPR32:$src),
                       "$dst = $src", []>;
def MOV_ri_32 : ALU_RI<BPF_ALU, BPF_MOV, 0,
                       (outs GPR32:$dst), (ins i32imm:$imm),
                       "$dst = $imm",
                       [(set GPR32:$dst, (i32 i32immSExt32:$imm))]>;
let Predicates = [BPFHasMovsx] in {
  def MOVSX_rr_8 : ALU_RR<BPF_ALU64, BPF_MOV, 8,
                          (outs GPR:$dst), (ins GPR:$src),
                          "$dst = (s8)$src",
                          [(set GPR:$dst, (sext_inreg GPR:$src, i8))]>;
  def MOVSX_rr_16 : ALU_RR<BPF_ALU64, BPF_MOV, 16,
                           (outs GPR:$dst), (ins GPR:$src),
                           "$dst = (s16)$src",
                           [(set GPR:$dst, (sext_inreg GPR:$src, i16))]>;
  def MOVSX_rr_32 : ALU_RR<BPF_ALU64, BPF_MOV, 32,
                           (outs GPR:$dst), (ins GPR:$src),
                           "$dst = (s32)$src",
                           [(set GPR:$dst, (sext_inreg GPR:$src, i32))]>;
  def MOVSX_rr_32_8 : ALU_RR<BPF_ALU, BPF_MOV, 8,
                             (outs GPR32:$dst), (ins GPR32:$src),
                             "$dst = (s8)$src",
                             [(set GPR32:$dst, (sext_inreg GPR32:$src, i8))]>;
  def MOVSX_rr_32_16 : ALU_RR<BPF_ALU, BPF_MOV, 16,
                              (outs GPR32:$dst), (ins GPR32:$src),
                              "$dst = (s16)$src",
                              [(set GPR32:$dst, (sext_inreg GPR32:$src, i16))]>;
}
}

def ADDR_SPACE_CAST : ALU_RR<BPF_ALU64, BPF_MOV, 1,
                             (outs GPR:$dst),
                             (ins GPR:$src, i64imm:$dst_as, i64imm:$src_as),
                             "$dst = addr_space_cast($src, $dst_as, $src_as)",
                             []> {
  bits<64> dst_as;
  bits<64> src_as;

  let Inst{47-32} = 1;
  let Inst{31-16} = dst_as{15-0};
  let Inst{15-0} = src_as{15-0};
}

def SrcAddrSpace : SDNodeXForm<addrspacecast, [{
  return CurDAG->getTargetConstant(
      cast<AddrSpaceCastSDNode>(N)->getSrcAddressSpace(), SDLoc(N), MVT::i64);
}]>;

def DstAddrSpace : SDNodeXForm<addrspacecast, [{
  return CurDAG->getTargetConstant(
      cast<AddrSpaceCastSDNode>(N)->getDestAddressSpace(), SDLoc(N), MVT::i64);
}]>;

def : Pat<(addrspacecast:$this GPR:$src),
          (ADDR_SPACE_CAST $src, (DstAddrSpace $this), (SrcAddrSpace $this))>;
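// Note: as encoded above, addr_space_cast reuses the ALU64 register-move
// encoding with the offset field set to 1 and imm32 = (dst_addr_space << 16)
// | src_addr_space; this is the form used for casts to and from non-zero
// address spaces (e.g. BPF arena pointers).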
def FI_ri
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst), (ins MEMri:$addr),
                 "lea\t$dst, $addr",
                 [(set i64:$dst, FIri:$addr)]> {
  // This is a tentative instruction, and will be replaced
  // with MOV_rr and ADD_ri in PEI phase
  let Inst{51-48} = 0;
  let Inst{55-52} = 2;
  let Inst{47-32} = 0;
  let Inst{31-0} = 0;
  let BPFClass = BPF_LD;
  bit isPseudo = true;
}

def LD_pseudo
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst), (ins i64imm:$pseudo, u64imm:$imm),
                 "ld_pseudo\t$dst, $pseudo, $imm",
                 [(set GPR:$dst, (int_bpf_pseudo imm:$pseudo, imm:$imm))]> {
  bits<4> dst;
  bits<64> imm;
  bits<4> pseudo;

  let Inst{51-48} = dst;
  let Inst{55-52} = pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}

// STORE instructions
class STORE<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs), (ins GPR:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi64<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE<Opc, OpcodeStr, [(OpNode i64:$src, ADDRri:$addr)]>;

let Predicates = [BPFNoALU32] in {
  def STW : STOREi64<BPF_W, "u32", truncstorei32>;
  def STH : STOREi64<BPF_H, "u16", truncstorei16>;
  def STB : STOREi64<BPF_B, "u8", truncstorei8>;
}
def STD : STOREi64<BPF_DW, "u64", store>;

class STORE_imm<BPFWidthModifer SizeOp, string OpcodeStr, dag Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs), (ins i64imm:$imm, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $imm",
                 [Pattern]> {
  bits<20> addr;
  bits<32> imm;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{31-0} = imm;
  let BPFClass = BPF_ST;
}

let Predicates = [BPFHasStoreImm] in {
  // Opcode (BPF_ST | BPF_MEM | BPF_DW) implies sign extension for
  // the value stored to memory:
  // - it is fine to generate such a write when the immediate is -1
  // - it is incorrect to generate such a write when the immediate is
  //   +0xffff_ffff
  //
  // In the latter case two instructions would be generated instead of
  // one BPF_ST:
  //   rA = 0xffffffff ll      ; LD_imm64
  //   *(u64 *)(rB + 0) = rA   ; STX
  //
  // For BPF_{B,H,W} the size of the value stored matches the size of
  // the immediate.
  def STD_imm : STORE_imm<BPF_DW, "u64",
                          (store (i64 i64immSExt32:$imm), ADDRri:$addr)>;
  def STW_imm : STORE_imm<BPF_W, "u32",
                          (truncstorei32 (i64 i64immZExt32:$imm), ADDRri:$addr)>;
  def STH_imm : STORE_imm<BPF_H, "u16",
                          (truncstorei16 (i64 i64immZExt32:$imm), ADDRri:$addr)>;
  def STB_imm : STORE_imm<BPF_B, "u8",
                          (truncstorei8 (i64 i64immZExt32:$imm), ADDRri:$addr)>;
}

let Predicates = [BPFHasALU32, BPFHasStoreImm] in {
  def : Pat<(store (i32 imm:$src), ADDRri:$dst),
            (STW_imm (imm_to_i64 $src), ADDRri:$dst)>;
  def : Pat<(truncstorei16 (i32 imm:$src), ADDRri:$dst),
            (STH_imm (imm_to_i64 imm:$src), ADDRri:$dst)>;
  def : Pat<(truncstorei8 (i32 imm:$src), ADDRri:$dst),
            (STB_imm (imm_to_i64 imm:$src), ADDRri:$dst)>;
}

// LOAD instructions
class LOAD<BPFWidthModifer SizeOp, BPFModeModifer ModOp, string OpcodeStr,
           list<dag> Pattern>
    : TYPE_LD_ST<ModOp.Value, SizeOp.Value,
                 (outs GPR:$dst), (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi64<BPFWidthModifer SizeOp, BPFModeModifer ModOp, string OpcodeStr,
              PatFrag OpNode>
    : LOAD<SizeOp, ModOp, OpcodeStr, [(set i64:$dst, (OpNode ADDRri:$addr))]>;

let isCodeGenOnly = 1 in {
  class CORE_LD<RegisterClass RegClass, string Sz>
      : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                   (outs RegClass:$dst),
                   (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                   "$dst = core_ld"#Sz#"($opcode, $src, $offset)",
                   []>;
  def CORE_LD64 : CORE_LD<GPR, "64">;
  def CORE_LD32 : CORE_LD<GPR32, "32">;
  def CORE_ST : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                           (outs),
                           (ins GPR:$src, u64imm:$opcode, GPR:$ptr, u64imm:$offset),
                           "core_st($src, $opcode, $ptr, $offset)",
                           []>;
  let Constraints = "$dst = $src" in {
    def CORE_SHIFT : ALU_RR<BPF_ALU64, BPF_LSH, 0,
                            (outs GPR:$dst),
                            (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                            "$dst = core_shift($opcode, $src, $offset)",
                            []>;
  }
}

let Predicates = [BPFNoALU32] in {
  def LDW : LOADi64<BPF_W, BPF_MEM, "u32", zextloadi32>;
  def LDH : LOADi64<BPF_H, BPF_MEM, "u16", zextloadi16>;
  def LDB : LOADi64<BPF_B, BPF_MEM, "u8", zextloadi8>;
}

let Predicates = [BPFHasLdsx] in {
  def LDWSX : LOADi64<BPF_W, BPF_MEMSX, "s32", sextloadi32>;
  def LDHSX : LOADi64<BPF_H, BPF_MEMSX, "s16", sextloadi16>;
  def LDBSX : LOADi64<BPF_B, BPF_MEMSX, "s8", sextloadi8>;
}

def LDD : LOADi64<BPF_DW, BPF_MEM, "u64", load>;

class BRANCH<BPFJumpOp Opc, string OpcodeStr, list<dag> Pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs), (ins brtarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   Pattern> {
  bits<16> BrDst;

  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class BRANCH_LONG<BPFJumpOp Opc, string OpcodeStr, list<dag> Pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs), (ins brtarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   Pattern> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP32;
}

class CALL<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_K.Value,
                   (outs), (ins calltarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP;
}

class CALLX<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_X.Value,
                   (outs), (ins GPR:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<4> BrDst;

  let Inst{51-48} = BrDst;
  let BPFClass = BPF_JMP;
}

// Jump always
let isBranch = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1 in {
  def JMP : BRANCH<BPF_JA, "goto", [(br bb:$BrDst)]>;
  def JMPL : BRANCH_LONG<BPF_JA, "gotol", []>;
}

// Jump and link
let isCall=1, hasDelaySlot=0, Uses = [R11],
    // Potentially clobbered registers
    Defs = [R0, R1, R2, R3, R4, R5] in {
  def JAL : CALL<"call">;
  def JALX : CALLX<"callx">;
}

class NOP_I<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_MOV.Value, BPF_X.Value,
                   (outs), (ins),
                   OpcodeStr,
                   []> {
  // mov r0, r0 == nop
  let Inst{55-52} = 0;
  let Inst{51-48} = 0;
  let BPFClass = BPF_ALU64;
}

let hasSideEffects = 0, isCodeGenOnly = 1 in
  def NOP : NOP_I<"nop">;

class RET<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_EXIT.Value, BPF_K.Value,
                   (outs), (ins),
                   OpcodeStr,
                   [(BPFretglue)]> {
  let Inst{31-0} = 0;
  let BPFClass = BPF_JMP;
}

let isReturn = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1,
    isNotDuplicable = 1 in {
  def RET : RET<"exit">;
}

// ADJCALLSTACKDOWN/UP pseudo insns
let Defs = [R11], Uses = [R11], isCodeGenOnly = 1 in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                              "#ADJCALLSTACKDOWN $amt1 $amt2",
                              [(BPFcallseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                            "#ADJCALLSTACKUP $amt1 $amt2",
                            [(BPFcallseq_end timm:$amt1, timm:$amt2)]>;
}
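// BPF has no conditional-move instruction, so the Select* pseudos below are
// marked usesCustomInserter and are expanded after instruction selection
// (BPFTargetLowering::EmitInstrWithCustomInserter) into a conditional branch
// plus register copies.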
let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def Select : Pseudo<(outs GPR:$dst),
                      (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                      "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                      [(set i64:$dst,
                        (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm),
                                     i64:$src, i64:$src2))]>;
  def Select_Ri : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i64:$dst,
                           (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm),
                                        i64:$src, i64:$src2))]>;
  def Select_64_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                              (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm),
                                           i32:$src, i32:$src2))]>;
  def Select_Ri_64_32 : Pseudo<(outs GPR32:$dst),
                               (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i32:$dst,
                                 (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm),
                                              i32:$src, i32:$src2))]>;
  def Select_32 : Pseudo<(outs GPR32:$dst),
                         (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i32:$dst,
                           (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm),
                                        i32:$src, i32:$src2))]>;
  def Select_Ri_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                              (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm),
                                           i32:$src, i32:$src2))]>;
  def Select_32_64 : Pseudo<(outs GPR:$dst),
                            (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i64:$dst,
                              (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm),
                                           i64:$src, i64:$src2))]>;
  def Select_Ri_32_64 : Pseudo<(outs GPR:$dst),
                               (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i64:$dst,
                                 (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm),
                                              i64:$src, i64:$src2))]>;
}

// load 64-bit global addr into register
def : Pat<(BPFWrapper tglobaladdr:$in), (LD_imm64 tglobaladdr:$in)>;
def : Pat<(BPFWrapper tconstpool:$in), (LD_imm64 tconstpool:$in)>;

// 0xffffFFFF doesn't fit into simm32, optimize common case
def : Pat<(i64 (and (i64 GPR:$src), 0xffffFFFF)),
          (SRL_ri (SLL_ri (i64 GPR:$src), 32), 32)>;

// Calls
def : Pat<(BPFcall tglobaladdr:$dst), (JAL tglobaladdr:$dst)>;
def : Pat<(BPFcall texternalsym:$dst), (JAL texternalsym:$dst)>;
def : Pat<(BPFcall imm:$dst), (JAL imm:$dst)>;
def : Pat<(BPFcall GPR:$dst), (JALX GPR:$dst)>;

// Loads
let Predicates = [BPFNoALU32] in {
  def : Pat<(i64 (extloadi8 ADDRri:$src)), (i64 (LDB ADDRri:$src))>;
  def : Pat<(i64 (extloadi16 ADDRri:$src)), (i64 (LDH ADDRri:$src))>;
  def : Pat<(i64 (extloadi32 ADDRri:$src)), (i64 (LDW ADDRri:$src))>;
}

// Atomic XADD for BPFNoALU32
class XADD<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst), (ins MEMri:$addr, GPR:$val),
                 "lock *("#OpcodeStr#" *)($addr) += $val",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_ADD.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFNoALU32] in {
    def XADDW : XADD<BPF_W, "u32", atomic_load_add_i32>;
  }
}

// Atomic add, and, or, xor
class ATOMIC_NOFETCH<BPFArithOp Opc, string Opstr>
    : TYPE_LD_ST<BPF_ATOMIC.Value, BPF_DW.Value,
                 (outs GPR:$dst), (ins MEMri:$addr, GPR:$val),
                 "lock *(u64 *)($addr) "#Opstr#"= $val",
                 []> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let BPFClass = BPF_STX;
}

class ATOMIC32_NOFETCH<BPFArithOp Opc, string Opstr>
    : TYPE_LD_ST<BPF_ATOMIC.Value, BPF_W.Value,
                 (outs GPR32:$dst), (ins MEMri:$addr, GPR32:$val),
                 "lock *(u32 *)($addr) "#Opstr#"= $val",
                 []> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XADDW32 : ATOMIC32_NOFETCH<BPF_ADD, "+">;
    def XANDW32 : ATOMIC32_NOFETCH<BPF_AND, "&">;
    def XORW32 : ATOMIC32_NOFETCH<BPF_OR, "|">;
    def XXORW32 : ATOMIC32_NOFETCH<BPF_XOR, "^">;
  }

  def XADDD : ATOMIC_NOFETCH<BPF_ADD, "+">;
  def XANDD : ATOMIC_NOFETCH<BPF_AND, "&">;
  def XORD : ATOMIC_NOFETCH<BPF_OR, "|">;
  def XXORD : ATOMIC_NOFETCH<BPF_XOR, "^">;
}
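// Note: the "nofetch" atomics above assemble as "lock *(u64 *)(rX + off) op= rY"
// and do not return the old value; the fetch forms below set BPF_FETCH in the
// low bits of the imm field and assemble as
// "rY = atomic_fetch_<op>((u64 *)(rX + off), rY)", returning the old value.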
// Atomic Fetch-and-<add, and, or, xor> operations
class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
              string OpcStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst), (ins MEMri:$addr, GPR:$val),
                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
              string OpcStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR32:$dst), (ins MEMri:$addr, GPR32:$val),
                 "$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
                 [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = Opc.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_i32>;
    def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_i32>;
    def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_i32>;
    def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_i32>;
  }

  def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_i64>;
  def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_i64>;
  def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_i64>;
  def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_i64>;
}

// atomic_load_sub can be represented as a neg followed
// by an atomic_load_add.
def : Pat<(atomic_load_sub_i32 ADDRri:$addr, GPR32:$val),
          (XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
def : Pat<(atomic_load_sub_i64 ADDRri:$addr, GPR:$val),
          (XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;

// Atomic Exchange
class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR:$dst), (ins MEMri:$addr, GPR:$val),
                 "$dst = xchg_"#OpcodeStr#"($addr, $val)",
                 [(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_XCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class XCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs GPR32:$dst), (ins MEMri:$addr, GPR32:$val),
                 "$dst = xchg32_"#OpcodeStr#"($addr, $val)",
                 [(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = dst;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_XCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Constraints = "$dst = $val" in {
  let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
    def XCHGW32 : XCHG32<BPF_W, "32", atomic_swap_i32>;
  }

  def XCHGD : XCHG<BPF_DW, "64", atomic_swap_i64>;
}

// Compare-And-Exchange
class CMPXCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs), (ins MEMri:$addr, GPR:$new),
                 "r0 = cmpxchg_"#OpcodeStr#"($addr, r0, $new)",
                 [(set R0, (OpNode ADDRri:$addr, R0, GPR:$new))]> {
  bits<4> new;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = new;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_CMPXCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

class CMPXCHG32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
                 (outs), (ins MEMri:$addr, GPR32:$new),
                 "w0 = cmpxchg32_"#OpcodeStr#"($addr, w0, $new)",
                 [(set W0, (OpNode ADDRri:$addr, W0, GPR32:$new))]> {
  bits<4> new;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = new;
  let Inst{47-32} = addr{15-0}; // offset
  let Inst{7-4} = BPF_CMPXCHG.Value;
  let Inst{3-0} = BPF_FETCH.Value;
  let BPFClass = BPF_STX;
}

let Predicates = [BPFHasALU32], Defs = [W0], Uses = [W0],
    DecoderNamespace = "BPFALU32" in {
  def CMPXCHGW32 : CMPXCHG32<BPF_W, "32", atomic_cmp_swap_i32>;
}

let Defs = [R0], Uses = [R0] in {
  def CMPXCHGD : CMPXCHG<BPF_DW, "64", atomic_cmp_swap_i64>;
}

// bswap16, bswap32, bswap64
class BSWAP<BPFOpClass Class, bits<32> SizeOp, string OpcodeStr,
            BPFSrcType SrcType, list<dag> Pattern>
    : TYPE_ALU_JMP<BPF_END.Value, SrcType.Value,
                   (outs GPR:$dst), (ins GPR:$src),
                   "$dst = "#OpcodeStr#" $src",
                   Pattern> {
  bits<4> dst;

  let Inst{51-48} = dst;
  let Inst{31-0} = SizeOp;
  let BPFClass = Class;
}

let Constraints = "$dst = $src" in {
  let Predicates = [BPFHasBswap] in {
    def BSWAP16 : BSWAP<BPF_ALU64, 16, "bswap16", BPF_TO_LE,
                        [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>;
    def BSWAP32 : BSWAP<BPF_ALU64, 32, "bswap32", BPF_TO_LE,
                        [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>;
    def BSWAP64 : BSWAP<BPF_ALU64, 64, "bswap64", BPF_TO_LE,
                        [(set GPR:$dst, (bswap GPR:$src))]>;
  }

  let Predicates = [BPFNoBswap] in {
    let Predicates = [BPFIsLittleEndian] in {
      def BE16 : BSWAP<BPF_ALU, 16, "be16", BPF_TO_BE,
                       [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>;
      def BE32 : BSWAP<BPF_ALU, 32, "be32", BPF_TO_BE,
                       [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>;
      def BE64 : BSWAP<BPF_ALU, 64, "be64", BPF_TO_BE,
                       [(set GPR:$dst, (bswap GPR:$src))]>;
    }
    let Predicates = [BPFIsBigEndian] in {
      def LE16 : BSWAP<BPF_ALU, 16, "le16", BPF_TO_LE,
                       [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>;
      def LE32 : BSWAP<BPF_ALU, 32, "le32", BPF_TO_LE,
                       [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>;
      def LE64 : BSWAP<BPF_ALU, 64, "le64", BPF_TO_LE,
                       [(set GPR:$dst, (bswap GPR:$src))]>;
    }
  }
}

let Defs = [R0, R1, R2, R3, R4, R5], Uses = [R6],
    hasSideEffects = 1, hasExtraDefRegAllocReq = 1, hasExtraSrcRegAllocReq = 1,
    mayLoad = 1 in {
class LOAD_ABS<BPFWidthModifer SizeOp, string OpcodeStr, Intrinsic OpNode>
    : TYPE_LD_ST<BPF_ABS.Value, SizeOp.Value,
                 (outs), (ins GPR:$skb, i64imm:$imm),
                 "r0 = *("#OpcodeStr#" *)skb[$imm]",
                 [(set R0, (OpNode GPR:$skb, i64immSExt32:$imm))]> {
  bits<32> imm;

  let Inst{31-0} = imm;
  let BPFClass = BPF_LD;
}

class LOAD_IND<BPFWidthModifer SizeOp, string OpcodeStr, Intrinsic OpNode>
    : TYPE_LD_ST<BPF_IND.Value, SizeOp.Value,
                 (outs), (ins GPR:$skb, GPR:$val),
                 "r0 = *("#OpcodeStr#" *)skb[$val]",
                 [(set R0, (OpNode GPR:$skb, GPR:$val))]> {
  bits<4> val;

  let Inst{55-52} = val;
  let BPFClass = BPF_LD;
}
}
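// The LD_ABS/LD_IND instructions below are the legacy (cBPF-style) packet
// access loads: they implicitly take the packet/skb pointer in R6 (hence
// Uses = [R6]), return the loaded value in R0, and clobber R1-R5.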
def LD_ABS_B : LOAD_ABS<BPF_B, "u8", int_bpf_load_byte>;
def LD_ABS_H : LOAD_ABS<BPF_H, "u16", int_bpf_load_half>;
def LD_ABS_W : LOAD_ABS<BPF_W, "u32", int_bpf_load_word>;

def LD_IND_B : LOAD_IND<BPF_B, "u8", int_bpf_load_byte>;
def LD_IND_H : LOAD_IND<BPF_H, "u16", int_bpf_load_half>;
def LD_IND_W : LOAD_IND<BPF_W, "u32", int_bpf_load_word>;

let isCodeGenOnly = 1 in {
  def MOV_32_64 : ALU_RR<BPF_ALU, BPF_MOV, 0,
                         (outs GPR:$dst), (ins GPR32:$src),
                         "$dst = $src", []>;
}

let Predicates = [BPFNoMovsx] in {
def : Pat<(i64 (sext GPR32:$src)),
          (SRA_ri (SLL_ri (MOV_32_64 GPR32:$src), 32), 32)>;
}

let Predicates = [BPFHasMovsx] in {
def : Pat<(i64 (sext GPR32:$src)),
          (MOVSX_rr_32 (MOV_32_64 GPR32:$src))>;
}

def : Pat<(i64 (zext GPR32:$src)), (MOV_32_64 GPR32:$src)>;

// For i64 -> i32 truncation, use the 32-bit subregister directly.
def : Pat<(i32 (trunc GPR:$src)),
          (i32 (EXTRACT_SUBREG GPR:$src, sub_32))>;

// For i32 -> i64 anyext, we don't care about the high bits.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

class STORE32<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs), (ins GPR32:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi32<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE32<Opc, OpcodeStr, [(OpNode i32:$src, ADDRri:$addr)]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def STW32 : STOREi32<BPF_W, "u32", store>;
  def STH32 : STOREi32<BPF_H, "u16", truncstorei16>;
  def STB32 : STOREi32<BPF_B, "u8", truncstorei8>;
}

class LOAD32<BPFWidthModifer SizeOp, BPFModeModifer ModOp, string OpcodeStr,
             list<dag> Pattern>
    : TYPE_LD_ST<ModOp.Value, SizeOp.Value,
                 (outs GPR32:$dst), (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi32<BPFWidthModifer SizeOp, BPFModeModifer ModOp, string OpcodeStr,
              PatFrag OpNode>
    : LOAD32<SizeOp, ModOp, OpcodeStr, [(set i32:$dst, (OpNode ADDRri:$addr))]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def LDW32 : LOADi32<BPF_W, BPF_MEM, "u32", load>;
  def LDH32 : LOADi32<BPF_H, BPF_MEM, "u16", zextloadi16>;
  def LDB32 : LOADi32<BPF_B, BPF_MEM, "u8", zextloadi8>;
}

let Predicates = [BPFHasALU32] in {
  def : Pat<(truncstorei8 GPR:$src, ADDRri:$dst),
            (STB32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei16 GPR:$src, ADDRri:$dst),
            (STH32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei32 GPR:$src, ADDRri:$dst),
            (STW32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(i32 (extloadi8 ADDRri:$src)), (i32 (LDB32 ADDRri:$src))>;
  def : Pat<(i32 (extloadi16 ADDRri:$src)), (i32 (LDH32 ADDRri:$src))>;

  let Predicates = [BPFHasLdsx] in {
    def : Pat<(i32 (sextloadi8 ADDRri:$src)),
              (EXTRACT_SUBREG (LDBSX ADDRri:$src), sub_32)>;
    def : Pat<(i32 (sextloadi16 ADDRri:$src)),
              (EXTRACT_SUBREG (LDHSX ADDRri:$src), sub_32)>;
  }

  def : Pat<(i64 (zextloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
}

let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def MEMCPY : Pseudo<
    (outs),
    (ins GPR:$dst, GPR:$src, i64imm:$len, i64imm:$align, variable_ops),
    "#memcpy dst: $dst, src: $src, len: $len, align: $align",
    [(BPFmemcpy GPR:$dst, GPR:$src, imm:$len, imm:$align)]>;
}
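// MEMCPY is matched from BPFISD::MEMCPY, which the target emits for small
// constant-length memcpy calls; the custom inserter then expands the pseudo
// into an inline sequence of loads and stores, so no runtime call is made.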