- //===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- //
- // This file defines all of the RISCV-specific intrinsics.
- //
- //===----------------------------------------------------------------------===//
- //===----------------------------------------------------------------------===//
- // Atomics
- // Atomic Intrinsics have multiple versions for different access widths, which
- // all follow one of the following signatures (depending on how many arguments
- // they require). We carefully instantiate only specific versions of these for
- // specific integer widths, rather than using `llvm_anyint_ty`.
- //
- // In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
- // canonical names, and the intrinsics used in the code will have a name
- // suffixed with the pointer type they are specialised for (denoted `<p>` in the
- // names below), in order to avoid type conflicts.
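- //
- // For example (an illustrative sketch, not a definition in this file), the
- // xchg variant specialised for i32 values accessed through a default
- // address space pointer would be referred to as:
- //   i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0i32(i32*, i32, i32, i32 imm)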
- let TargetPrefix = "riscv" in {
- // T @llvm.<name>.T.<p>(any*, T, T, T imm);
- class MaskedAtomicRMWFourArg<LLVMType itype>
- : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
- [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
- // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
- class MaskedAtomicRMWFiveArg<LLVMType itype>
- : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
- [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
- // We define 32-bit and 64-bit variants of the above, where T stands for i32
- // or i64 respectively:
- multiclass MaskedAtomicRMWFourArgIntrinsics {
- // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
- def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
- // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
- def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
- }
- multiclass MaskedAtomicRMWFiveArgIntrinsics {
- // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
- def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
- // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
- def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
- }
- // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
- defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
- // Signed min and max need an extra operand to do sign extension with.
- defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
- defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
- // Unsigned min and max don't need the extra operand.
- defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
- defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
- // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
- defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
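- //
- // Usage sketch (assuming the trailing immediate uses LLVM's AtomicOrdering
- // encoding, e.g. 7 for sequentially consistent):
- //   %old = call i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
- //              i32* %aligned.addr, i32 %incr, i32 %mask, i32 7)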
- } // TargetPrefix = "riscv"
- //===----------------------------------------------------------------------===//
- // Bitmanip (Bit Manipulation) Extension
- let TargetPrefix = "riscv" in {
- class BitManipGPRIntrinsics
- : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
- class BitManipGPRGPRIntrinsics
- : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
- class BitManipGPRGPRGRIntrinsics
- : Intrinsic<[llvm_any_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
- // Zbb
- def int_riscv_orc_b : BitManipGPRIntrinsics;
- // Zbc or Zbkc
- def int_riscv_clmul : BitManipGPRGPRIntrinsics;
- def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
- // Zbc
- def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
- // Zbe
- def int_riscv_bcompress : BitManipGPRGPRIntrinsics;
- def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;
- // Zbf
- def int_riscv_bfp : BitManipGPRGPRIntrinsics;
- // Zbp
- def int_riscv_grev : BitManipGPRGPRIntrinsics;
- def int_riscv_gorc : BitManipGPRGPRIntrinsics;
- def int_riscv_shfl : BitManipGPRGPRIntrinsics;
- def int_riscv_unshfl : BitManipGPRGPRIntrinsics;
- def int_riscv_xperm_n : BitManipGPRGPRIntrinsics;
- def int_riscv_xperm_b : BitManipGPRGPRIntrinsics;
- def int_riscv_xperm_h : BitManipGPRGPRIntrinsics;
- def int_riscv_xperm_w : BitManipGPRGPRIntrinsics;
- // Zbr
- def int_riscv_crc32_b : BitManipGPRIntrinsics;
- def int_riscv_crc32_h : BitManipGPRIntrinsics;
- def int_riscv_crc32_w : BitManipGPRIntrinsics;
- def int_riscv_crc32_d : BitManipGPRIntrinsics;
- def int_riscv_crc32c_b : BitManipGPRIntrinsics;
- def int_riscv_crc32c_h : BitManipGPRIntrinsics;
- def int_riscv_crc32c_w : BitManipGPRIntrinsics;
- def int_riscv_crc32c_d : BitManipGPRIntrinsics;
- // Zbt
- def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
- def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
- // Zbkb
- def int_riscv_brev8 : BitManipGPRIntrinsics;
- def int_riscv_zip : BitManipGPRIntrinsics;
- def int_riscv_unzip : BitManipGPRIntrinsics;
- // Zbkx
- def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
- def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
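- //
- // As an illustration of how the overloaded definitions above are used, a
- // call is mangled on the operand width (a sketch, shown for int_riscv_orc_b
- // on RV64):
- //   declare i64 @llvm.riscv.orc.b.i64(i64)
- //   %y = call i64 @llvm.riscv.orc.b.i64(i64 %x)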
- } // TargetPrefix = "riscv"
- //===----------------------------------------------------------------------===//
- // Vectors
- // The intrinsic does not have any operand that must be extended.
- defvar NoSplatOperand = 0xF;
- // The intrinsic does not have a VL operand.
- // (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
- defvar NoVLOperand = 0x1F;
- class RISCVVIntrinsic {
- Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
- // These intrinsics may accept illegal integer values in their llvm_any_ty
- // operand, so they have to be extended.
- bits<4> SplatOperand = NoSplatOperand;
- bits<5> VLOperand = NoVLOperand;
- }
- let TargetPrefix = "riscv" in {
- // We use anyint here but we only support XLen.
- def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
- /* AVL */ [LLVMMatchType<0>,
- /* VSEW */ LLVMMatchType<0>,
- /* VLMUL */ LLVMMatchType<0>],
- [IntrNoMem, IntrHasSideEffects,
- ImmArg<ArgIndex<1>>,
- ImmArg<ArgIndex<2>>]>;
- def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
- /* VSEW */ [LLVMMatchType<0>,
- /* VLMUL */ LLVMMatchType<0>],
- [IntrNoMem, IntrHasSideEffects,
- ImmArg<ArgIndex<0>>,
- ImmArg<ArgIndex<1>>]>;
- // Versions without side effects: these are easier to optimize and can be
- // used when only the returned vector length is of interest.
- def int_riscv_vsetvli_opt : Intrinsic<[llvm_anyint_ty],
- /* AVL */ [LLVMMatchType<0>,
- /* VSEW */ LLVMMatchType<0>,
- /* VLMUL */ LLVMMatchType<0>],
- [IntrNoMem,
- ImmArg<ArgIndex<1>>,
- ImmArg<ArgIndex<2>>]>;
- def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
- /* VSEW */ [LLVMMatchType<0>,
- /* VLMUL */ LLVMMatchType<0>],
- [IntrNoMem,
- ImmArg<ArgIndex<0>>,
- ImmArg<ArgIndex<1>>]>;
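- // Usage sketch: the VSEW/VLMUL operands use the vtype field encodings (an
- // assumption for illustration: SEW=32 encodes as 2, LMUL=1 as 0):
- //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)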
- // For unit stride mask load
- // Input: (pointer, vl)
- class RISCVUSMLoad
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For unit stride load
- // Input: (passthru, pointer, vl)
- class RISCVUSLoad
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
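- // A concrete instantiation of this class looks roughly as follows (the
- // mangled suffix is a sketch following LLVM's overloaded-intrinsic naming):
- //   declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
- //       <vscale x 2 x i32> %passthru, <vscale x 2 x i32>* %ptr, i64 %vl)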
- // For unit stride fault-only-first load
- // Input: (passthru, pointer, vl)
- // Output: (data, vl)
- // NOTE: We model this with default memory properties since we model writing
- // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
- // not work.
- class RISCVUSLoadFF
- : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>]>,
- RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For unit stride load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
- class RISCVUSLoadMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For unit stride fault-only-first load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
- // Output: (data, vl)
- // NOTE: We model this with default memory properties since we model writing
- // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
- // not work.
- class RISCVUSLoadFFMask
- : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMMatchType<1>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For strided load with passthru operand
- // Input: (passthru, pointer, stride, vl)
- class RISCVSLoad
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For strided load with mask
- // Input: (maskedoff, pointer, stride, mask, vl, ta)
- class RISCVSLoadMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
- LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For indexed load with passthru operand
- // Input: (passthru, pointer, index, vl)
- class RISCVILoad
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyvector_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For indexed load with mask
- // Input: (maskedoff, pointer, index, mask, vl, ta)
- class RISCVILoadMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For unit stride store
- // Input: (vector_in, pointer, vl)
- class RISCVUSStore
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For unit stride store with mask
- // Input: (vector_in, pointer, mask, vl)
- class RISCVUSStoreMask
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For strided store
- // Input: (vector_in, pointer, stride, vl)
- class RISCVSStore
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For strided store with mask
- // Input: (vector_in, pointer, stride, mask, vl)
- class RISCVSStoreMask
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For indexed store
- // Input: (vector_in, pointer, index, vl)
- class RISCVIStore
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>,
- llvm_anyint_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For indexed store with mask
- // Input: (vector_in, pointer, index, mask, vl)
- class RISCVIStoreMask
- : Intrinsic<[],
- [llvm_anyvector_ty,
- LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
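- // For illustration, the unit stride store class instantiates to calls of
- // roughly this shape (mangling is a sketch):
- //   call void @llvm.riscv.vse.nxv2i32.i64(
- //       <vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, i64 %vl)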
- // The destination vector type is the same as the source vector.
- // Input: (vector_in, vl)
- class RISCVUnaryAANoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // The destination vector type is the same as the first source vector
- // (with mask).
- // Input: (vector_in, mask, vl, ta)
- class RISCVUnaryAAMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<1>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- class RISCVUnaryAAMaskNoTA
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // The destination vector type is the same as the first and second source
- // vectors.
- // Input: (vector_in, vector_in, vl)
- class RISCVBinaryAAANoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first and second source
- // vectors.
- // Input: (vector_in, int_vector_in, vl)
- class RISCVRGatherVVNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first and second source
- // vectors.
- // Input: (vector_in, vector_in, int_vector_in, vl, ta)
- class RISCVRGatherVVMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<1>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // Input: (vector_in, int16_vector_in, vl)
- class RISCVRGatherEI16VVNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first and second source
- // vectors.
- // Input: (vector_in, vector_in, int16_vector_in, vl, ta)
- class RISCVRGatherEI16VVMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<1>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // The destination vector type is the same as the first source vector, and
- // the second operand is an XLen scalar.
- // Input: (vector_in, xlen_in, vl)
- class RISCVGatherVXNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first source vector (with
- // mask). The second operand is an XLen scalar.
- // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
- class RISCVGatherVXMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
- LLVMMatchType<1>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // The destination vector type is the same as the first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVBinaryAAXNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first source vector
- // (with mask).
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVBinaryAAXMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
- let VLOperand = 4;
- }
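- // A masked binary operation therefore takes its operands in this order,
- // e.g. for vadd (a sketch; the trailing immediate is the tail policy,
- // assumed here to be 1 for tail agnostic):
- //   %r = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(
- //       <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> %a,
- //       <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i64 %vl, i64 1)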
- // The destination vector type is the same as the first source vector. The
- // second source operand must match the destination type or be an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVBinaryAAShiftNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is the same as the first source vector
- // (with mask).
- // The second source operand must match the destination type or be an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVBinaryAAShiftMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // The destination vector type is NOT the same as the first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVBinaryABXNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
- }
- // The destination vector type is NOT the same as the first source vector
- // (with mask).
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVBinaryABXMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<3>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
- let VLOperand = 4;
- }
- // The destination vector type is NOT the same as the first source vector. The
- // second source operand must match the destination type or be an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVBinaryABShiftNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is NOT the same as the first source vector
- // (with mask).
- // The second source operand must match the destination type or be an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVBinaryABShiftMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<3>],
- [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For binary operations with V0 as input.
- // Input: (vector_in, vector_in/scalar_in, V0, vl)
- class RISCVBinaryWithV0
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 3;
- }
- // For binary operations with mask type output and V0 as input.
- // Output: (mask type output)
- // Input: (vector_in, vector_in/scalar_in, V0, vl)
- class RISCVBinaryMOutWithV0
- : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [llvm_anyvector_ty, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 3;
- }
- // For binary operations with mask type output.
- // Output: (mask type output)
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVBinaryMOut
- : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
- }
- // For binary operations with mask type output without mask.
- // Output: (mask type output)
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVCompareNoMask
- : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
- }
- // For binary operations with mask type output with mask.
- // Output: (mask type output)
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
- class RISCVCompareMask
- : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyvector_ty, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 2;
- let VLOperand = 4;
- }
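- // Compares return an i1 vector with the element count of the source
- // vectors, e.g. (mangling is a sketch):
- //   %m = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(
- //       <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)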
- // For FP classify operations.
- // Output: (bit mask type output)
- // Input: (vector_in, vl)
- class RISCVClassifyNoMask
- : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
- [llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For FP classify operations with mask.
- // Output: (bit mask type output)
- // Input: (maskedoff, vector_in, mask, vl)
- class RISCVClassifyMask
- : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
- [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For Saturating binary operations.
- // The destination vector type is the same as the first source vector.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVSaturatingBinaryAAXNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 2;
- }
- // For Saturating binary operations with mask.
- // The destination vector type is the same as the first source vector.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVSaturatingBinaryAAXMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let SplatOperand = 2;
- let VLOperand = 4;
- }
- // For Saturating binary operations.
- // The destination vector type is the same as the first source vector.
- // The second source operand matches the destination type or is an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVSaturatingBinaryAAShiftNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For Saturating binary operations with mask.
- // The destination vector type is the same as the first source vector.
- // The second source operand matches the destination type or is an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVSaturatingBinaryAAShiftMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For Saturating binary operations.
- // The destination vector type is NOT the same as the first source vector.
- // The second source operand matches the destination type or is an XLen scalar.
- // Input: (vector_in, vector_in/scalar_in, vl)
- class RISCVSaturatingBinaryABShiftNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
- [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For Saturating binary operations with mask.
- // The destination vector type is NOT the same as the first source vector
- // (with mask).
- // The second source operand matches the destination type or is an XLen scalar.
- // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
- class RISCVSaturatingBinaryABShiftMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<3>],
- [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- class RISCVTernaryAAAXNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
- LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- class RISCVTernaryAAAXMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- class RISCVTernaryAAXANoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 3;
- }
- class RISCVTernaryAAXAMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 4;
- }
- class RISCVTernaryWideNoMask
- : Intrinsic< [llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
- llvm_anyint_ty],
- [IntrNoMem] >, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 3;
- }
- class RISCVTernaryWideMask
- : Intrinsic< [llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let SplatOperand = 1;
- let VLOperand = 4;
- }
- // For Reduction ternary operations.
- // The destination vector type is the same as the first and third source
- // vectors.
- // Input: (vector_in, vector_in, vector_in, vl)
- class RISCVReductionNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For Reduction ternary operations with mask.
- // The destination vector type is the same as the first and third source
- // vectors. The mask type comes from the second source vector.
- // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
- class RISCVReductionMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 4;
- }
- // For unary operations with scalar type output without mask
- // Output: (scalar type)
- // Input: (vector_in, vl)
- class RISCVMaskUnarySOutNoMask
- : Intrinsic<[LLVMMatchType<1>],
- [llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For unary operations with scalar type output with mask
- // Output: (scalar type)
- // Input: (vector_in, mask, vl)
- class RISCVMaskUnarySOutMask
- : Intrinsic<[LLVMMatchType<1>],
- [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // The destination vector type is NOT the same as the source vector.
- // Input: (vector_in, vl)
- class RISCVUnaryABNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // The destination vector type is NOT the same as the source vector (with mask).
- // Input: (maskedoff, vector_in, mask, vl, ta)
- class RISCVUnaryABMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
- llvm_anyint_ty, LLVMMatchType<2>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For unary operations with the same vector type in/out without mask
- // Output: (vector)
- // Input: (vector_in, vl)
- class RISCVUnaryNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For mask unary operations with mask type in/out with mask
- // Output: (mask type output)
- // Input: (mask type maskedoff, mask type vector_in, mask, vl)
- class RISCVMaskUnaryMOutMask
- : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // Output: (vector)
- // Input: (vl)
- class RISCVNullaryIntrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 0;
- }
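- // vid, vmclr and vmset below use this shape; a usage sketch:
- //   %idx = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 %vl)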
- // For Conversion unary operations.
- // Input: (vector_in, vl)
- class RISCVConversionNoMask
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For Conversion unary operations with mask.
- // Input: (maskedoff, vector_in, mask, vl, ta)
- class RISCVConversionMask
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
- LLVMMatchType<2>],
- [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // For unit stride segment load
- // Input: (pointer, vl)
- class RISCVUSSegLoad<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- [LLVMPointerToElt<0>, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For unit stride segment load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
- class RISCVUSSegLoadMask<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- !listconcat(!listsplat(LLVMMatchType<0>, nf),
- [LLVMPointerToElt<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty, LLVMMatchType<1>]),
- [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = !add(nf, 2);
- }
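- // A segment load returns an aggregate of nf identical vector values, e.g.
- // for nf = 2 (a sketch):
- //   %seg = call { <vscale x 4 x i16>, <vscale x 4 x i16> }
- //       @llvm.riscv.vlseg2.nxv4i16.i64(i16* %ptr, i64 %vl)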
- // For unit stride fault-only-first segment load
- // Input: (pointer, vl)
- // Output: (data, vl)
- // NOTE: We model this with default memory properties since we model writing
- // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
- // not work.
- class RISCVUSSegLoadFF<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1)), [llvm_anyint_ty]),
- [LLVMPointerToElt<0>, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // For unit stride fault-only-first segment load with mask
- // Input: (maskedoff, pointer, mask, vl, ta)
- // Output: (data, vl)
- // NOTE: We model this with default memory properties since we model writing
- // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
- // not work.
- class RISCVUSSegLoadFFMask<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1)), [llvm_anyint_ty]),
- !listconcat(!listsplat(LLVMMatchType<0>, nf),
- [LLVMPointerToElt<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMMatchType<1>, LLVMMatchType<1>]),
- [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
- RISCVVIntrinsic {
- let VLOperand = !add(nf, 2);
- }
- // For strided segment load
- // Input: (pointer, offset, vl)
- class RISCVSSegLoad<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For strided segment load with mask
- // Input: (maskedoff, pointer, offset, mask, vl, ta)
- class RISCVSSegLoadMask<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- !listconcat(!listsplat(LLVMMatchType<0>, nf),
- [LLVMPointerToElt<0>,
- llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMMatchType<1>, LLVMMatchType<1>]),
- [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = !add(nf, 3);
- }
- // For indexed segment load
- // Input: (pointer, index, vl)
- class RISCVISegLoad<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
- [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- // For indexed segment load with mask
- // Input: (maskedoff, pointer, index, mask, vl, ta)
- class RISCVISegLoadMask<int nf>
- : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
- !add(nf, -1))),
- !listconcat(!listsplat(LLVMMatchType<0>, nf),
- [LLVMPointerToElt<0>,
- llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty, LLVMMatchType<2>]),
- [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
- RISCVVIntrinsic {
- let VLOperand = !add(nf, 3);
- }
- // For unit stride segment store
- // Input: (value, pointer, vl)
- class RISCVUSSegStore<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>, llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 1);
- }
- // For unit stride segment store with mask
- // Input: (value, pointer, mask, vl)
- class RISCVUSSegStoreMask<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 2);
- }
- // For strided segment store
- // Input: (value, pointer, offset, vl)
- class RISCVSSegStore<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>, llvm_anyint_ty,
- LLVMMatchType<1>]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 2);
- }
- // For strided segment store with mask
- // Input: (value, pointer, offset, mask, vl)
- class RISCVSSegStoreMask<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>, llvm_anyint_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMMatchType<1>]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 3);
- }
- // For indexed segment store
- // Input: (value, pointer, index, vl)
- class RISCVISegStore<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>, llvm_anyvector_ty,
- llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 2);
- }
- // For indexed segment store with mask
- // Input: (value, pointer, index, mask, vl)
- class RISCVISegStoreMask<int nf>
- : Intrinsic<[],
- !listconcat([llvm_anyvector_ty],
- !listsplat(LLVMMatchType<0>, !add(nf, -1)),
- [LLVMPointerToElt<0>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty]),
- [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
- let VLOperand = !add(nf, 3);
- }
- multiclass RISCVUSLoad {
- def "int_riscv_" # NAME : RISCVUSLoad;
- def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
- }
- multiclass RISCVUSLoadFF {
- def "int_riscv_" # NAME : RISCVUSLoadFF;
- def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
- }
- multiclass RISCVSLoad {
- def "int_riscv_" # NAME : RISCVSLoad;
- def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
- }
- multiclass RISCVILoad {
- def "int_riscv_" # NAME : RISCVILoad;
- def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
- }
- multiclass RISCVUSStore {
- def "int_riscv_" # NAME : RISCVUSStore;
- def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
- }
- multiclass RISCVSStore {
- def "int_riscv_" # NAME : RISCVSStore;
- def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
- }
- multiclass RISCVIStore {
- def "int_riscv_" # NAME : RISCVIStore;
- def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
- }
- multiclass RISCVUnaryAA {
- def "int_riscv_" # NAME : RISCVUnaryAANoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
- }
- multiclass RISCVUnaryAB {
- def "int_riscv_" # NAME : RISCVUnaryABNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
- }
- // AAX means the destination type (A) is the same as the first source
- // type (A). X means any type for the second source operand.
- multiclass RISCVBinaryAAX {
- def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
- }
- // Like RISCVBinaryAAX, but the second operand is used as a shift amount,
- // so it must be a vector or an XLen scalar.
- multiclass RISCVBinaryAAShift {
- def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
- }
- multiclass RISCVRGatherVV {
- def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
- }
- multiclass RISCVRGatherVX {
- def "int_riscv_" # NAME : RISCVGatherVXNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
- }
- multiclass RISCVRGatherEI16VV {
- def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
- }
- // ABX means the destination type (A) is different from the first source
- // type (B). X means any type for the second source operand.
- multiclass RISCVBinaryABX {
- def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
- }
- // Like RISCVBinaryABX, but the second operand is used as a shift amount,
- // so it must be a vector or an XLen scalar.
- multiclass RISCVBinaryABShift {
- def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
- }
- multiclass RISCVBinaryWithV0 {
- def "int_riscv_" # NAME : RISCVBinaryWithV0;
- }
- multiclass RISCVBinaryMaskOutWithV0 {
- def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
- }
- multiclass RISCVBinaryMaskOut {
- def "int_riscv_" # NAME : RISCVBinaryMOut;
- }
- multiclass RISCVSaturatingBinaryAAX {
- def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
- }
- multiclass RISCVSaturatingBinaryAAShift {
- def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
- }
- multiclass RISCVSaturatingBinaryABShift {
- def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
- }
- multiclass RISCVTernaryAAAX {
- def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
- }
- multiclass RISCVTernaryAAXA {
- def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
- }
- multiclass RISCVCompare {
- def "int_riscv_" # NAME : RISCVCompareNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
- }
- multiclass RISCVClassify {
- def "int_riscv_" # NAME : RISCVClassifyNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
- }
- multiclass RISCVTernaryWide {
- def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
- }
- multiclass RISCVReduction {
- def "int_riscv_" # NAME : RISCVReductionNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
- }
- multiclass RISCVMaskUnarySOut {
- def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
- }
- multiclass RISCVMaskUnaryMOut {
- def "int_riscv_" # NAME : RISCVUnaryNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
- }
- multiclass RISCVConversion {
- def "int_riscv_" #NAME :RISCVConversionNoMask;
- def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
- }
- multiclass RISCVUSSegLoad<int nf> {
- def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
- }
- multiclass RISCVUSSegLoadFF<int nf> {
- def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
- }
- multiclass RISCVSSegLoad<int nf> {
- def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
- }
- multiclass RISCVISegLoad<int nf> {
- def "int_riscv_" # NAME : RISCVISegLoad<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
- }
- multiclass RISCVUSSegStore<int nf> {
- def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
- }
- multiclass RISCVSSegStore<int nf> {
- def "int_riscv_" # NAME : RISCVSSegStore<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
- }
- multiclass RISCVISegStore<int nf> {
- def "int_riscv_" # NAME : RISCVISegStore<nf>;
- def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
- }
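- // Each multiclass above stamps out an unmasked/masked pair; for example,
- // "defm vadd : RISCVBinaryAAX;" below defines both int_riscv_vadd and
- // int_riscv_vadd_mask.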
- defm vle : RISCVUSLoad;
- defm vleff : RISCVUSLoadFF;
- defm vse : RISCVUSStore;
- defm vlse : RISCVSLoad;
- defm vsse : RISCVSStore;
- defm vluxei : RISCVILoad;
- defm vloxei : RISCVILoad;
- defm vsoxei : RISCVIStore;
- defm vsuxei : RISCVIStore;
- def int_riscv_vlm : RISCVUSMLoad;
- def int_riscv_vsm : RISCVUSStore;
- defm vadd : RISCVBinaryAAX;
- defm vsub : RISCVBinaryAAX;
- defm vrsub : RISCVBinaryAAX;
- defm vwaddu : RISCVBinaryABX;
- defm vwadd : RISCVBinaryABX;
- defm vwaddu_w : RISCVBinaryAAX;
- defm vwadd_w : RISCVBinaryAAX;
- defm vwsubu : RISCVBinaryABX;
- defm vwsub : RISCVBinaryABX;
- defm vwsubu_w : RISCVBinaryAAX;
- defm vwsub_w : RISCVBinaryAAX;
- defm vzext : RISCVUnaryAB;
- defm vsext : RISCVUnaryAB;
- defm vadc : RISCVBinaryWithV0;
- defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
- defm vmadc : RISCVBinaryMaskOut;
- defm vsbc : RISCVBinaryWithV0;
- defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
- defm vmsbc : RISCVBinaryMaskOut;
- defm vand : RISCVBinaryAAX;
- defm vor : RISCVBinaryAAX;
- defm vxor : RISCVBinaryAAX;
- defm vsll : RISCVBinaryAAShift;
- defm vsrl : RISCVBinaryAAShift;
- defm vsra : RISCVBinaryAAShift;
- defm vnsrl : RISCVBinaryABShift;
- defm vnsra : RISCVBinaryABShift;
- defm vmseq : RISCVCompare;
- defm vmsne : RISCVCompare;
- defm vmsltu : RISCVCompare;
- defm vmslt : RISCVCompare;
- defm vmsleu : RISCVCompare;
- defm vmsle : RISCVCompare;
- defm vmsgtu : RISCVCompare;
- defm vmsgt : RISCVCompare;
- defm vmsgeu : RISCVCompare;
- defm vmsge : RISCVCompare;
- defm vminu : RISCVBinaryAAX;
- defm vmin : RISCVBinaryAAX;
- defm vmaxu : RISCVBinaryAAX;
- defm vmax : RISCVBinaryAAX;
- defm vmul : RISCVBinaryAAX;
- defm vmulh : RISCVBinaryAAX;
- defm vmulhu : RISCVBinaryAAX;
- defm vmulhsu : RISCVBinaryAAX;
- defm vdivu : RISCVBinaryAAX;
- defm vdiv : RISCVBinaryAAX;
- defm vremu : RISCVBinaryAAX;
- defm vrem : RISCVBinaryAAX;
- defm vwmul : RISCVBinaryABX;
- defm vwmulu : RISCVBinaryABX;
- defm vwmulsu : RISCVBinaryABX;
- defm vmacc : RISCVTernaryAAXA;
- defm vnmsac : RISCVTernaryAAXA;
- defm vmadd : RISCVTernaryAAXA;
- defm vnmsub : RISCVTernaryAAXA;
- defm vwmaccu : RISCVTernaryWide;
- defm vwmacc : RISCVTernaryWide;
- defm vwmaccus : RISCVTernaryWide;
- defm vwmaccsu : RISCVTernaryWide;
- defm vfadd : RISCVBinaryAAX;
- defm vfsub : RISCVBinaryAAX;
- defm vfrsub : RISCVBinaryAAX;
- defm vfwadd : RISCVBinaryABX;
- defm vfwsub : RISCVBinaryABX;
- defm vfwadd_w : RISCVBinaryAAX;
- defm vfwsub_w : RISCVBinaryAAX;
- defm vsaddu : RISCVSaturatingBinaryAAX;
- defm vsadd : RISCVSaturatingBinaryAAX;
- defm vssubu : RISCVSaturatingBinaryAAX;
- defm vssub : RISCVSaturatingBinaryAAX;
- defm vmerge : RISCVBinaryWithV0;
- def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
- [LLVMVectorElementType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
- [LLVMVectorElementType<0>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
- def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, LLVMVectorElementType<0>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
- [llvm_anyfloat_ty],
- [IntrNoMem]>, RISCVVIntrinsic;
- def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>, LLVMVectorElementType<0>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- defm vfmul : RISCVBinaryAAX;
- defm vfdiv : RISCVBinaryAAX;
- defm vfrdiv : RISCVBinaryAAX;
- defm vfwmul : RISCVBinaryABX;
- defm vfmacc : RISCVTernaryAAXA;
- defm vfnmacc : RISCVTernaryAAXA;
- defm vfmsac : RISCVTernaryAAXA;
- defm vfnmsac : RISCVTernaryAAXA;
- defm vfmadd : RISCVTernaryAAXA;
- defm vfnmadd : RISCVTernaryAAXA;
- defm vfmsub : RISCVTernaryAAXA;
- defm vfnmsub : RISCVTernaryAAXA;
- defm vfwmacc : RISCVTernaryWide;
- defm vfwnmacc : RISCVTernaryWide;
- defm vfwmsac : RISCVTernaryWide;
- defm vfwnmsac : RISCVTernaryWide;
- defm vfsqrt : RISCVUnaryAA;
- defm vfrsqrt7 : RISCVUnaryAA;
- defm vfrec7 : RISCVUnaryAA;
- defm vfmin : RISCVBinaryAAX;
- defm vfmax : RISCVBinaryAAX;
- defm vfsgnj : RISCVBinaryAAX;
- defm vfsgnjn : RISCVBinaryAAX;
- defm vfsgnjx : RISCVBinaryAAX;
- defm vfclass : RISCVClassify;
- defm vfmerge : RISCVBinaryWithV0;
- defm vslideup : RISCVTernaryAAAX;
- defm vslidedown : RISCVTernaryAAAX;
- defm vslide1up : RISCVBinaryAAX;
- defm vslide1down : RISCVBinaryAAX;
- defm vfslide1up : RISCVBinaryAAX;
- defm vfslide1down : RISCVBinaryAAX;
- defm vrgather_vv : RISCVRGatherVV;
- defm vrgather_vx : RISCVRGatherVX;
- defm vrgatherei16_vv : RISCVRGatherEI16VV;
- def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;
- defm vaaddu : RISCVSaturatingBinaryAAX;
- defm vaadd : RISCVSaturatingBinaryAAX;
- defm vasubu : RISCVSaturatingBinaryAAX;
- defm vasub : RISCVSaturatingBinaryAAX;
- defm vsmul : RISCVSaturatingBinaryAAX;
- defm vssrl : RISCVSaturatingBinaryAAShift;
- defm vssra : RISCVSaturatingBinaryAAShift;
- defm vnclipu : RISCVSaturatingBinaryABShift;
- defm vnclip : RISCVSaturatingBinaryABShift;
- defm vmfeq : RISCVCompare;
- defm vmfne : RISCVCompare;
- defm vmflt : RISCVCompare;
- defm vmfle : RISCVCompare;
- defm vmfgt : RISCVCompare;
- defm vmfge : RISCVCompare;
- defm vredsum : RISCVReduction;
- defm vredand : RISCVReduction;
- defm vredor : RISCVReduction;
- defm vredxor : RISCVReduction;
- defm vredminu : RISCVReduction;
- defm vredmin : RISCVReduction;
- defm vredmaxu : RISCVReduction;
- defm vredmax : RISCVReduction;
- defm vwredsumu : RISCVReduction;
- defm vwredsum : RISCVReduction;
- defm vfredosum : RISCVReduction;
- defm vfredusum : RISCVReduction;
- defm vfredmin : RISCVReduction;
- defm vfredmax : RISCVReduction;
- defm vfwredusum : RISCVReduction;
- defm vfwredosum : RISCVReduction;
- def int_riscv_vmand : RISCVBinaryAAANoMask;
- def int_riscv_vmnand : RISCVBinaryAAANoMask;
- def int_riscv_vmandn : RISCVBinaryAAANoMask;
- def int_riscv_vmxor : RISCVBinaryAAANoMask;
- def int_riscv_vmor : RISCVBinaryAAANoMask;
- def int_riscv_vmnor : RISCVBinaryAAANoMask;
- def int_riscv_vmorn : RISCVBinaryAAANoMask;
- def int_riscv_vmxnor : RISCVBinaryAAANoMask;
- def int_riscv_vmclr : RISCVNullaryIntrinsic;
- def int_riscv_vmset : RISCVNullaryIntrinsic;
- defm vcpop : RISCVMaskUnarySOut;
- defm vfirst : RISCVMaskUnarySOut;
- defm vmsbf : RISCVMaskUnaryMOut;
- defm vmsof : RISCVMaskUnaryMOut;
- defm vmsif : RISCVMaskUnaryMOut;
- defm vfcvt_xu_f_v : RISCVConversion;
- defm vfcvt_x_f_v : RISCVConversion;
- defm vfcvt_rtz_xu_f_v : RISCVConversion;
- defm vfcvt_rtz_x_f_v : RISCVConversion;
- defm vfcvt_f_xu_v : RISCVConversion;
- defm vfcvt_f_x_v : RISCVConversion;
- defm vfwcvt_f_xu_v : RISCVConversion;
- defm vfwcvt_f_x_v : RISCVConversion;
- defm vfwcvt_xu_f_v : RISCVConversion;
- defm vfwcvt_x_f_v : RISCVConversion;
- defm vfwcvt_rtz_xu_f_v : RISCVConversion;
- defm vfwcvt_rtz_x_f_v : RISCVConversion;
- defm vfwcvt_f_f_v : RISCVConversion;
- defm vfncvt_f_xu_w : RISCVConversion;
- defm vfncvt_f_x_w : RISCVConversion;
- defm vfncvt_xu_f_w : RISCVConversion;
- defm vfncvt_x_f_w : RISCVConversion;
- defm vfncvt_rtz_xu_f_w : RISCVConversion;
- defm vfncvt_rtz_x_f_w : RISCVConversion;
- defm vfncvt_f_f_w : RISCVConversion;
- defm vfncvt_rod_f_f_w : RISCVConversion;
- // Output: (vector)
- // Input: (mask type input, vl)
- def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
- [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
- }
- // Output: (vector)
- // Input: (maskedoff, mask type vector_in, mask, vl)
- def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 3;
- }
- // Output: (vector)
- // Input: (vl)
- def int_riscv_vid : RISCVNullaryIntrinsic;
- // Output: (vector)
- // Input: (maskedoff, mask, vl)
- def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
- llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 2;
- }
- foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
- defm vlseg # nf : RISCVUSSegLoad<nf>;
- defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
- defm vlsseg # nf : RISCVSSegLoad<nf>;
- defm vloxseg # nf : RISCVISegLoad<nf>;
- defm vluxseg # nf : RISCVISegLoad<nf>;
- defm vsseg # nf : RISCVUSSegStore<nf>;
- defm vssseg # nf : RISCVSSegStore<nf>;
- defm vsoxseg # nf : RISCVISegStore<nf>;
- defm vsuxseg # nf : RISCVISegStore<nf>;
- }
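- // The loop above generates, e.g. for nf = 2: int_riscv_vlseg2 (+_mask),
- // int_riscv_vlseg2ff (+_mask), int_riscv_vlsseg2 (+_mask), and so on for
- // the indexed and store variants, up to nf = 8.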
- // Strided loads/stores for fixed vectors.
- def int_riscv_masked_strided_load
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyptr_ty,
- llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [NoCapture<ArgIndex<1>>, IntrReadMem]>;
- def int_riscv_masked_strided_store
- : Intrinsic<[],
- [llvm_anyvector_ty, llvm_anyptr_ty,
- llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
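- //
- // Usage sketch for the fixed-vector strided load (names and mangling are
- // illustrative only):
- //   %v = call <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0i32.i64(
- //       <4 x i32> %passthru, i32* %ptr, i64 %stride, <4 x i1> %mask)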
- } // TargetPrefix = "riscv"
- //===----------------------------------------------------------------------===//
- // Scalar Cryptography
- //
- // These intrinsics will lower directly into the corresponding instructions
- // added by the scalar cryptography extension, if the extension is present.
- let TargetPrefix = "riscv" in {
- class ScalarCryptoGprIntrinsicAny
- : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>],
- [IntrNoMem, IntrSpeculatable]>;
- class ScalarCryptoByteSelect32
- : Intrinsic<[llvm_i32_ty],
- [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
- [IntrNoMem, IntrWillReturn, IntrSpeculatable,
- ImmArg<ArgIndex<2>>]>;
- class ScalarCryptoGprGprIntrinsic32
- : Intrinsic<[llvm_i32_ty],
- [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
- class ScalarCryptoGprGprIntrinsic64
- : Intrinsic<[llvm_i64_ty],
- [llvm_i64_ty, llvm_i64_ty],
- [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
- class ScalarCryptoGprIntrinsic64
- : Intrinsic<[llvm_i64_ty],
- [llvm_i64_ty],
- [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
- class ScalarCryptoByteSelectAny
- : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
- [IntrNoMem, IntrSpeculatable, IntrWillReturn,
- ImmArg<ArgIndex<2>>, Returned<ArgIndex<0>>]>;
- // Zknd
- def int_riscv_aes32dsi : ScalarCryptoByteSelect32;
- def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;
- def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64;
- def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64;
- def int_riscv_aes64im : ScalarCryptoGprIntrinsic64;
- // Zkne
- def int_riscv_aes32esi : ScalarCryptoByteSelect32;
- def int_riscv_aes32esmi : ScalarCryptoByteSelect32;
- def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64;
- def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64;
- // Zknd & Zkne
- def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64;
- def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
- [IntrNoMem, IntrSpeculatable,
- IntrWillReturn, ImmArg<ArgIndex<1>>]>;
- // Zknh
- def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
- def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
- def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
- def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;
- def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;
- def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
- def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
- def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
- def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;
- // Zksed
- def int_riscv_sm4ks : ScalarCryptoByteSelectAny;
- def int_riscv_sm4ed : ScalarCryptoByteSelectAny;
- // Zksh
- def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny;
- def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny;
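- //
- // Usage sketch (RV32 shown; the anyint intrinsics mangle on XLen):
- //   %h = call i32 @llvm.riscv.sha256sig0.i32(i32 %x)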
- } // TargetPrefix = "riscv"
|