//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// Because these intrinsics take `llvm_anyptr_ty`, the names given below are
// the canonical names; the intrinsics actually used in code have that name
// suffixed with the pointer type they are specialized for (denoted `<p>` in
// the names below), in order to avoid type conflicts.
let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }
  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
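
  // As an illustrative sketch (the operand values and the `p0i32` pointer
  // suffix are assumptions; real call sites are emitted during atomic
  // expansion), a masked 32-bit atomic add might be invoked from LLVM IR as:
  //   %res = call i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
  //              i32* %alignedaddr, i32 %incr, i32 %mask, i32 %ordering)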
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

class RISCVVIntrinsic {
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  // These intrinsics may accept an illegal integer value in their llvm_any_ty
  // operand, in which case that operand must be extended. ExtendOperand holds
  // the index (counting from 1) of the operand to extend; zero means the
  // intrinsic has no operand that must be extended.
  bits<4> ExtendOperand = 0;
}
let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
                                    /* AVL */ [LLVMMatchType<0>,
                                    /* VSEW */ LLVMMatchType<0>,
                                    /* VLMUL */ LLVMMatchType<0>],
                                    [IntrNoMem, IntrHasSideEffects,
                                     ImmArg<ArgIndex<1>>,
                                     ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                                       /* VSEW */ [LLVMMatchType<0>,
                                       /* VLMUL */ LLVMMatchType<0>],
                                       [IntrNoMem, IntrHasSideEffects,
                                        ImmArg<ArgIndex<0>>,
                                        ImmArg<ArgIndex<1>>]>;
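  // Illustrative use on RV64 (a sketch only; the immediate values chosen for
  // VSEW and VLMUL below are assumptions, not normative encodings):
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)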
  // For unit stride load
  // Input: (pointer, vl)
  class RISCVUSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
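  // For example (a sketch; the concrete vector and XLen types are arbitrary
  // illustrative choices), a unit-stride load of <vscale x 2 x i32> with an
  // i64 VL mangles and is called as:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //            <vscale x 2 x i32>* %ptr, i64 %vl)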
  // For unit stride fault-only-first load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination IntrReadMem + IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFF
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination IntrReadMem + IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFFMask
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic;
  // For strided load
  // Input: (pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load
  // Input: (pointer, index, vl)
  class RISCVILoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVILoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
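  // A masked indexed load, sketched with assumed types (nxv2i32 data and
  // indices, i64 VL; the suffix order here follows the overloaded types
  // above, as an assumption for illustration):
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %ptr,
  //            <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)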
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
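  // Sketch with assumed types (nxv2i32 data, i64 VL):
  //   call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> %value,
  //            <vscale x 2 x i32>* %ptr, i64 %vl)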
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryAAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first and second source vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first and second source vectors (with mask).
  // Input: (vector_in, vector_in, mask, vl)
  class RISCVBinaryAAAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is the same as the
  // first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
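  // For instance (types assumed purely for illustration), an unmasked vadd
  // with a vector second operand instantiates this shape as:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)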
  // For operations where the destination vector type is the same as the
  // first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For operations where the destination vector type is NOT the same as the
  // first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For operations where the destination vector type is NOT the same as the
  // first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For binary operations with V0 as input.
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For compare operations with mask type output, without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareNoMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For compare operations with mask type output, with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
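  // A sketched masked compare (all types are illustrative assumptions):
  //   %m = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i1> %maskedoff, <vscale x 2 x i32> %a,
  //            <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i64 %vl)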
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (vector_in, vl)
  class RISCVClassifyNoMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVClassifyMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
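  // E.g. (a sketch with assumed types), a saturating add declared through
  // this class would be called as:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)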
  // For Saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryABXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ExtendOperand = 3;
  }
  // For ternary operations where the destination vector type is the same as
  // the first and second source vectors.
  // Input: (vector_in, vector_in, scalar_in, vl)
  class RISCVTernaryAAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Same, with mask.
  // Input: (vector_in, vector_in, scalar_in, mask, vl)
  class RISCVTernaryAAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For ternary operations where the destination vector type is the same as
  // the first and third source vectors.
  // Input: (vector_in, vector_in/scalar_in, vector_in, vl)
  class RISCVTernaryAAXANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // Same, with mask.
  // Input: (vector_in, vector_in/scalar_in, vector_in, mask, vl)
  class RISCVTernaryAAXAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For widening ternary operations.
  // Input: (vector_in, vector_in/scalar_in, vector_in, vl)
  class RISCVTernaryWideNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // Same, with mask.
  // Input: (vector_in, vector_in/scalar_in, vector_in, mask, vl)
  class RISCVTernaryWideMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, mask, vl)
  class RISCVReductionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
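  // A sketched reduction (all types assumed): element 0 of %start seeds the
  // reduction of %vec, and the result lands in element 0 of the returned
  // vector:
  //   %r = call <vscale x 4 x i32> @llvm.riscv.vredsum.nxv4i32.nxv8i32.i64(
  //            <vscale x 4 x i32> %dest, <vscale x 8 x i32> %vec,
  //            <vscale x 4 x i32> %start, i64 %vl)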
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskUnarySOutNoMask
        : Intrinsic<[llvm_anyint_ty],
                    [llvm_anyvector_ty, LLVMMatchType<0>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskUnarySOutMask
        : Intrinsic<[llvm_anyint_ty],
                    [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is NOT the same as the
  // source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryABNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For operations where the destination vector type is NOT the same as the
  // source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryABMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskUnaryMOutMask
        : Intrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations.
  // Input: (vector_in, vl)
  class RISCVConversionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVConversionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
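  // A sketched conversion (types assumed), float to unsigned integer:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(
  //            <vscale x 2 x float> %src, i64 %vl)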
  // For atomic operations without mask
  // Input: (base, index, value, vl)
  class RISCVAMONoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For atomic operations with mask
  // Input: (base, index, value, mask, vl)
  class RISCVAMOMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride segment load
  // Input: (pointer, vl)
  class RISCVUSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
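  // A sketched two-field unit-stride segment load (types assumed; the nf
  // result vectors come back as one aggregate):
  //   %r = call { <vscale x 2 x i32>, <vscale x 2 x i32> }
  //            @llvm.riscv.vlseg2.nxv2i32.i64(i32* %ptr, i64 %vl)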
  // For unit stride fault-only-first segment load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination IntrReadMem + IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFF<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_anyint_ty]),
                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination IntrReadMem + IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFFMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;
  // For strided segment load
  // Input: (pointer, stride, vl)
  class RISCVSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided segment load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load
  // Input: (pointer, index, vl)
  class RISCVISegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVISegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided segment store
  // Input: (value, pointer, stride, vl)
  class RISCVSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided segment store with mask
  // Input: (value, pointer, stride, mask, vl)
  class RISCVSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }
  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  }
  multiclass RISCVSaturatingBinaryABX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABXMask;
  }
  multiclass RISCVTernaryAAAX {
    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  }
  multiclass RISCVMaskUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  }
  multiclass RISCVMaskUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
  multiclass RISCVAMO {
    def "int_riscv_" # NAME : RISCVAMONoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  }
  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vle1 : RISCVUSLoad;
  def int_riscv_vse1 : RISCVUSStore;

  defm vamoswap : RISCVAMO;
  defm vamoadd : RISCVAMO;
  defm vamoxor : RISCVAMO;
  defm vamoand : RISCVAMO;
  defm vamoor : RISCVAMO;
  defm vamomin : RISCVAMO;
  defm vamomax : RISCVAMO;
  defm vamominu : RISCVAMO;
  defm vamomaxu : RISCVAMO;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;
  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAX;
  defm vsrl : RISCVBinaryAAX;
  defm vsra : RISCVBinaryAAX;

  defm vnsrl : RISCVBinaryABX;
  defm vnsra : RISCVBinaryABX;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu : RISCVTernaryWide;
  defm vwmacc : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  def int_riscv_vmerge : RISCVBinaryWithV0;

  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 1;
  }
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let ExtendOperand = 2;
  }

  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
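  // As a sketch (the vector type, element type and XLen below are assumed
  // purely for illustration), the scalar-move intrinsics above would be
  // called from LLVM IR roughly as:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(
  //            i32 %x, i64 %vl)
  //   %s = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %v)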
  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather : RISCVBinaryAAX;
  defm vrgatherei16 : RISCVBinaryAAX;

  def "int_riscv_vcompress" : RISCVBinaryAAAMask;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAX;
  defm vssra : RISCVSaturatingBinaryAAX;

  defm vnclipu : RISCVSaturatingBinaryABX;
  defm vnclip : RISCVSaturatingBinaryABX;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredsum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredsum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand : RISCVBinaryAAANoMask;
  def int_riscv_vmnand : RISCVBinaryAAANoMask;
  def int_riscv_vmandnot : RISCVBinaryAAANoMask;
  def int_riscv_vmxor : RISCVBinaryAAANoMask;
  def int_riscv_vmor : RISCVBinaryAAANoMask;
  def int_riscv_vmnor : RISCVBinaryAAANoMask;
  def int_riscv_vmornot : RISCVBinaryAAANoMask;
  def int_riscv_vmxnor : RISCVBinaryAAANoMask;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vpopc : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
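  // The foreach above defines the whole family of segment intrinsics; for
  // nf = 2, for example, it produces int_riscv_vlseg2, int_riscv_vlseg2ff,
  // int_riscv_vlsseg2, int_riscv_vloxseg2, int_riscv_vluxseg2,
  // int_riscv_vsseg2, int_riscv_vssseg2, int_riscv_vsoxseg2 and
  // int_riscv_vsuxseg2, each paired with a "_mask" variant.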
} // TargetPrefix = "riscv"