Execution.cpp 81 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168
  1. //===-- Execution.cpp - Implement code to simulate the program ------------===//
  2. //
  3. // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
  4. // See https://llvm.org/LICENSE.txt for license information.
  5. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  6. //
  7. //===----------------------------------------------------------------------===//
  8. //
  9. // This file contains the actual instruction interpreter.
  10. //
  11. //===----------------------------------------------------------------------===//
  12. #include "Interpreter.h"
  13. #include "llvm/ADT/APInt.h"
  14. #include "llvm/ADT/Statistic.h"
  15. #include "llvm/CodeGen/IntrinsicLowering.h"
  16. #include "llvm/IR/Constants.h"
  17. #include "llvm/IR/DerivedTypes.h"
  18. #include "llvm/IR/GetElementPtrTypeIterator.h"
  19. #include "llvm/IR/Instructions.h"
  20. #include "llvm/Support/CommandLine.h"
  21. #include "llvm/Support/Debug.h"
  22. #include "llvm/Support/ErrorHandling.h"
  23. #include "llvm/Support/MathExtras.h"
  24. #include "llvm/Support/raw_ostream.h"
  25. #include <algorithm>
  26. #include <cmath>
  27. using namespace llvm;
  28. #define DEBUG_TYPE "interpreter"
// Counts every LLVM instruction the interpreter dynamically executes
// (reported via -stats).
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");

// Hidden flag: when set, the interpreter echoes every volatile load and
// store so volatile accesses can be observed during interpretation.
static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
          cl::desc("make the interpreter print every volatile load and store"));
  32. //===----------------------------------------------------------------------===//
  33. // Various Helper Functions
  34. //===----------------------------------------------------------------------===//
// Bind \p Val as the current value of SSA value \p V in stack frame \p SF.
static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}
  38. //===----------------------------------------------------------------------===//
  39. // Unary Instruction Implementations
  40. //===----------------------------------------------------------------------===//
  41. static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
  42. switch (Ty->getTypeID()) {
  43. case Type::FloatTyID:
  44. Dest.FloatVal = -Src.FloatVal;
  45. break;
  46. case Type::DoubleTyID:
  47. Dest.DoubleVal = -Src.DoubleVal;
  48. break;
  49. default:
  50. llvm_unreachable("Unhandled type for FNeg instruction");
  51. }
  52. }
  53. void Interpreter::visitUnaryOperator(UnaryOperator &I) {
  54. ExecutionContext &SF = ECStack.back();
  55. Type *Ty = I.getOperand(0)->getType();
  56. GenericValue Src = getOperandValue(I.getOperand(0), SF);
  57. GenericValue R; // Result
  58. // First process vector operation
  59. if (Ty->isVectorTy()) {
  60. R.AggregateVal.resize(Src.AggregateVal.size());
  61. switch(I.getOpcode()) {
  62. default:
  63. llvm_unreachable("Don't know how to handle this unary operator");
  64. break;
  65. case Instruction::FNeg:
  66. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  67. for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
  68. R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
  69. } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
  70. for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
  71. R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
  72. } else {
  73. llvm_unreachable("Unhandled type for FNeg instruction");
  74. }
  75. break;
  76. }
  77. } else {
  78. switch (I.getOpcode()) {
  79. default:
  80. llvm_unreachable("Don't know how to handle this unary operator");
  81. break;
  82. case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
  83. }
  84. }
  85. SetValue(&I, R, SF);
  86. }
  87. //===----------------------------------------------------------------------===//
  88. // Binary Instruction Implementations
  89. //===----------------------------------------------------------------------===//
// Expands to one switch case that applies binary operator OP to the TY
// (Float or Double) member of Src1/Src2 and stores the result in Dest.
// Relies on Dest/Src1/Src2 being in scope at the expansion site.
#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break
  94. static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
  95. GenericValue Src2, Type *Ty) {
  96. switch (Ty->getTypeID()) {
  97. IMPLEMENT_BINARY_OPERATOR(+, Float);
  98. IMPLEMENT_BINARY_OPERATOR(+, Double);
  99. default:
  100. dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
  101. llvm_unreachable(nullptr);
  102. }
  103. }
  104. static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
  105. GenericValue Src2, Type *Ty) {
  106. switch (Ty->getTypeID()) {
  107. IMPLEMENT_BINARY_OPERATOR(-, Float);
  108. IMPLEMENT_BINARY_OPERATOR(-, Double);
  109. default:
  110. dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
  111. llvm_unreachable(nullptr);
  112. }
  113. }
  114. static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
  115. GenericValue Src2, Type *Ty) {
  116. switch (Ty->getTypeID()) {
  117. IMPLEMENT_BINARY_OPERATOR(*, Float);
  118. IMPLEMENT_BINARY_OPERATOR(*, Double);
  119. default:
  120. dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
  121. llvm_unreachable(nullptr);
  122. }
  123. }
  124. static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
  125. GenericValue Src2, Type *Ty) {
  126. switch (Ty->getTypeID()) {
  127. IMPLEMENT_BINARY_OPERATOR(/, Float);
  128. IMPLEMENT_BINARY_OPERATOR(/, Double);
  129. default:
  130. dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
  131. llvm_unreachable(nullptr);
  132. }
  133. }
  134. static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
  135. GenericValue Src2, Type *Ty) {
  136. switch (Ty->getTypeID()) {
  137. case Type::FloatTyID:
  138. Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
  139. break;
  140. case Type::DoubleTyID:
  141. Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
  142. break;
  143. default:
  144. dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
  145. llvm_unreachable(nullptr);
  146. }
  147. }
// Expands to the scalar-integer case of an icmp switch: applies the APInt
// predicate method OP (eq, ult, sle, ...) and produces a 1-bit result in
// Dest.  The TY argument is unused; it is kept for call-site symmetry.
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID:  \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

// Expands to the vector cases of an icmp switch: applies the APInt
// predicate method OP element-wise, producing a vector of 1-bit results.
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
  case Type::FixedVectorTyID:                                        \
  case Type::ScalableVectorTyID: {                                   \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());    \
    Dest.AggregateVal.resize(Src1.AggregateVal.size());              \
    for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++)       \
      Dest.AggregateVal[_i].IntVal = APInt(                          \
          1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has.  We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;
// Each executeICMP_* helper below evaluates one integer-compare predicate
// for a scalar integer, an integer vector, or a pointer operand, and
// returns an i1 (or vector-of-i1) GenericValue.
//
// NOTE(review): the pointer cases reuse the host's raw pointer comparison
// for both the signed and unsigned predicate variants (e.g. SLT and ULT
// both expand to '<') -- confirm this is the intended semantics for
// interpreted pointer icmps.

// icmp eq: equality.
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ne: inequality.
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ult: unsigned less-than.
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp slt: signed less-than.
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ugt: unsigned greater-than.
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sgt: signed greater-than.
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp ule: unsigned less-or-equal.
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sle: signed less-or-equal.
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp uge: unsigned greater-or-equal.
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// icmp sge: signed greater-or-equal.
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
  300. void Interpreter::visitICmpInst(ICmpInst &I) {
  301. ExecutionContext &SF = ECStack.back();
  302. Type *Ty = I.getOperand(0)->getType();
  303. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  304. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  305. GenericValue R; // Result
  306. switch (I.getPredicate()) {
  307. case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
  308. case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
  309. case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  310. case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  311. case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  312. case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  313. case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  314. case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  315. case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  316. case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  317. default:
  318. dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
  319. llvm_unreachable(nullptr);
  320. }
  321. SetValue(&I, R, SF);
  322. }
// Scalar fcmp case: compare the TY (Float/Double) member of Src1/Src2 with
// operator OP and store a 1-bit result in Dest.
#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

// Element-wise comparison body shared by the float and double vector paths.
#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                               \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());       \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() );               \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)                \
    Dest.AggregateVal[_i].IntVal = APInt(1,                           \
        Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  break;

// Vector fcmp case: dispatch on the element type (float vs double).
#define IMPLEMENT_VECTOR_FCMP(OP)                                     \
  case Type::FixedVectorTyID:                                         \
  case Type::ScalableVectorTyID:                                      \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {        \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                             \
    } else {                                                          \
      IMPLEMENT_VECTOR_FCMP_T(OP, Double);                            \
    }

// FCMP_OEQ: ordered equality.  No explicit NaN mask is needed: the raw
// '==' comparison already yields false when either operand is NaN.
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
// For scalar operands: if either operand is NaN ('x != x' is the NaN
// test), return false immediately -- used by the ordered comparisons,
// where any NaN operand makes the result false.
#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  } else { \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
      Dest.IntVal = APInt(1,false); \
      return Dest; \
    } \
  }

// Build an element-wise NaN mask in Dest: lane _i gets FLAG where either
// input element is NaN, and !FLAG otherwise.
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
  assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
  Dest.AggregateVal.resize( X.AggregateVal.size() ); \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
    else { \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
    } \
  }

// Vector wrapper for MASK_VECTOR_NANS_T: fills Dest with the NaN mask for
// vector operands; a no-op for scalar types.
#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
  if (TY->isVectorTy()) { \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
    } else { \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
    } \
  } \

// FCMP_ONE: ordered inequality -- true iff neither operand is NaN and the
// operands compare unequal.  A raw '!=' alone would wrongly report true
// for NaN lanes, so for vectors a NaN mask is computed first and used to
// force those lanes to false afterwards.
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  // Save the NaN mask before the switch below reuses Dest for the raw
  // comparison result.
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
  default:
    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);
  return Dest;
}
// The four ordered relational predicates below rely on IEEE semantics of
// the raw comparison: any NaN operand makes <, <=, >, >= false, which is
// exactly the ordered-predicate result, so no NaN mask is needed.

// FCMP_OLE: ordered less-or-equal.
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// FCMP_OGE: ordered greater-or-equal.
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// FCMP_OLT: ordered less-than.
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

// FCMP_OGT: ordered greater-than.
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
// For scalar operands: if either operand is NaN, return true immediately
// -- used by the unordered comparisons, where any NaN operand makes the
// result true.
#define IMPLEMENT_UNORDERED(TY, X,Y) \
  if (TY->isFloatTy()) { \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
      Dest.IntVal = APInt(1,true); \
      return Dest; \
    } \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true); \
    return Dest; \
  }

// Vector path for the unordered comparisons: run the ordered comparison
// FUNC, then force the lanes flagged in the NaN mask (left in Dest by a
// preceding MASK_VECTOR_NANS expansion) to true, and return.
#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
  if (TY->isVectorTy()) { \
    GenericValue DestMask = Dest; \
    Dest = FUNC(Src1, Src2, Ty); \
    for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
      if (DestMask.AggregateVal[_i].IntVal == true) \
        Dest.AggregateVal[_i].IntVal = APInt(1, true); \
    return Dest; \
  }

// Each executeFCMP_U* helper implements the unordered form of a predicate:
// NaN operands yield true, otherwise the result defers to the matching
// ordered comparison.

// FCMP_UEQ: unordered equality.
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);
}

// FCMP_UNE: unordered inequality.
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

// FCMP_ULE: unordered less-or-equal.
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

// FCMP_UGE: unordered greater-or-equal.
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

// FCMP_ULT: unordered less-than.
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

// FCMP_UGT: unordered greater-than.
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}
  529. static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
  530. Type *Ty) {
  531. GenericValue Dest;
  532. if(Ty->isVectorTy()) {
  533. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  534. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  535. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  536. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  537. Dest.AggregateVal[_i].IntVal = APInt(1,
  538. ( (Src1.AggregateVal[_i].FloatVal ==
  539. Src1.AggregateVal[_i].FloatVal) &&
  540. (Src2.AggregateVal[_i].FloatVal ==
  541. Src2.AggregateVal[_i].FloatVal)));
  542. } else {
  543. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  544. Dest.AggregateVal[_i].IntVal = APInt(1,
  545. ( (Src1.AggregateVal[_i].DoubleVal ==
  546. Src1.AggregateVal[_i].DoubleVal) &&
  547. (Src2.AggregateVal[_i].DoubleVal ==
  548. Src2.AggregateVal[_i].DoubleVal)));
  549. }
  550. } else if (Ty->isFloatTy())
  551. Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
  552. Src2.FloatVal == Src2.FloatVal));
  553. else {
  554. Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
  555. Src2.DoubleVal == Src2.DoubleVal));
  556. }
  557. return Dest;
  558. }
  559. static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
  560. Type *Ty) {
  561. GenericValue Dest;
  562. if(Ty->isVectorTy()) {
  563. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  564. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  565. if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
  566. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  567. Dest.AggregateVal[_i].IntVal = APInt(1,
  568. ( (Src1.AggregateVal[_i].FloatVal !=
  569. Src1.AggregateVal[_i].FloatVal) ||
  570. (Src2.AggregateVal[_i].FloatVal !=
  571. Src2.AggregateVal[_i].FloatVal)));
  572. } else {
  573. for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
  574. Dest.AggregateVal[_i].IntVal = APInt(1,
  575. ( (Src1.AggregateVal[_i].DoubleVal !=
  576. Src1.AggregateVal[_i].DoubleVal) ||
  577. (Src2.AggregateVal[_i].DoubleVal !=
  578. Src2.AggregateVal[_i].DoubleVal)));
  579. }
  580. } else if (Ty->isFloatTy())
  581. Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
  582. Src2.FloatVal != Src2.FloatVal));
  583. else {
  584. Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
  585. Src2.DoubleVal != Src2.DoubleVal));
  586. }
  587. return Dest;
  588. }
  589. static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
  590. Type *Ty, const bool val) {
  591. GenericValue Dest;
  592. if(Ty->isVectorTy()) {
  593. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  594. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  595. for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
  596. Dest.AggregateVal[_i].IntVal = APInt(1,val);
  597. } else {
  598. Dest.IntVal = APInt(1, val);
  599. }
  600. return Dest;
  601. }
// Interpret an 'fcmp' instruction: evaluate both operands in the current
// frame, dispatch on the predicate to the matching executeFCMP_* helper,
// and bind the i1 (or vector-of-i1) result to the instruction.
void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  // Operand type (scalar float/double or a vector thereof) steers the
  // helpers' element handling.
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
    break;
  // FALSE/TRUE produce a constant regardless of the operand values.
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}
  634. static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
  635. GenericValue Src2, Type *Ty) {
  636. GenericValue Result;
  637. switch (predicate) {
  638. case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
  639. case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
  640. case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
  641. case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
  642. case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
  643. case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
  644. case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
  645. case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
  646. case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
  647. case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
  648. case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
  649. case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
  650. case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
  651. case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
  652. case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
  653. case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
  654. case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
  655. case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
  656. case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
  657. case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
  658. case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
  659. case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
  660. case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
  661. case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
  662. case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
  663. case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
  664. default:
  665. dbgs() << "Unhandled Cmp predicate\n";
  666. llvm_unreachable(nullptr);
  667. }
  668. }
// Interpret a binary operator: evaluate both operands in the current frame,
// compute the result (element-wise for vector types), and bind it to the
// instruction.  Shift operators have dedicated visit* methods and do not
// reach this function's switch.
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    R.AggregateVal.resize(Src1.AggregateVal.size());

    // Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP)                                \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].IntVal =                                    \
          Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;

    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
    // they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP)                                 \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].IntVal =                                    \
          Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);

    // Macros to execute binary operation 'OP' over floating point type TY
    // (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY)                               \
    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)            \
      R.AggregateVal[i].TY =                                        \
          Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;

    // Macros to choose appropriate TY: float or double and run operation
    // execution
#define FLOAT_VECTOR_OP(OP) {                                       \
  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())          \
    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                             \
  else {                                                            \
    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())       \
      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                          \
    else {                                                          \
      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
      llvm_unreachable(0);                                          \
    }                                                               \
  }                                                                 \
}

    switch(I.getOpcode()){
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
    case Instruction::FRem:
      // FRem has no infix operator; it maps onto fmod() per element, so it
      // cannot use FLOAT_VECTOR_OP and is expanded by hand here.
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal =
              fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
      else {
        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
                fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
        else {
          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
          llvm_unreachable(nullptr);
        }
      }
      break;
    }
  } else {
    // Scalar path: integer ops act on IntVal directly; FP ops are delegated
    // to helpers that pick FloatVal/DoubleVal from Ty.
    switch (I.getOpcode()) {
    default:
      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
      llvm_unreachable(nullptr);
      break;
    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
    }
  }
  SetValue(&I, R, SF);
}
  771. static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
  772. GenericValue Src3, Type *Ty) {
  773. GenericValue Dest;
  774. if(Ty->isVectorTy()) {
  775. assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
  776. assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
  777. Dest.AggregateVal.resize( Src1.AggregateVal.size() );
  778. for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
  779. Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
  780. Src3.AggregateVal[i] : Src2.AggregateVal[i];
  781. } else {
  782. Dest = (Src1.IntVal == 0) ? Src3 : Src2;
  783. }
  784. return Dest;
  785. }
  786. void Interpreter::visitSelectInst(SelectInst &I) {
  787. ExecutionContext &SF = ECStack.back();
  788. Type * Ty = I.getOperand(0)->getType();
  789. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  790. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  791. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  792. GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
  793. SetValue(&I, R, SF);
  794. }
  795. //===----------------------------------------------------------------------===//
  796. // Terminator Instruction Implementations
  797. //===----------------------------------------------------------------------===//
  798. void Interpreter::exitCalled(GenericValue GV) {
  799. // runAtExitHandlers() assumes there are no stack frames, but
  800. // if exit() was called, then it had a stack frame. Blow away
  801. // the stack before interpreting atexit handlers.
  802. ECStack.clear();
  803. runAtExitHandlers();
  804. exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
  805. }
  806. /// Pop the last stack frame off of ECStack and then copy the result
  807. /// back into the result variable if we are not returning void. The
  808. /// result variable may be the ExitValue, or the Value of the calling
  809. /// CallInst if there was a previous stack frame. This method may
  810. /// invalidate any ECStack iterators you have. This method also takes
  811. /// care of switching to the normal destination BB, if we are returning
  812. /// from an invoke.
  813. ///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
                                                 GenericValue Result) {
  // Pop the current stack frame.
  ECStack.pop_back();

  if (ECStack.empty()) { // Finished main. Put result into exit code...
    if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
      ExitValue = Result; // Capture the exit value of the program
    } else {
      // Void return from main: zero the raw bytes so the exit value is
      // deterministic rather than stale.
      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
    }
  } else {
    // If we have a previous stack frame, and we have a previous call,
    // fill in the return value...
    ExecutionContext &CallingSF = ECStack.back();
    if (CallingSF.Caller) {
      // Save result...
      if (!CallingSF.Caller->getType()->isVoidTy())
        SetValue(CallingSF.Caller, Result, CallingSF);
      // Returning through an invoke resumes at its normal destination.
      if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
      CallingSF.Caller = nullptr; // We returned from the call...
    }
  }
}
  838. void Interpreter::visitReturnInst(ReturnInst &I) {
  839. ExecutionContext &SF = ECStack.back();
  840. Type *RetTy = Type::getVoidTy(I.getContext());
  841. GenericValue Result;
  842. // Save away the return value... (if we are not 'ret void')
  843. if (I.getNumOperands()) {
  844. RetTy = I.getReturnValue()->getType();
  845. Result = getOperandValue(I.getReturnValue(), SF);
  846. }
  847. popStackAndReturnValueToCaller(RetTy, Result);
  848. }
// Executing 'unreachable' means the IR's assumptions were violated; abort
// interpretation immediately with a fatal error.
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
  report_fatal_error("Program executed an 'unreachable' instruction!");
}
  852. void Interpreter::visitBranchInst(BranchInst &I) {
  853. ExecutionContext &SF = ECStack.back();
  854. BasicBlock *Dest;
  855. Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
  856. if (!I.isUnconditional()) {
  857. Value *Cond = I.getCondition();
  858. if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
  859. Dest = I.getSuccessor(1);
  860. }
  861. SwitchToNewBasicBlock(Dest, SF);
  862. }
  863. void Interpreter::visitSwitchInst(SwitchInst &I) {
  864. ExecutionContext &SF = ECStack.back();
  865. Value* Cond = I.getCondition();
  866. Type *ElTy = Cond->getType();
  867. GenericValue CondVal = getOperandValue(Cond, SF);
  868. // Check to see if any of the cases match...
  869. BasicBlock *Dest = nullptr;
  870. for (auto Case : I.cases()) {
  871. GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
  872. if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
  873. Dest = cast<BasicBlock>(Case.getCaseSuccessor());
  874. break;
  875. }
  876. }
  877. if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
  878. SwitchToNewBasicBlock(Dest, SF);
  879. }
  880. void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
  881. ExecutionContext &SF = ECStack.back();
  882. void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
  883. SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
  884. }
  885. // SwitchToNewBasicBlock - This method is used to jump to a new basic block.
  886. // This function handles the actual updating of block and instruction iterators
  887. // as well as execution of all of the PHI nodes in the destination block.
  888. //
  889. // This method does this because all of the PHI nodes must be executed
  890. // atomically, reading their inputs before any of the results are updated. Not
  891. // doing this can cause problems if the PHI nodes depend on other PHI nodes for
  892. // their inputs. If the input PHI node is updated before it is read, incorrect
  893. // results can happen. Thus we use a two phase approach.
  894. //
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
  BasicBlock *PrevBB = SF.CurBB;      // Remember where we came from...
  SF.CurBB = Dest;                    // Update CurBB to branch destination
  SF.CurInst = SF.CurBB->begin();     // Update new instruction ptr...

  if (!isa<PHINode>(SF.CurInst)) return;  // Nothing fancy to do

  // Phase 1: loop over all of the PHI nodes in the current block, reading
  // their inputs BEFORE any are written, so PHIs that feed each other all
  // observe the predecessor-block values.
  std::vector<GenericValue> ResultValues;

  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
    Value *IncomingValue = PN->getIncomingValue(i);

    // Save the incoming value for this PHI node...
    ResultValues.push_back(getOperandValue(IncomingValue, SF));
  }

  // Phase 2: now loop over all of the PHI nodes setting their values...
  // ResultValues[i] pairs with the i-th PHI because both loops walk the
  // block's leading PHI run in order.
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
    PHINode *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
}
  917. //===----------------------------------------------------------------------===//
  918. // Memory Instruction Implementations
  919. //===----------------------------------------------------------------------===//
  920. void Interpreter::visitAllocaInst(AllocaInst &I) {
  921. ExecutionContext &SF = ECStack.back();
  922. Type *Ty = I.getAllocatedType(); // Type to be allocated
  923. // Get the number of elements being allocated by the array...
  924. unsigned NumElements =
  925. getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
  926. unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
  927. // Avoid malloc-ing zero bytes, use max()...
  928. unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
  929. // Allocate enough memory to hold the type...
  930. void *Memory = safe_malloc(MemToAlloc);
  931. LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
  932. << " bytes) x " << NumElements << " (Total: " << MemToAlloc
  933. << ") at " << uintptr_t(Memory) << '\n');
  934. GenericValue Result = PTOGV(Memory);
  935. assert(Result.PointerVal && "Null pointer returned by malloc!");
  936. SetValue(&I, Result, SF);
  937. if (I.getOpcode() == Instruction::Alloca)
  938. ECStack.back().Allocas.add(Memory);
  939. }
// getElementOffset - The workhorse for getelementptr: accumulate the byte
// offset contributed by every index, then add it to the base pointer.
//
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
                                              gep_type_iterator E,
                                              ExecutionContext &SF) {
  assert(Ptr->getType()->isPointerTy() &&
         "Cannot getElementOffset of a nonpointer type!");

  uint64_t Total = 0;
  for (; I != E; ++I) {
    if (StructType *STy = I.getStructTypeOrNull()) {
      // Struct index: always a constant; add that field's layout offset.
      const StructLayout *SLO = getDataLayout().getStructLayout(STy);

      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());

      Total += SLO->getElementOffset(Index);
    } else {
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      // i32 indices are sign-extended to 64 bits; only i32 and i64 index
      // widths are supported here.
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
      // Scale the (possibly negative) index by the element's alloc size.
      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
    }
  }

  GenericValue Result;
  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
  LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
  return Result;
}
  974. void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
  975. ExecutionContext &SF = ECStack.back();
  976. SetValue(&I, executeGEPOperation(I.getPointerOperand(),
  977. gep_type_begin(I), gep_type_end(I), SF), SF);
  978. }
  979. void Interpreter::visitLoadInst(LoadInst &I) {
  980. ExecutionContext &SF = ECStack.back();
  981. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  982. GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
  983. GenericValue Result;
  984. LoadValueFromMemory(Result, Ptr, I.getType());
  985. SetValue(&I, Result, SF);
  986. if (I.isVolatile() && PrintVolatile)
  987. dbgs() << "Volatile load " << I;
  988. }
  989. void Interpreter::visitStoreInst(StoreInst &I) {
  990. ExecutionContext &SF = ECStack.back();
  991. GenericValue Val = getOperandValue(I.getOperand(0), SF);
  992. GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
  993. StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
  994. I.getOperand(0)->getType());
  995. if (I.isVolatile() && PrintVolatile)
  996. dbgs() << "Volatile store: " << I;
  997. }
  998. //===----------------------------------------------------------------------===//
  999. // Miscellaneous Instruction Implementations
  1000. //===----------------------------------------------------------------------===//
  1001. void Interpreter::visitVAStartInst(VAStartInst &I) {
  1002. ExecutionContext &SF = ECStack.back();
  1003. GenericValue ArgIndex;
  1004. ArgIndex.UIntPairVal.first = ECStack.size() - 1;
  1005. ArgIndex.UIntPairVal.second = 0;
  1006. SetValue(&I, ArgIndex, SF);
  1007. }
void Interpreter::visitVAEndInst(VAEndInst &I) {
  // va_end is a noop for the interpreter: its va_list representation (a
  // pair of integers) needs no cleanup.
}
  1011. void Interpreter::visitVACopyInst(VACopyInst &I) {
  1012. ExecutionContext &SF = ECStack.back();
  1013. SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
  1014. }
void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
  ExecutionContext &SF = ECStack.back();

  // If it is an unknown intrinsic function, use the intrinsic lowering
  // class to transform it into hopefully tasty LLVM code.
  //
  BasicBlock::iterator Me(&I);
  BasicBlock *Parent = I.getParent();
  bool atBegin(Parent->begin() == Me);
  // Step back one instruction (when possible) before lowering: the lowering
  // replaces I with expanded code, invalidating any iterator pointing at I.
  if (!atBegin)
    --Me;
  IL->LowerIntrinsicCall(&I);

  // Restore the CurInst pointer to the first instruction newly inserted, if
  // any.
  if (atBegin) {
    SF.CurInst = Parent->begin();
  } else {
    // Resume just after the remembered predecessor of the lowered call.
    SF.CurInst = Me;
    ++SF.CurInst;
  }
}
  1035. void Interpreter::visitCallBase(CallBase &I) {
  1036. ExecutionContext &SF = ECStack.back();
  1037. SF.Caller = &I;
  1038. std::vector<GenericValue> ArgVals;
  1039. const unsigned NumArgs = SF.Caller->arg_size();
  1040. ArgVals.reserve(NumArgs);
  1041. for (Value *V : SF.Caller->args())
  1042. ArgVals.push_back(getOperandValue(V, SF));
  1043. // To handle indirect calls, we must get the pointer value from the argument
  1044. // and treat it as a function pointer.
  1045. GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF);
  1046. callFunction((Function*)GVTOP(SRC), ArgVals);
  1047. }
// Auxiliary function for the shift operations: clamp a shift amount so it is
// always smaller than the bit width of the value being shifted.
static unsigned getShiftAmount(uint64_t orgShiftAmount,
                               llvm::APInt valueToShift) {
  unsigned valueWidth = valueToShift.getBitWidth();
  if (orgShiftAmount < (uint64_t)valueWidth)
    return orgShiftAmount;
  // According to the LLVM documentation, if orgShiftAmount >= valueWidth the
  // result is undefined; this interpreter chooses to shift by the amount
  // masked to the next power of two of the width:
  return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}
  1058. void Interpreter::visitShl(BinaryOperator &I) {
  1059. ExecutionContext &SF = ECStack.back();
  1060. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1061. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1062. GenericValue Dest;
  1063. Type *Ty = I.getType();
  1064. if (Ty->isVectorTy()) {
  1065. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1066. assert(src1Size == Src2.AggregateVal.size());
  1067. for (unsigned i = 0; i < src1Size; i++) {
  1068. GenericValue Result;
  1069. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1070. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1071. Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1072. Dest.AggregateVal.push_back(Result);
  1073. }
  1074. } else {
  1075. // scalar
  1076. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1077. llvm::APInt valueToShift = Src1.IntVal;
  1078. Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
  1079. }
  1080. SetValue(&I, Dest, SF);
  1081. }
  1082. void Interpreter::visitLShr(BinaryOperator &I) {
  1083. ExecutionContext &SF = ECStack.back();
  1084. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1085. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1086. GenericValue Dest;
  1087. Type *Ty = I.getType();
  1088. if (Ty->isVectorTy()) {
  1089. uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
  1090. assert(src1Size == Src2.AggregateVal.size());
  1091. for (unsigned i = 0; i < src1Size; i++) {
  1092. GenericValue Result;
  1093. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1094. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1095. Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1096. Dest.AggregateVal.push_back(Result);
  1097. }
  1098. } else {
  1099. // scalar
  1100. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1101. llvm::APInt valueToShift = Src1.IntVal;
  1102. Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
  1103. }
  1104. SetValue(&I, Dest, SF);
  1105. }
  1106. void Interpreter::visitAShr(BinaryOperator &I) {
  1107. ExecutionContext &SF = ECStack.back();
  1108. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1109. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1110. GenericValue Dest;
  1111. Type *Ty = I.getType();
  1112. if (Ty->isVectorTy()) {
  1113. size_t src1Size = Src1.AggregateVal.size();
  1114. assert(src1Size == Src2.AggregateVal.size());
  1115. for (unsigned i = 0; i < src1Size; i++) {
  1116. GenericValue Result;
  1117. uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
  1118. llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
  1119. Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1120. Dest.AggregateVal.push_back(Result);
  1121. }
  1122. } else {
  1123. // scalar
  1124. uint64_t shiftAmount = Src2.IntVal.getZExtValue();
  1125. llvm::APInt valueToShift = Src1.IntVal;
  1126. Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
  1127. }
  1128. SetValue(&I, Dest, SF);
  1129. }
  1130. GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
  1131. ExecutionContext &SF) {
  1132. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1133. Type *SrcTy = SrcVal->getType();
  1134. if (SrcTy->isVectorTy()) {
  1135. Type *DstVecTy = DstTy->getScalarType();
  1136. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1137. unsigned NumElts = Src.AggregateVal.size();
  1138. // the sizes of src and dst vectors must be equal
  1139. Dest.AggregateVal.resize(NumElts);
  1140. for (unsigned i = 0; i < NumElts; i++)
  1141. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  1142. } else {
  1143. IntegerType *DITy = cast<IntegerType>(DstTy);
  1144. unsigned DBitWidth = DITy->getBitWidth();
  1145. Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  1146. }
  1147. return Dest;
  1148. }
  1149. GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
  1150. ExecutionContext &SF) {
  1151. Type *SrcTy = SrcVal->getType();
  1152. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1153. if (SrcTy->isVectorTy()) {
  1154. Type *DstVecTy = DstTy->getScalarType();
  1155. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1156. unsigned size = Src.AggregateVal.size();
  1157. // the sizes of src and dst vectors must be equal.
  1158. Dest.AggregateVal.resize(size);
  1159. for (unsigned i = 0; i < size; i++)
  1160. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
  1161. } else {
  1162. auto *DITy = cast<IntegerType>(DstTy);
  1163. unsigned DBitWidth = DITy->getBitWidth();
  1164. Dest.IntVal = Src.IntVal.sext(DBitWidth);
  1165. }
  1166. return Dest;
  1167. }
  1168. GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
  1169. ExecutionContext &SF) {
  1170. Type *SrcTy = SrcVal->getType();
  1171. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1172. if (SrcTy->isVectorTy()) {
  1173. Type *DstVecTy = DstTy->getScalarType();
  1174. unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1175. unsigned size = Src.AggregateVal.size();
  1176. // the sizes of src and dst vectors must be equal.
  1177. Dest.AggregateVal.resize(size);
  1178. for (unsigned i = 0; i < size; i++)
  1179. Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
  1180. } else {
  1181. auto *DITy = cast<IntegerType>(DstTy);
  1182. unsigned DBitWidth = DITy->getBitWidth();
  1183. Dest.IntVal = Src.IntVal.zext(DBitWidth);
  1184. }
  1185. return Dest;
  1186. }
  1187. GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
  1188. ExecutionContext &SF) {
  1189. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1190. if (isa<VectorType>(SrcVal->getType())) {
  1191. assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
  1192. DstTy->getScalarType()->isFloatTy() &&
  1193. "Invalid FPTrunc instruction");
  1194. unsigned size = Src.AggregateVal.size();
  1195. // the sizes of src and dst vectors must be equal.
  1196. Dest.AggregateVal.resize(size);
  1197. for (unsigned i = 0; i < size; i++)
  1198. Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
  1199. } else {
  1200. assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
  1201. "Invalid FPTrunc instruction");
  1202. Dest.FloatVal = (float)Src.DoubleVal;
  1203. }
  1204. return Dest;
  1205. }
  1206. GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
  1207. ExecutionContext &SF) {
  1208. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1209. if (isa<VectorType>(SrcVal->getType())) {
  1210. assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
  1211. DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
  1212. unsigned size = Src.AggregateVal.size();
  1213. // the sizes of src and dst vectors must be equal.
  1214. Dest.AggregateVal.resize(size);
  1215. for (unsigned i = 0; i < size; i++)
  1216. Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
  1217. } else {
  1218. assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
  1219. "Invalid FPExt instruction");
  1220. Dest.DoubleVal = (double)Src.FloatVal;
  1221. }
  1222. return Dest;
  1223. }
  1224. GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
  1225. ExecutionContext &SF) {
  1226. Type *SrcTy = SrcVal->getType();
  1227. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1228. if (isa<VectorType>(SrcTy)) {
  1229. Type *DstVecTy = DstTy->getScalarType();
  1230. Type *SrcVecTy = SrcTy->getScalarType();
  1231. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1232. unsigned size = Src.AggregateVal.size();
  1233. // the sizes of src and dst vectors must be equal.
  1234. Dest.AggregateVal.resize(size);
  1235. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1236. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1237. for (unsigned i = 0; i < size; i++)
  1238. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1239. Src.AggregateVal[i].FloatVal, DBitWidth);
  1240. } else {
  1241. for (unsigned i = 0; i < size; i++)
  1242. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1243. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1244. }
  1245. } else {
  1246. // scalar
  1247. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1248. assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
  1249. if (SrcTy->getTypeID() == Type::FloatTyID)
  1250. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1251. else {
  1252. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1253. }
  1254. }
  1255. return Dest;
  1256. }
  1257. GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
  1258. ExecutionContext &SF) {
  1259. Type *SrcTy = SrcVal->getType();
  1260. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1261. if (isa<VectorType>(SrcTy)) {
  1262. Type *DstVecTy = DstTy->getScalarType();
  1263. Type *SrcVecTy = SrcTy->getScalarType();
  1264. uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
  1265. unsigned size = Src.AggregateVal.size();
  1266. // the sizes of src and dst vectors must be equal
  1267. Dest.AggregateVal.resize(size);
  1268. if (SrcVecTy->getTypeID() == Type::FloatTyID) {
  1269. assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1270. for (unsigned i = 0; i < size; i++)
  1271. Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
  1272. Src.AggregateVal[i].FloatVal, DBitWidth);
  1273. } else {
  1274. for (unsigned i = 0; i < size; i++)
  1275. Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
  1276. Src.AggregateVal[i].DoubleVal, DBitWidth);
  1277. }
  1278. } else {
  1279. // scalar
  1280. unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1281. assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
  1282. if (SrcTy->getTypeID() == Type::FloatTyID)
  1283. Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
  1284. else {
  1285. Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
  1286. }
  1287. }
  1288. return Dest;
  1289. }
  1290. GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
  1291. ExecutionContext &SF) {
  1292. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1293. if (isa<VectorType>(SrcVal->getType())) {
  1294. Type *DstVecTy = DstTy->getScalarType();
  1295. unsigned size = Src.AggregateVal.size();
  1296. // the sizes of src and dst vectors must be equal
  1297. Dest.AggregateVal.resize(size);
  1298. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1299. assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1300. for (unsigned i = 0; i < size; i++)
  1301. Dest.AggregateVal[i].FloatVal =
  1302. APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
  1303. } else {
  1304. for (unsigned i = 0; i < size; i++)
  1305. Dest.AggregateVal[i].DoubleVal =
  1306. APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
  1307. }
  1308. } else {
  1309. // scalar
  1310. assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
  1311. if (DstTy->getTypeID() == Type::FloatTyID)
  1312. Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
  1313. else {
  1314. Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
  1315. }
  1316. }
  1317. return Dest;
  1318. }
  1319. GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
  1320. ExecutionContext &SF) {
  1321. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1322. if (isa<VectorType>(SrcVal->getType())) {
  1323. Type *DstVecTy = DstTy->getScalarType();
  1324. unsigned size = Src.AggregateVal.size();
  1325. // the sizes of src and dst vectors must be equal
  1326. Dest.AggregateVal.resize(size);
  1327. if (DstVecTy->getTypeID() == Type::FloatTyID) {
  1328. assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1329. for (unsigned i = 0; i < size; i++)
  1330. Dest.AggregateVal[i].FloatVal =
  1331. APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
  1332. } else {
  1333. for (unsigned i = 0; i < size; i++)
  1334. Dest.AggregateVal[i].DoubleVal =
  1335. APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
  1336. }
  1337. } else {
  1338. // scalar
  1339. assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
  1340. if (DstTy->getTypeID() == Type::FloatTyID)
  1341. Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
  1342. else {
  1343. Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
  1344. }
  1345. }
  1346. return Dest;
  1347. }
  1348. GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
  1349. ExecutionContext &SF) {
  1350. uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
  1351. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1352. assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
  1353. Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
  1354. return Dest;
  1355. }
  1356. GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
  1357. ExecutionContext &SF) {
  1358. GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  1359. assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
  1360. uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
  1361. if (PtrSize != Src.IntVal.getBitWidth())
  1362. Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
  1363. Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
  1364. return Dest;
  1365. }
// Implements the 'bitcast' instruction: bitwise reinterpretation between
// types of equal total bit width. Handles scalar<->scalar directly, and
// vector<->vector / vector<->scalar by packing through an intermediate
// integer vector.
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
                                             ExecutionContext &SF) {
  // This instruction supports bitwise conversion of vectors to integers and
  // to vectors of other types (as long as they have the same size)
  Type *SrcTy = SrcVal->getType();
  GenericValue Dest, Src = getOperandValue(SrcVal, SF);

  if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
    // vector src bitcast to vector dst or vector src bitcast to scalar dst or
    // scalar src bitcast to vector dst
    bool isLittleEndian = getDataLayout().isLittleEndian();
    GenericValue TempDst, TempSrc, SrcVec;
    Type *SrcElemTy;
    Type *DstElemTy;
    unsigned SrcBitSize;   // bit width of one source element
    unsigned DstBitSize;   // bit width of one destination element
    unsigned SrcNum;       // number of source elements
    unsigned DstNum;       // number of destination elements

    if (isa<VectorType>(SrcTy)) {
      SrcElemTy = SrcTy->getScalarType();
      SrcBitSize = SrcTy->getScalarSizeInBits();
      SrcNum = Src.AggregateVal.size();
      SrcVec = Src;
    } else {
      // if src is scalar value, make it vector <1 x type>
      SrcElemTy = SrcTy;
      SrcBitSize = SrcTy->getPrimitiveSizeInBits();
      SrcNum = 1;
      SrcVec.AggregateVal.push_back(Src);
    }

    if (isa<VectorType>(DstTy)) {
      DstElemTy = DstTy->getScalarType();
      DstBitSize = DstTy->getScalarSizeInBits();
      // Element count follows from total-bit-width equality.
      DstNum = (SrcNum * SrcBitSize) / DstBitSize;
    } else {
      DstElemTy = DstTy;
      DstBitSize = DstTy->getPrimitiveSizeInBits();
      DstNum = 1;
    }

    // A bitcast never changes the total number of bits.
    if (SrcNum * SrcBitSize != DstNum * DstBitSize)
      llvm_unreachable("Invalid BitCast");

    // If src is floating point, cast to integer first.
    TempSrc.AggregateVal.resize(SrcNum);
    if (SrcElemTy->isFloatTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
    } else if (SrcElemTy->isDoubleTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal =
            APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
    } else if (SrcElemTy->isIntegerTy()) {
      for (unsigned i = 0; i < SrcNum; i++)
        TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
    } else {
      // Pointers are not allowed as the element type of vector.
      llvm_unreachable("Invalid Bitcast");
    }

    // now TempSrc is integer type vector
    if (DstNum < SrcNum) {
      // Fewer, wider destination elements: pack Ratio source elements into
      // each destination element, ordered per target endianness.
      // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
      unsigned Ratio = SrcNum / DstNum;
      unsigned SrcElt = 0;
      for (unsigned i = 0; i < DstNum; i++) {
        GenericValue Elt;
        Elt.IntVal = 0;
        Elt.IntVal = Elt.IntVal.zext(DstBitSize);
        // Little-endian: first source element lands in the low bits;
        // big-endian: in the high bits.
        unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          APInt Tmp;
          Tmp = Tmp.zext(SrcBitSize);
          Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
          Tmp = Tmp.zext(DstBitSize);
          Tmp <<= ShiftAmt;
          ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
          Elt.IntVal |= Tmp;
        }
        TempDst.AggregateVal.push_back(Elt);
      }
    } else {
      // Equal count or more, narrower destination elements: slice each
      // source element into Ratio destination elements.
      // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
      unsigned Ratio = DstNum / SrcNum;
      for (unsigned i = 0; i < SrcNum; i++) {
        unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
        for (unsigned j = 0; j < Ratio; j++) {
          GenericValue Elt;
          // The zext result here is immediately overwritten by the next
          // assignment; kept as-is from the original.
          Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
          Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
          Elt.IntVal.lshrInPlace(ShiftAmt);
          // it could be DstBitSize == SrcBitSize, so check it
          if (DstBitSize < SrcBitSize)
            Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
          ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
          TempDst.AggregateVal.push_back(Elt);
        }
      }
    }

    // convert result from integer to specified type
    if (isa<VectorType>(DstTy)) {
      if (DstElemTy->isDoubleTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].DoubleVal =
              TempDst.AggregateVal[i].IntVal.bitsToDouble();
      } else if (DstElemTy->isFloatTy()) {
        Dest.AggregateVal.resize(DstNum);
        for (unsigned i = 0; i < DstNum; i++)
          Dest.AggregateVal[i].FloatVal =
              TempDst.AggregateVal[i].IntVal.bitsToFloat();
      } else {
        Dest = TempDst;
      }
    } else {
      // Scalar destination: unpack the single packed element.
      if (DstElemTy->isDoubleTy())
        Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
      else if (DstElemTy->isFloatTy()) {
        Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
      } else {
        Dest.IntVal = TempDst.AggregateVal[0].IntVal;
      }
    }
  } else { // if (isa<VectorType>(SrcTy)) || isa<VectorType>(DstTy))
    // scalar src bitcast to scalar dst
    if (DstTy->isPointerTy()) {
      assert(SrcTy->isPointerTy() && "Invalid BitCast");
      Dest.PointerVal = Src.PointerVal;
    } else if (DstTy->isIntegerTy()) {
      if (SrcTy->isFloatTy())
        Dest.IntVal = APInt::floatToBits(Src.FloatVal);
      else if (SrcTy->isDoubleTy()) {
        Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
      } else if (SrcTy->isIntegerTy()) {
        Dest.IntVal = Src.IntVal;
      } else {
        llvm_unreachable("Invalid BitCast");
      }
    } else if (DstTy->isFloatTy()) {
      if (SrcTy->isIntegerTy())
        Dest.FloatVal = Src.IntVal.bitsToFloat();
      else {
        Dest.FloatVal = Src.FloatVal;
      }
    } else if (DstTy->isDoubleTy()) {
      if (SrcTy->isIntegerTy())
        Dest.DoubleVal = Src.IntVal.bitsToDouble();
      else {
        Dest.DoubleVal = Src.DoubleVal;
      }
    } else {
      llvm_unreachable("Invalid Bitcast");
    }
  }

  return Dest;
}
  1519. void Interpreter::visitTruncInst(TruncInst &I) {
  1520. ExecutionContext &SF = ECStack.back();
  1521. SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1522. }
  1523. void Interpreter::visitSExtInst(SExtInst &I) {
  1524. ExecutionContext &SF = ECStack.back();
  1525. SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
  1526. }
  1527. void Interpreter::visitZExtInst(ZExtInst &I) {
  1528. ExecutionContext &SF = ECStack.back();
  1529. SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
  1530. }
  1531. void Interpreter::visitFPTruncInst(FPTruncInst &I) {
  1532. ExecutionContext &SF = ECStack.back();
  1533. SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
  1534. }
  1535. void Interpreter::visitFPExtInst(FPExtInst &I) {
  1536. ExecutionContext &SF = ECStack.back();
  1537. SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
  1538. }
  1539. void Interpreter::visitUIToFPInst(UIToFPInst &I) {
  1540. ExecutionContext &SF = ECStack.back();
  1541. SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1542. }
  1543. void Interpreter::visitSIToFPInst(SIToFPInst &I) {
  1544. ExecutionContext &SF = ECStack.back();
  1545. SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
  1546. }
  1547. void Interpreter::visitFPToUIInst(FPToUIInst &I) {
  1548. ExecutionContext &SF = ECStack.back();
  1549. SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
  1550. }
  1551. void Interpreter::visitFPToSIInst(FPToSIInst &I) {
  1552. ExecutionContext &SF = ECStack.back();
  1553. SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
  1554. }
  1555. void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
  1556. ExecutionContext &SF = ECStack.back();
  1557. SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
  1558. }
  1559. void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
  1560. ExecutionContext &SF = ECStack.back();
  1561. SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
  1562. }
  1563. void Interpreter::visitBitCastInst(BitCastInst &I) {
  1564. ExecutionContext &SF = ECStack.back();
  1565. SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
  1566. }
// Copies the vararg slot's payload for type TY into Dest.
#define IMPLEMENT_VAARG(TY) \
  case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break

// Implements 'va_arg'. LLI encodes a va_list as an
// (ec-stack-depth, var-arg-index) pair: the first component selects the
// frame on ECStack, the second indexes into that frame's VarArgs vector.
void Interpreter::visitVAArgInst(VAArgInst &I) {
  ExecutionContext &SF = ECStack.back();

  // Get the incoming valist parameter. LLI treats the valist as a
  // (ec-stack-depth var-arg-index) pair.
  GenericValue VAList = getOperandValue(I.getOperand(0), SF);
  GenericValue Dest;
  GenericValue Src = ECStack[VAList.UIntPairVal.first]
                         .VarArgs[VAList.UIntPairVal.second];
  Type *Ty = I.getType();
  switch (Ty->getTypeID()) {
  case Type::IntegerTyID:
    Dest.IntVal = Src.IntVal;
    break;
  IMPLEMENT_VAARG(Pointer);
  IMPLEMENT_VAARG(Float);
  IMPLEMENT_VAARG(Double);
  default:
    dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }

  // Set the Value of this Instruction.
  SetValue(&I, Dest, SF);

  // Move the pointer to the next vararg.
  // NOTE(review): VAList is a local copy of the operand's GenericValue, so
  // this increment is not written back anywhere visible in this function —
  // confirm whether the advance is persisted elsewhere or intentional.
  ++VAList.UIntPairVal.second;
}
// Implements 'extractelement': read one lane of a vector operand, selected
// by an integer index operand.
void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
  ExecutionContext &SF = ECStack.back();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);  // the vector
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);  // the index
  GenericValue Dest;
  Type *Ty = I.getType();  // result type == element type of the vector

  const unsigned indx = unsigned(Src2.IntVal.getZExtValue());

  if (Src1.AggregateVal.size() > indx) {
    switch (Ty->getTypeID()) {
    default:
      dbgs() << "Unhandled destination type for extractelement instruction: "
             << *Ty << "\n";
      llvm_unreachable(nullptr);
      break;
    case Type::IntegerTyID:
      Dest.IntVal = Src1.AggregateVal[indx].IntVal;
      break;
    case Type::FloatTyID:
      Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
      break;
    case Type::DoubleTyID:
      Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
      break;
    }
  } else {
    // NOTE(review): on an out-of-range index this only prints a message and
    // falls through with Dest left default-constructed — the instruction's
    // result is effectively unspecified in that case.
    dbgs() << "Invalid index in extractelement instruction\n";
  }

  SetValue(&I, Dest, SF);
}
  1623. void Interpreter::visitInsertElementInst(InsertElementInst &I) {
  1624. ExecutionContext &SF = ECStack.back();
  1625. VectorType *Ty = cast<VectorType>(I.getType());
  1626. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1627. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1628. GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
  1629. GenericValue Dest;
  1630. Type *TyContained = Ty->getElementType();
  1631. const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
  1632. Dest.AggregateVal = Src1.AggregateVal;
  1633. if(Src1.AggregateVal.size() <= indx)
  1634. llvm_unreachable("Invalid index in insertelement instruction");
  1635. switch (TyContained->getTypeID()) {
  1636. default:
  1637. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1638. case Type::IntegerTyID:
  1639. Dest.AggregateVal[indx].IntVal = Src2.IntVal;
  1640. break;
  1641. case Type::FloatTyID:
  1642. Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
  1643. break;
  1644. case Type::DoubleTyID:
  1645. Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
  1646. break;
  1647. }
  1648. SetValue(&I, Dest, SF);
  1649. }
  1650. void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
  1651. ExecutionContext &SF = ECStack.back();
  1652. VectorType *Ty = cast<VectorType>(I.getType());
  1653. GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  1654. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1655. GenericValue Dest;
  1656. // There is no need to check types of src1 and src2, because the compiled
  1657. // bytecode can't contain different types for src1 and src2 for a
  1658. // shufflevector instruction.
  1659. Type *TyContained = Ty->getElementType();
  1660. unsigned src1Size = (unsigned)Src1.AggregateVal.size();
  1661. unsigned src2Size = (unsigned)Src2.AggregateVal.size();
  1662. unsigned src3Size = I.getShuffleMask().size();
  1663. Dest.AggregateVal.resize(src3Size);
  1664. switch (TyContained->getTypeID()) {
  1665. default:
  1666. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1667. break;
  1668. case Type::IntegerTyID:
  1669. for( unsigned i=0; i<src3Size; i++) {
  1670. unsigned j = std::max(0, I.getMaskValue(i));
  1671. if(j < src1Size)
  1672. Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
  1673. else if(j < src1Size + src2Size)
  1674. Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
  1675. else
  1676. // The selector may not be greater than sum of lengths of first and
  1677. // second operands and llasm should not allow situation like
  1678. // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
  1679. // <2 x i32> < i32 0, i32 5 >,
  1680. // where i32 5 is invalid, but let it be additional check here:
  1681. llvm_unreachable("Invalid mask in shufflevector instruction");
  1682. }
  1683. break;
  1684. case Type::FloatTyID:
  1685. for( unsigned i=0; i<src3Size; i++) {
  1686. unsigned j = std::max(0, I.getMaskValue(i));
  1687. if(j < src1Size)
  1688. Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
  1689. else if(j < src1Size + src2Size)
  1690. Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
  1691. else
  1692. llvm_unreachable("Invalid mask in shufflevector instruction");
  1693. }
  1694. break;
  1695. case Type::DoubleTyID:
  1696. for( unsigned i=0; i<src3Size; i++) {
  1697. unsigned j = std::max(0, I.getMaskValue(i));
  1698. if(j < src1Size)
  1699. Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
  1700. else if(j < src1Size + src2Size)
  1701. Dest.AggregateVal[i].DoubleVal =
  1702. Src2.AggregateVal[j-src1Size].DoubleVal;
  1703. else
  1704. llvm_unreachable("Invalid mask in shufflevector instruction");
  1705. }
  1706. break;
  1707. }
  1708. SetValue(&I, Dest, SF);
  1709. }
  1710. void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
  1711. ExecutionContext &SF = ECStack.back();
  1712. Value *Agg = I.getAggregateOperand();
  1713. GenericValue Dest;
  1714. GenericValue Src = getOperandValue(Agg, SF);
  1715. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1716. unsigned Num = I.getNumIndices();
  1717. GenericValue *pSrc = &Src;
  1718. for (unsigned i = 0 ; i < Num; ++i) {
  1719. pSrc = &pSrc->AggregateVal[*IdxBegin];
  1720. ++IdxBegin;
  1721. }
  1722. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1723. switch (IndexedType->getTypeID()) {
  1724. default:
  1725. llvm_unreachable("Unhandled dest type for extractelement instruction");
  1726. break;
  1727. case Type::IntegerTyID:
  1728. Dest.IntVal = pSrc->IntVal;
  1729. break;
  1730. case Type::FloatTyID:
  1731. Dest.FloatVal = pSrc->FloatVal;
  1732. break;
  1733. case Type::DoubleTyID:
  1734. Dest.DoubleVal = pSrc->DoubleVal;
  1735. break;
  1736. case Type::ArrayTyID:
  1737. case Type::StructTyID:
  1738. case Type::FixedVectorTyID:
  1739. case Type::ScalableVectorTyID:
  1740. Dest.AggregateVal = pSrc->AggregateVal;
  1741. break;
  1742. case Type::PointerTyID:
  1743. Dest.PointerVal = pSrc->PointerVal;
  1744. break;
  1745. }
  1746. SetValue(&I, Dest, SF);
  1747. }
  1748. void Interpreter::visitInsertValueInst(InsertValueInst &I) {
  1749. ExecutionContext &SF = ECStack.back();
  1750. Value *Agg = I.getAggregateOperand();
  1751. GenericValue Src1 = getOperandValue(Agg, SF);
  1752. GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  1753. GenericValue Dest = Src1; // Dest is a slightly changed Src1
  1754. ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
  1755. unsigned Num = I.getNumIndices();
  1756. GenericValue *pDest = &Dest;
  1757. for (unsigned i = 0 ; i < Num; ++i) {
  1758. pDest = &pDest->AggregateVal[*IdxBegin];
  1759. ++IdxBegin;
  1760. }
  1761. // pDest points to the target value in the Dest now
  1762. Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
  1763. switch (IndexedType->getTypeID()) {
  1764. default:
  1765. llvm_unreachable("Unhandled dest type for insertelement instruction");
  1766. break;
  1767. case Type::IntegerTyID:
  1768. pDest->IntVal = Src2.IntVal;
  1769. break;
  1770. case Type::FloatTyID:
  1771. pDest->FloatVal = Src2.FloatVal;
  1772. break;
  1773. case Type::DoubleTyID:
  1774. pDest->DoubleVal = Src2.DoubleVal;
  1775. break;
  1776. case Type::ArrayTyID:
  1777. case Type::StructTyID:
  1778. case Type::FixedVectorTyID:
  1779. case Type::ScalableVectorTyID:
  1780. pDest->AggregateVal = Src2.AggregateVal;
  1781. break;
  1782. case Type::PointerTyID:
  1783. pDest->PointerVal = Src2.PointerVal;
  1784. break;
  1785. }
  1786. SetValue(&I, Dest, SF);
  1787. }
// Evaluates a ConstantExpr on the fly within stack frame SF. Cast, GEP,
// compare, and select opcodes delegate to the corresponding execute*
// helpers; the remaining binary opcodes are computed inline below.
GenericValue Interpreter::getConstantExprValue(ConstantExpr *CE,
                                               ExecutionContext &SF) {
  switch (CE->getOpcode()) {
  case Instruction::Trunc:
    return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::ZExt:
    return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SExt:
    return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPTrunc:
    return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPExt:
    return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::UIToFP:
    return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::SIToFP:
    return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToUI:
    return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::FPToSI:
    return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::PtrToInt:
    return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::IntToPtr:
    return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::BitCast:
    return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
  case Instruction::GetElementPtr:
    return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
                               gep_type_end(CE), SF);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return executeCmpInst(CE->getPredicate(),
                          getOperandValue(CE->getOperand(0), SF),
                          getOperandValue(CE->getOperand(1), SF),
                          CE->getOperand(0)->getType());
  case Instruction::Select:
    return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
                             getOperandValue(CE->getOperand(1), SF),
                             getOperandValue(CE->getOperand(2), SF),
                             CE->getOperand(0)->getType());
  default:
    break;
  }

  // The cases below here require a GenericValue parameter for the result
  // so we initialize one, compute it and then return it.
  GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
  GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
  GenericValue Dest;
  Type *Ty = CE->getOperand(0)->getType();
  switch (CE->getOpcode()) {
  case Instruction::Add:  Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
  case Instruction::Sub:  Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
  case Instruction::Mul:  Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
  case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
  case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
  case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
  case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
  case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
  case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
  case Instruction::And:  Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
  case Instruction::Or:   Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
  case Instruction::Xor:  Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
  // Shift amounts are taken as the raw zero-extended value of Op1.
  case Instruction::Shl:
    Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
    break;
  case Instruction::LShr:
    Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
    break;
  case Instruction::AShr:
    Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
    break;
  default:
    dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
    llvm_unreachable("Unhandled ConstantExpr");
  }
  return Dest;
}
// Resolves an SSA value to its runtime GenericValue within stack frame SF.
GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Constant expressions are evaluated on demand.
    return getConstantExprValue(CE, SF);
  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // NOTE(review): this branch appears unreachable — GlobalValue derives
    // from Constant, so the dyn_cast<Constant> above already claims globals;
    // presumably getConstantValue handles them. Kept as-is; confirm before
    // removing.
    return PTOGV(getPointerToGlobal(GV));
  } else {
    // Ordinary instruction results and arguments live in the frame's map.
    return SF.Values[V];
  }
}
  1880. //===----------------------------------------------------------------------===//
  1881. // Dispatch and Execution Code
  1882. //===----------------------------------------------------------------------===//
  1883. //===----------------------------------------------------------------------===//
  1884. // callFunction - Execute the specified function...
  1885. //
// callFunction - Execute the specified function: push a fresh stack frame,
// bind the arguments, and position the "PC" at the function's first
// instruction. External (declaration-only) functions are dispatched to
// callExternalFunction and returned from immediately.
void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
  assert((ECStack.empty() || !ECStack.back().Caller ||
          ECStack.back().Caller->arg_size() == ArgVals.size()) &&
         "Incorrect number of arguments passed into function call!");
  // Make a new stack frame... and fill it in.
  ECStack.emplace_back();
  ExecutionContext &StackFrame = ECStack.back();
  StackFrame.CurFunction = F;

  // Special handling for external functions.
  if (F->isDeclaration()) {
    GenericValue Result = callExternalFunction (F, ArgVals);
    // Simulate a 'ret' instruction of the appropriate type.
    popStackAndReturnValueToCaller (F->getReturnType (), Result);
    return;
  }

  // Get pointers to first LLVM BB & Instruction in function.
  StackFrame.CurBB = &F->front();
  StackFrame.CurInst = StackFrame.CurBB->begin();

  // Run through the function arguments and initialize their values...
  // Extra arguments beyond the formal parameter list are only legal for
  // vararg functions.
  assert((ArgVals.size() == F->arg_size() ||
          (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
         "Invalid number of values passed to function invocation!");

  // Handle non-varargs arguments...
  unsigned i = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI, ++i)
    SetValue(&*AI, ArgVals[i], StackFrame);

  // Handle varargs arguments... (everything after the formal parameters;
  // i now equals F->arg_size()).
  StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
}
  1916. void Interpreter::run() {
  1917. while (!ECStack.empty()) {
  1918. // Interpret a single instruction & increment the "PC".
  1919. ExecutionContext &SF = ECStack.back(); // Current stack frame
  1920. Instruction &I = *SF.CurInst++; // Increment before execute
  1921. // Track the number of dynamic instructions executed.
  1922. ++NumDynamicInsts;
  1923. LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
  1924. visit(I); // Dispatch to one of the visit* methods...
  1925. }
  1926. }