- //====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
- //
- // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- // See https://llvm.org/LICENSE.txt for license information.
- // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- //
- //===----------------------------------------------------------------------===//
- /// \file
- ///
- /// Provide a pass which mitigates speculative execution attacks which operate
- /// by speculating incorrectly past some predicate (a type check, bounds check,
- /// or other condition) to reach a load with invalid inputs and leak the data
- /// accessed by that load using a side channel out of the speculative domain.
- ///
- /// For details on the attacks, see the first variant in both the Project Zero
- /// writeup and the Spectre paper:
- /// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
- /// https://spectreattack.com/spectre.pdf
- ///
- //===----------------------------------------------------------------------===//
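- // As a purely illustrative example (not code from this file), the kind of
- // gadget being mitigated looks like:
- //
- //   if (idx < array_size)               // bounds check may be bypassed
- //     value = array2[array1[idx] * 64]; // speculative, secret-dependent load
- //                                       // leaks through the cache
- //
- // The array names and stride are hypothetical; the point is a predicate
- // guarding a load whose result feeds a second, leaking load.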
- #include "X86.h"
- #include "X86InstrBuilder.h"
- #include "X86InstrInfo.h"
- #include "X86Subtarget.h"
- #include "llvm/ADT/ArrayRef.h"
- #include "llvm/ADT/DenseMap.h"
- #include "llvm/ADT/Optional.h"
- #include "llvm/ADT/STLExtras.h"
- #include "llvm/ADT/ScopeExit.h"
- #include "llvm/ADT/SmallPtrSet.h"
- #include "llvm/ADT/SmallSet.h"
- #include "llvm/ADT/SmallVector.h"
- #include "llvm/ADT/SparseBitVector.h"
- #include "llvm/ADT/Statistic.h"
- #include "llvm/CodeGen/MachineBasicBlock.h"
- #include "llvm/CodeGen/MachineConstantPool.h"
- #include "llvm/CodeGen/MachineFunction.h"
- #include "llvm/CodeGen/MachineFunctionPass.h"
- #include "llvm/CodeGen/MachineInstr.h"
- #include "llvm/CodeGen/MachineInstrBuilder.h"
- #include "llvm/CodeGen/MachineModuleInfo.h"
- #include "llvm/CodeGen/MachineOperand.h"
- #include "llvm/CodeGen/MachineRegisterInfo.h"
- #include "llvm/CodeGen/MachineSSAUpdater.h"
- #include "llvm/CodeGen/TargetInstrInfo.h"
- #include "llvm/CodeGen/TargetRegisterInfo.h"
- #include "llvm/CodeGen/TargetSchedule.h"
- #include "llvm/CodeGen/TargetSubtargetInfo.h"
- #include "llvm/IR/DebugLoc.h"
- #include "llvm/MC/MCSchedule.h"
- #include "llvm/Pass.h"
- #include "llvm/Support/CommandLine.h"
- #include "llvm/Support/Debug.h"
- #include "llvm/Support/raw_ostream.h"
- #include "llvm/Target/TargetMachine.h"
- #include <algorithm>
- #include <cassert>
- #include <iterator>
- #include <utility>
- using namespace llvm;
- #define PASS_KEY "x86-slh"
- #define DEBUG_TYPE PASS_KEY
- STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
- STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
- STATISTIC(NumAddrRegsHardened,
- "Number of address mode used registers hardaned");
- STATISTIC(NumPostLoadRegsHardened,
- "Number of post-load register values hardened");
- STATISTIC(NumCallsOrJumpsHardened,
- "Number of calls or jumps requiring extra hardening");
- STATISTIC(NumInstsInserted, "Number of instructions inserted");
- STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
- static cl::opt<bool> EnableSpeculativeLoadHardening(
- "x86-speculative-load-hardening",
- cl::desc("Force enable speculative load hardening"), cl::init(false),
- cl::Hidden);
- static cl::opt<bool> HardenEdgesWithLFENCE(
- PASS_KEY "-lfence",
- cl::desc(
- "Use LFENCE along each conditional edge to harden against speculative "
- "loads rather than conditional movs and poisoned pointers."),
- cl::init(false), cl::Hidden);
- static cl::opt<bool> EnablePostLoadHardening(
- PASS_KEY "-post-load",
- cl::desc("Harden the value loaded *after* it is loaded by "
- "flushing the loaded bits to 1. This is hard to do "
- "in general but can be done easily for GPRs."),
- cl::init(true), cl::Hidden);
- static cl::opt<bool> FenceCallAndRet(
- PASS_KEY "-fence-call-and-ret",
- cl::desc("Use a full speculation fence to harden both call and ret edges "
- "rather than a lighter weight mitigation."),
- cl::init(false), cl::Hidden);
- static cl::opt<bool> HardenInterprocedurally(
- PASS_KEY "-ip",
- cl::desc("Harden interprocedurally by passing our state in and out of "
- "functions in the high bits of the stack pointer."),
- cl::init(true), cl::Hidden);
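- // Rough sketch of this interprocedural scheme (the details live in
- // mergePredStateIntoSP and extractPredStateFromSP below; register names and
- // immediates are illustrative): before a call or return, the all-zero /
- // all-ones state is shifted into the high bits of %rsp and OR'ed in, and on
- // the other side it is recovered by smearing the sign bit back out:
- //   shlq $47, %state
- //   orq  %state, %rsp
- //   ...
- //   movq %rsp, %state
- //   sarq $63, %state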
- static cl::opt<bool>
- HardenLoads(PASS_KEY "-loads",
- cl::desc("Sanitize loads from memory. When disable, no "
- "significant security is provided."),
- cl::init(true), cl::Hidden);
- static cl::opt<bool> HardenIndirectCallsAndJumps(
- PASS_KEY "-indirect",
- cl::desc("Harden indirect calls and jumps against using speculatively "
- "stored attacker controlled addresses. This is designed to "
- "mitigate Spectre v1.2 style attacks."),
- cl::init(true), cl::Hidden);
- namespace {
- class X86SpeculativeLoadHardeningPass : public MachineFunctionPass {
- public:
- X86SpeculativeLoadHardeningPass() : MachineFunctionPass(ID) { }
- StringRef getPassName() const override {
- return "X86 speculative load hardening";
- }
- bool runOnMachineFunction(MachineFunction &MF) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
- /// Pass identification, replacement for typeid.
- static char ID;
- private:
- /// The information about a block's conditional terminators needed to trace
- /// our predicate state through the exiting edges.
- struct BlockCondInfo {
- MachineBasicBlock *MBB;
- // We mostly have one conditional branch, and in extremely rare cases have
- // two. Three and more are so rare as to be unimportant for compile time.
- SmallVector<MachineInstr *, 2> CondBrs;
- MachineInstr *UncondBr;
- };
- /// Manages the predicate state traced through the program.
- struct PredState {
- unsigned InitialReg = 0;
- unsigned PoisonReg = 0;
- const TargetRegisterClass *RC;
- MachineSSAUpdater SSA;
- PredState(MachineFunction &MF, const TargetRegisterClass *RC)
- : RC(RC), SSA(MF) {}
- };
- const X86Subtarget *Subtarget = nullptr;
- MachineRegisterInfo *MRI = nullptr;
- const X86InstrInfo *TII = nullptr;
- const TargetRegisterInfo *TRI = nullptr;
- Optional<PredState> PS;
- void hardenEdgesWithLFENCE(MachineFunction &MF);
- SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);
- SmallVector<MachineInstr *, 16>
- tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);
- void unfoldCallAndJumpLoads(MachineFunction &MF);
- SmallVector<MachineInstr *, 16>
- tracePredStateThroughIndirectBranches(MachineFunction &MF);
- void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);
- unsigned saveEFLAGS(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
- void restoreEFLAGS(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
- Register Reg);
- void mergePredStateIntoSP(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
- unsigned PredStateReg);
- unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt,
- DebugLoc Loc);
- void
- hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
- MachineOperand &IndexMO,
- SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
- MachineInstr *
- sinkPostLoadHardenedInst(MachineInstr &MI,
- SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
- bool canHardenRegister(Register Reg);
- unsigned hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt,
- DebugLoc Loc);
- unsigned hardenPostLoad(MachineInstr &MI);
- void hardenReturnInstr(MachineInstr &MI);
- void tracePredStateThroughCall(MachineInstr &MI);
- void hardenIndirectCallOrJumpInstr(
- MachineInstr &MI,
- SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
- };
- } // end anonymous namespace
- char X86SpeculativeLoadHardeningPass::ID = 0;
- void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
- AnalysisUsage &AU) const {
- MachineFunctionPass::getAnalysisUsage(AU);
- }
- static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
- MachineBasicBlock &Succ, int SuccCount,
- MachineInstr *Br, MachineInstr *&UncondBr,
- const X86InstrInfo &TII) {
- assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");
- MachineFunction &MF = *MBB.getParent();
- MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();
- // We have to insert the new block immediately after the current one as we
- // don't know what layout-successor relationships the successor has and we
- // may not be able to (and generally don't want to) try to fix those up.
- MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);
- // Update the branch instruction if necessary.
- if (Br) {
- assert(Br->getOperand(0).getMBB() == &Succ &&
- "Didn't start with the right target!");
- Br->getOperand(0).setMBB(&NewMBB);
- // If this successor was reached through a branch rather than fallthrough,
- // we might have *broken* fallthrough and so need to inject a new
- // unconditional branch.
- if (!UncondBr) {
- MachineBasicBlock &OldLayoutSucc =
- *std::next(MachineFunction::iterator(&NewMBB));
- assert(MBB.isSuccessor(&OldLayoutSucc) &&
- "Without an unconditional branch, the old layout successor should "
- "be an actual successor!");
- auto BrBuilder =
- BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
- // Update the unconditional branch now that we've added one.
- UncondBr = &*BrBuilder;
- }
- // Insert unconditional "jump Succ" instruction in the new block if
- // necessary.
- if (!NewMBB.isLayoutSuccessor(&Succ)) {
- SmallVector<MachineOperand, 4> Cond;
- TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
- }
- } else {
- assert(!UncondBr &&
- "Cannot have a branchless successor and an unconditional branch!");
- assert(NewMBB.isLayoutSuccessor(&Succ) &&
- "A non-branch successor must have been a layout successor before "
- "and now is a layout successor of the new block.");
- }
- // If this is the only edge to the successor, we can just replace it in the
- // CFG. Otherwise we need to add a new entry in the CFG for the new
- // successor.
- if (SuccCount == 1) {
- MBB.replaceSuccessor(&Succ, &NewMBB);
- } else {
- MBB.splitSuccessor(&Succ, &NewMBB);
- }
- // Hook up the edge from the new basic block to the old successor in the CFG.
- NewMBB.addSuccessor(&Succ);
- // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
- for (MachineInstr &MI : Succ) {
- if (!MI.isPHI())
- break;
- for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
- OpIdx += 2) {
- MachineOperand &OpV = MI.getOperand(OpIdx);
- MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
- assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
- if (OpMBB.getMBB() != &MBB)
- continue;
- // If this is the last edge to the successor, just replace MBB in the PHI.
- if (SuccCount == 1) {
- OpMBB.setMBB(&NewMBB);
- break;
- }
- // Otherwise, append a new pair of operands for the new incoming edge.
- MI.addOperand(MF, OpV);
- MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
- break;
- }
- }
- // Inherit live-ins from the successor
- for (auto &LI : Succ.liveins())
- NewMBB.addLiveIn(LI);
- LLVM_DEBUG(dbgs() << " Split edge from '" << MBB.getName() << "' to '"
- << Succ.getName() << "'.\n");
- return NewMBB;
- }
- /// Remove duplicate PHI operands to leave the PHI in a canonical and
- /// predictable form.
- ///
- /// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
- /// isn't what you might expect. We may have multiple entries in PHI nodes for
- /// a single predecessor. This makes CFG-updating extremely complex, so here we
- /// simplify all PHI nodes to a model even simpler than the IR's model: exactly
- /// one entry per predecessor, regardless of how many edges there are.
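- ///
- /// For instance (illustrative MIR), a PHI such as
- ///   %v:gr64 = PHI %a, %bb.1, %b, %bb.1, %c, %bb.2
- /// is rewritten so %bb.1 contributes exactly one entry, keeping the first:
- ///   %v:gr64 = PHI %a, %bb.1, %c, %bb.2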
- static void canonicalizePHIOperands(MachineFunction &MF) {
- SmallPtrSet<MachineBasicBlock *, 4> Preds;
- SmallVector<int, 4> DupIndices;
- for (auto &MBB : MF)
- for (auto &MI : MBB) {
- if (!MI.isPHI())
- break;
- // First we scan the operands of the PHI looking for duplicate entries
- // for a particular predecessor. We retain the operand index of each duplicate
- // entry found.
- for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
- OpIdx += 2)
- if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
- DupIndices.push_back(OpIdx);
- // Now walk the duplicate indices, removing both the block and value. Note
- // that these are stored as a vector, making this element-wise removal
- // potentially quadratic.
- //
- // FIXME: It is really frustrating that we have to use a quadratic
- // removal algorithm here. There should be a better way, but the use-def
- // updates required make that impossible using the public API.
- //
- // Note that we have to process these backwards so that we don't
- // invalidate other indices with each removal.
- while (!DupIndices.empty()) {
- int OpIdx = DupIndices.pop_back_val();
- // Remove both the block and value operand, again in reverse order to
- // preserve indices.
- MI.RemoveOperand(OpIdx + 1);
- MI.RemoveOperand(OpIdx);
- }
- Preds.clear();
- }
- }
- /// Helper to scan a function for loads vulnerable to misspeculation that we
- /// want to harden.
- ///
- /// We use this to avoid making changes to functions where there is nothing we
- /// need to do to harden against misspeculation.
- static bool hasVulnerableLoad(MachineFunction &MF) {
- for (MachineBasicBlock &MBB : MF) {
- for (MachineInstr &MI : MBB) {
- // Loads within this basic block after an LFENCE are not at risk of
- // speculatively executing with invalid predicates from prior control
- // flow. So break out of this block but continue scanning the function.
- if (MI.getOpcode() == X86::LFENCE)
- break;
- // Looking for loads only.
- if (!MI.mayLoad())
- continue;
- // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
- if (MI.getOpcode() == X86::MFENCE)
- continue;
- // We found a load.
- return true;
- }
- }
- // No loads found.
- return false;
- }
- bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
- MachineFunction &MF) {
- LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
- << " **********\n");
- // Only run if this pass is forced enabled or we detect the relevant function
- // attribute requesting SLH.
- if (!EnableSpeculativeLoadHardening &&
- !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
- return false;
- Subtarget = &MF.getSubtarget<X86Subtarget>();
- MRI = &MF.getRegInfo();
- TII = Subtarget->getInstrInfo();
- TRI = Subtarget->getRegisterInfo();
- // FIXME: Support for 32-bit.
- PS.emplace(MF, &X86::GR64_NOSPRegClass);
- if (MF.begin() == MF.end())
- // Nothing to do for a degenerate empty function...
- return false;
- // We support an alternative hardening technique based on a debug flag.
- if (HardenEdgesWithLFENCE) {
- hardenEdgesWithLFENCE(MF);
- return true;
- }
- // Create a dummy debug loc to use for all the generated code here.
- DebugLoc Loc;
- MachineBasicBlock &Entry = *MF.begin();
- auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());
- // Do a quick scan to see if we have any checkable loads.
- bool HasVulnerableLoad = hasVulnerableLoad(MF);
- // See if we have any conditional branching blocks that we will need to trace
- // predicate state through.
- SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);
- // If we have no interesting conditions or loads, nothing to do here.
- if (!HasVulnerableLoad && Infos.empty())
- return true;
- // The poison value is required to be an all-ones value for many aspects of
- // this mitigation.
- const int PoisonVal = -1;
- PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
- BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
- .addImm(PoisonVal);
- ++NumInstsInserted;
- // If we have loads being hardened and we've asked for call and ret edges to
- // get a full fence-based mitigation, inject that fence.
- if (HasVulnerableLoad && FenceCallAndRet) {
- // We need to insert an LFENCE at the start of the function to suspend any
- // incoming misspeculation from the caller. This helps two-fold: the caller
- // may not have been protected as this code has been, and this code gets to
- // not take any specific action to protect across calls.
- // FIXME: We could skip this for functions which unconditionally return
- // a constant.
- BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
- ++NumInstsInserted;
- ++NumLFENCEsInserted;
- }
- // If we guarded the entry with an LFENCE and have no conditionals to protect
- // in blocks, then we're done.
- if (FenceCallAndRet && Infos.empty())
- // We may have changed the function's code at this point to insert fences.
- return true;
- // Establish the initial predicate state, either by extracting it from the
- // incoming stack pointer or by materializing a zero value.
- if (HardenInterprocedurally && !FenceCallAndRet) {
- // Set up the predicate state by extracting it from the incoming stack
- // pointer so we pick up any misspeculation in our caller.
- PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
- } else {
- // Otherwise, just build the predicate state itself by zeroing a register
- // as we don't need any initial state.
- PS->InitialReg = MRI->createVirtualRegister(PS->RC);
- Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
- auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
- PredStateSubReg);
- ++NumInstsInserted;
- MachineOperand *ZeroEFLAGSDefOp =
- ZeroI->findRegisterDefOperand(X86::EFLAGS);
- assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
- "Must have an implicit def of EFLAGS!");
- ZeroEFLAGSDefOp->setIsDead(true);
- BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
- PS->InitialReg)
- .addImm(0)
- .addReg(PredStateSubReg)
- .addImm(X86::sub_32bit);
- }
- // We're going to need to trace predicate state throughout the function's
- // CFG. Prepare for this by setting up our initial state of PHIs with unique
- // predecessor entries and all the initial predicate state.
- canonicalizePHIOperands(MF);
- // Track the updated values in an SSA updater to rewrite into SSA form at the
- // end.
- PS->SSA.Initialize(PS->InitialReg);
- PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
- // Trace through the CFG.
- auto CMovs = tracePredStateThroughCFG(MF, Infos);
- // We may also enter basic blocks in this function via exception handling
- // control flow. Here, if we are hardening interprocedurally, we need to
- // re-capture the predicate state from the throwing code. In the Itanium ABI,
- // the throw will always look like a call to __cxa_throw and will have the
- // predicate state in the stack pointer, so extract fresh predicate state from
- // the stack pointer and make it available in SSA.
- // FIXME: Handle non-itanium ABI EH models.
- if (HardenInterprocedurally) {
- for (MachineBasicBlock &MBB : MF) {
- assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
- assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
- assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
- if (!MBB.isEHPad())
- continue;
- PS->SSA.AddAvailableValue(
- &MBB,
- extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
- }
- }
- if (HardenIndirectCallsAndJumps) {
- // If we are going to harden calls and jumps we need to unfold their memory
- // operands.
- unfoldCallAndJumpLoads(MF);
- // Then we trace predicate state through the indirect branches.
- auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
- CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());
- }
- // Now that we have the predicate state available at the start of each block
- // in the CFG, trace it through each block, hardening vulnerable instructions
- // as we go.
- tracePredStateThroughBlocksAndHarden(MF);
- // Now rewrite all the uses of the pred state using the SSA updater to insert
- // PHIs connecting the state between blocks along the CFG edges.
- for (MachineInstr *CMovI : CMovs)
- for (MachineOperand &Op : CMovI->operands()) {
- if (!Op.isReg() || Op.getReg() != PS->InitialReg)
- continue;
- PS->SSA.RewriteUse(Op);
- }
- LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
- dbgs() << "\n"; MF.verify(this));
- return true;
- }
- /// Implements the naive hardening approach of putting an LFENCE after every
- /// potentially mis-predicted control flow construct.
- ///
- /// We include this as an alternative mostly for the purpose of comparison. The
- /// performance impact of this is expected to be extremely severe and not
- /// practical for any real-world users.
- void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
- MachineFunction &MF) {
- // First, we scan the function looking for blocks that are reached along edges
- // that we might want to harden.
- SmallSetVector<MachineBasicBlock *, 8> Blocks;
- for (MachineBasicBlock &MBB : MF) {
- // If there are no or only one successor, nothing to do here.
- if (MBB.succ_size() <= 1)
- continue;
- // Skip blocks unless their terminators start with a branch. Other
- // terminators don't seem interesting for guarding against misspeculation.
- auto TermIt = MBB.getFirstTerminator();
- if (TermIt == MBB.end() || !TermIt->isBranch())
- continue;
- // Add all the non-EH-pad successors to the blocks we want to harden. We
- // skip EH pads because there isn't really a condition of interest on
- // entering.
- for (MachineBasicBlock *SuccMBB : MBB.successors())
- if (!SuccMBB->isEHPad())
- Blocks.insert(SuccMBB);
- }
- for (MachineBasicBlock *MBB : Blocks) {
- auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
- BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
- ++NumInstsInserted;
- ++NumLFENCEsInserted;
- }
- }
- SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
- X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
- SmallVector<BlockCondInfo, 16> Infos;
- // Walk the function and build up a summary for each block's conditions that
- // we need to trace through.
- for (MachineBasicBlock &MBB : MF) {
- // If there are no or only one successor, nothing to do here.
- if (MBB.succ_size() <= 1)
- continue;
- // We want to reliably handle any conditional branch terminators in the
- // MBB, so we manually analyze the branch. We can handle all of the
- // permutations here, including ones that analyzeBranch cannot.
- //
- // The approach is to walk backwards across the terminators, resetting at
- // any unconditional non-indirect branch, and track all conditional edges
- // to basic blocks as well as the fallthrough or unconditional successor
- // edge. For each conditional edge, we track the target and the opposite
- // condition code in order to inject a "no-op" cmov into that successor
- // that will harden the predicate. For the fallthrough/unconditional
- // edge, we inject a separate cmov for each conditional branch with
- // matching condition codes. This effectively implements an "and" of the
- // condition flags, even if there isn't a single condition flag that would
- // directly implement that. We don't bother trying to optimize either of
- // these cases because if such an optimization is possible, LLVM should
- // have optimized the conditional *branches* in that way already to reduce
- // instruction count. This late, we simply assume the minimal number of
- // branch instructions is being emitted and use that to guide our cmov
- // insertion.
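- //
- // As an illustrative example, a block ending in
- //   jne %bb.true
- //   jmp %bb.false
- // yields CondBrs = { jne } and UncondBr = the jmp, while a lone conditional
- // branch followed by fallthrough yields CondBrs = { jcc } and a null
- // UncondBr.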
- BlockCondInfo Info = {&MBB, {}, nullptr};
- // Now walk backwards through the terminators and build up successors they
- // reach and the conditions.
- for (MachineInstr &MI : llvm::reverse(MBB)) {
- // Once we've handled all the terminators, we're done.
- if (!MI.isTerminator())
- break;
- // If we see a non-branch terminator, we can't handle anything so bail.
- if (!MI.isBranch()) {
- Info.CondBrs.clear();
- break;
- }
- // If we see an unconditional branch, reset our state, clear any
- // fallthrough, and set this as the "else" successor.
- if (MI.getOpcode() == X86::JMP_1) {
- Info.CondBrs.clear();
- Info.UncondBr = &MI;
- continue;
- }
- // If we get an invalid condition, we have an indirect branch or some
- // other unanalyzable "fallthrough" case. We model this as a nullptr for
- // the destination so we can still guard any conditional successors.
- // Consider code sequences like:
- // ```
- // jCC L1
- // jmpq *%rax
- // ```
- // We still want to harden the edge to `L1`.
- if (X86::getCondFromBranch(MI) == X86::COND_INVALID) {
- Info.CondBrs.clear();
- Info.UncondBr = &MI;
- continue;
- }
- // We have a vanilla conditional branch, add it to our list.
- Info.CondBrs.push_back(&MI);
- }
- if (Info.CondBrs.empty()) {
- ++NumBranchesUntraced;
- LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
- MBB.dump());
- continue;
- }
- Infos.push_back(Info);
- }
- return Infos;
- }
- /// Trace the predicate state through the CFG, instrumenting each conditional
- /// branch such that misspeculation through an edge will poison the predicate
- /// state.
- ///
- /// Returns the list of inserted CMov instructions so that they can have their
- /// uses of the predicate state rewritten into proper SSA form once it is
- /// complete.
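- ///
- /// Sketch of the inserted instrumentation (register names illustrative): for
- ///   testq %rdi, %rdi
- ///   je    %bb.then        # fallthrough to %bb.else
- /// the pass inserts at the top of %bb.then
- ///   cmovneq %poison, %state   # condition did not hold: misspeculated
- /// and at the top of %bb.else
- ///   cmoveq  %poison, %state   # branch should have been taken: misspeculated
- /// so the predicate state becomes all-ones along any mispredicted edge.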
- SmallVector<MachineInstr *, 16>
- X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
- MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
- // Collect the inserted cmov instructions so we can rewrite their uses of the
- // predicate state into SSA form.
- SmallVector<MachineInstr *, 16> CMovs;
- // Now walk all of the basic blocks looking for ones that end in conditional
- // jumps where we need to update this register along each edge.
- for (const BlockCondInfo &Info : Infos) {
- MachineBasicBlock &MBB = *Info.MBB;
- const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
- MachineInstr *UncondBr = Info.UncondBr;
- LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
- << "\n");
- ++NumCondBranchesTraced;
- // Compute the non-conditional successor as either the target of any
- // unconditional branch or the layout successor.
- MachineBasicBlock *UncondSucc =
- UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
- ? UncondBr->getOperand(0).getMBB()
- : nullptr)
- : &*std::next(MachineFunction::iterator(&MBB));
- // Count how many edges there are to any given successor.
- SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
- if (UncondSucc)
- ++SuccCounts[UncondSucc];
- for (auto *CondBr : CondBrs)
- ++SuccCounts[CondBr->getOperand(0).getMBB()];
- // A lambda to insert cmov instructions into a block checking all of the
- // condition codes in a sequence.
- auto BuildCheckingBlockForSuccAndConds =
- [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
- MachineInstr *Br, MachineInstr *&UncondBr,
- ArrayRef<X86::CondCode> Conds) {
- // First, we split the edge to insert the checking block into a safe
- // location.
- auto &CheckingMBB =
- (SuccCount == 1 && Succ.pred_size() == 1)
- ? Succ
- : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);
- bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
- if (!LiveEFLAGS)
- CheckingMBB.addLiveIn(X86::EFLAGS);
- // Now insert the cmovs to implement the checks.
- auto InsertPt = CheckingMBB.begin();
- assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
- "Should never have a PHI in the initial checking block as it "
- "always has a single predecessor!");
- // We will wire each cmov to each other, but need to start with the
- // incoming pred state.
- unsigned CurStateReg = PS->InitialReg;
- for (X86::CondCode Cond : Conds) {
- int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
- auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
- Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
- // Note that we intentionally use an empty debug location so that
- // this picks up the preceding location.
- auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
- TII->get(CMovOp), UpdatedStateReg)
- .addReg(CurStateReg)
- .addReg(PS->PoisonReg)
- .addImm(Cond);
- // If this is the last cmov and the EFLAGS weren't originally
- // live-in, mark them as killed.
- if (!LiveEFLAGS && Cond == Conds.back())
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
- dbgs() << "\n");
- // The first one of the cmovs will be using the top level
- // `PredStateReg` and need to get rewritten into SSA form.
- if (CurStateReg == PS->InitialReg)
- CMovs.push_back(&*CMovI);
- // The next cmov should start from this one's def.
- CurStateReg = UpdatedStateReg;
- }
- // And put the last one into the available values for SSA form of our
- // predicate state.
- PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
- };
- std::vector<X86::CondCode> UncondCodeSeq;
- for (auto *CondBr : CondBrs) {
- MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
- int &SuccCount = SuccCounts[&Succ];
- X86::CondCode Cond = X86::getCondFromBranch(*CondBr);
- X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
- UncondCodeSeq.push_back(Cond);
- BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
- {InvCond});
- // Decrement the successor count now that we've split one of the edges.
- // We need to keep the count of edges to the successor accurate in order
- // to know above when to *replace* the successor in the CFG vs. just
- // adding the new successor.
- --SuccCount;
- }
- // Since we may have split edges and changed the number of successors,
- // normalize the probabilities. This avoids doing it each time we split an
- // edge.
- MBB.normalizeSuccProbs();
- // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
- // need to intersect the other condition codes. We can do this by just
- // doing a cmov for each one.
- if (!UncondSucc)
- // If we have no fallthrough to protect (perhaps it is an indirect jump?)
- // just skip this and continue.
- continue;
- assert(SuccCounts[UncondSucc] == 1 &&
- "We should never have more than one edge to the unconditional "
- "successor at this point because every other edge must have been "
- "split above!");
- // Sort and unique the codes to minimize them.
- llvm::sort(UncondCodeSeq);
- UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
- UncondCodeSeq.end());
- // Build a checking version of the successor.
- BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
- UncondBr, UncondBr, UncondCodeSeq);
- }
- return CMovs;
- }
- /// Compute the register class for the unfolded load.
- ///
- /// FIXME: This should probably live in X86InstrInfo, potentially by adding
- /// a way to unfold into a newly created vreg rather than requiring a register
- /// input.
- static const TargetRegisterClass *
- getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
- unsigned Opcode) {
- unsigned Index;
- unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
- Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
- const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
- return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
- }
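- // Conceptually, unfolding rewrites a memory-operand call or jump such as
- //   callq *(%rax)
- // into an explicit load feeding a register-operand call (sketch only; the
- // actual opcodes come from unfoldMemoryOperand and the result is a vreg):
- //   movq  (%rax), %rcx
- //   callq *%rcx
- // which lets the load be hardened like any other load.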
- void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
- MachineFunction &MF) {
- for (MachineBasicBlock &MBB : MF)
- // We use make_early_inc_range here so we can remove instructions if needed
- // without disturbing the iteration.
- for (MachineInstr &MI : llvm::make_early_inc_range(MBB.instrs())) {
- // Must either be a call or a branch.
- if (!MI.isCall() && !MI.isBranch())
- continue;
- // We only care about loading variants of these instructions.
- if (!MI.mayLoad())
- continue;
- switch (MI.getOpcode()) {
- default: {
- LLVM_DEBUG(
- dbgs() << "ERROR: Found an unexpected loading branch or call "
- "instruction:\n";
- MI.dump(); dbgs() << "\n");
- report_fatal_error("Unexpected loading branch or call!");
- }
- case X86::FARCALL16m:
- case X86::FARCALL32m:
- case X86::FARCALL64m:
- case X86::FARJMP16m:
- case X86::FARJMP32m:
- case X86::FARJMP64m:
- // We cannot mitigate far jumps or calls, but we also don't expect them
- // to be vulnerable to Spectre v1.2 style attacks.
- continue;
- case X86::CALL16m:
- case X86::CALL16m_NT:
- case X86::CALL32m:
- case X86::CALL32m_NT:
- case X86::CALL64m:
- case X86::CALL64m_NT:
- case X86::JMP16m:
- case X86::JMP16m_NT:
- case X86::JMP32m:
- case X86::JMP32m_NT:
- case X86::JMP64m:
- case X86::JMP64m_NT:
- case X86::TAILJMPm64:
- case X86::TAILJMPm64_REX:
- case X86::TAILJMPm:
- case X86::TCRETURNmi64:
- case X86::TCRETURNmi: {
- // Use the generic unfold logic now that we know we're dealing with
- // expected instructions.
- // FIXME: We don't have test coverage for all of these!
- auto *UnfoldedRC = getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
- if (!UnfoldedRC) {
- LLVM_DEBUG(dbgs()
- << "ERROR: Unable to unfold load from instruction:\n";
- MI.dump(); dbgs() << "\n");
- report_fatal_error("Unable to unfold load!");
- }
- Register Reg = MRI->createVirtualRegister(UnfoldedRC);
- SmallVector<MachineInstr *, 2> NewMIs;
- // If we were able to compute an unfolded reg class, any failure here
- // is just a programming error so just assert.
- bool Unfolded =
- TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
- /*UnfoldStore*/ false, NewMIs);
- (void)Unfolded;
- assert(Unfolded &&
- "Computed unfolded register class but failed to unfold");
- // Now stitch the new instructions into place and erase the old one.
- for (auto *NewMI : NewMIs)
- MBB.insert(MI.getIterator(), NewMI);
- // Update the call site info.
- if (MI.isCandidateForCallSiteEntry())
- MF.eraseCallSiteInfo(&MI);
- MI.eraseFromParent();
- LLVM_DEBUG({
- dbgs() << "Unfolded load successfully into:\n";
- for (auto *NewMI : NewMIs) {
- NewMI->dump();
- dbgs() << "\n";
- }
- });
- continue;
- }
- }
- llvm_unreachable("Escaped switch with default!");
- }
- }
- /// Trace the predicate state through indirect branches, instrumenting them to
- /// poison the state if a target is reached that does not match the expected
- /// target.
- ///
- /// This is designed to mitigate Spectre variant 1 attacks where an indirect
- /// branch is trained to predict a particular target and then mispredicts that
- /// target in a way that can leak data. Despite using an indirect branch, this
- /// is really a variant 1 style attack: it does not steer execution to an
- /// arbitrary or attacker controlled address, and it does not require any
- /// special code executing next to the victim. This attack can also be mitigated
- /// through retpolines, but those require either replacing indirect branches
- /// with conditional direct branches or lowering them through a device that
- /// blocks speculation. This mitigation can replace these retpoline-style
- /// mitigations for jump tables and other indirect branches within a function
- /// when variant 2 isn't a risk while allowing limited speculation. Indirect
- /// calls, however, cannot be mitigated through this technique without changing
- /// the ABI in a fundamental way.
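- ///
- /// Sketch of the check inserted at each indirect-branch target (illustrative
- /// registers; with PIC or a large code model, an LEA of the block address is
- /// used instead of an immediate):
- ///   .LBB0_target:
- ///     cmpq    $.LBB0_target, %targetaddr
- ///     cmovneq %poison, %state   # poison if the dynamic target differs from
- ///                               # this block's own address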
- SmallVector<MachineInstr *, 16>
- X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
- MachineFunction &MF) {
- // We use the SSAUpdater to insert PHI nodes for the target addresses of
- // indirect branches. We don't actually need the full power of the SSA updater
- // in this particular case as we always have immediately available values, but
- // this avoids us having to re-implement the PHI construction logic.
- MachineSSAUpdater TargetAddrSSA(MF);
- TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));
- // Track which blocks were terminated with an indirect branch.
- SmallPtrSet<MachineBasicBlock *, 4> IndirectTerminatedMBBs;
- // We need to know what blocks end up reached via indirect branches. We
- // expect this to be a subset of those whose address is taken and so track it
- // directly via the CFG.
- SmallPtrSet<MachineBasicBlock *, 4> IndirectTargetMBBs;
- // Walk all the blocks which end in an indirect branch and make the
- // target address available.
- for (MachineBasicBlock &MBB : MF) {
- // Find the last terminator.
- auto MII = MBB.instr_rbegin();
- while (MII != MBB.instr_rend() && MII->isDebugInstr())
- ++MII;
- if (MII == MBB.instr_rend())
- continue;
- MachineInstr &TI = *MII;
- if (!TI.isTerminator() || !TI.isBranch())
- // No terminator or non-branch terminator.
- continue;
- unsigned TargetReg;
- switch (TI.getOpcode()) {
- default:
- // Direct branch or conditional branch (leading to fallthrough).
- continue;
- case X86::FARJMP16m:
- case X86::FARJMP32m:
- case X86::FARJMP64m:
- // We cannot mitigate far jumps or calls, but we also don't expect them
- // to be vulnerable to Spectre v1.2 or v2 (self trained) style attacks.
- continue;
- case X86::JMP16m:
- case X86::JMP16m_NT:
- case X86::JMP32m:
- case X86::JMP32m_NT:
- case X86::JMP64m:
- case X86::JMP64m_NT:
- // Mostly as documentation.
- report_fatal_error("Memory operand jumps should have been unfolded!");
- case X86::JMP16r:
- report_fatal_error(
- "Support for 16-bit indirect branches is not implemented.");
- case X86::JMP32r:
- report_fatal_error(
- "Support for 32-bit indirect branches is not implemented.");
- case X86::JMP64r:
- TargetReg = TI.getOperand(0).getReg();
- }
- // We have definitely found an indirect branch. Verify that there are no
- // preceding conditional branches as we don't yet support that.
- if (llvm::any_of(MBB.terminators(), [&](MachineInstr &OtherTI) {
- return !OtherTI.isDebugInstr() && &OtherTI != &TI;
- })) {
- LLVM_DEBUG({
- dbgs() << "ERROR: Found other terminators in a block with an indirect "
- "branch! This is not yet supported! Terminator sequence:\n";
- for (MachineInstr &MI : MBB.terminators()) {
- MI.dump();
- dbgs() << '\n';
- }
- });
- report_fatal_error("Unimplemented terminator sequence!");
- }
- // Make the target register an available value for this block.
- TargetAddrSSA.AddAvailableValue(&MBB, TargetReg);
- IndirectTerminatedMBBs.insert(&MBB);
- // Add all the successors to our target candidates.
- for (MachineBasicBlock *Succ : MBB.successors())
- IndirectTargetMBBs.insert(Succ);
- }
- // Keep track of the cmov instructions we insert so we can return them.
- SmallVector<MachineInstr *, 16> CMovs;
- // If we didn't find any indirect branches with targets, nothing to do here.
- if (IndirectTargetMBBs.empty())
- return CMovs;
- // We found indirect branches and targets that need to be instrumented to
- // harden loads within them. Walk the blocks of the function (to get a stable
- // ordering) and instrument each target of an indirect branch.
- for (MachineBasicBlock &MBB : MF) {
- // Skip the blocks that aren't candidate targets.
- if (!IndirectTargetMBBs.count(&MBB))
- continue;
- // We don't expect EH pads to ever be reached via an indirect branch. If
- // this is desired for some reason, we could simply skip them here rather
- // than asserting.
- assert(!MBB.isEHPad() &&
- "Unexpected EH pad as target of an indirect branch!");
- // We should never end up threading EFLAGS into a block to harden
- // conditional jumps as there would be an additional successor via the
- // indirect branch. As a consequence, all such edges would be split before
- // reaching here, and the inserted block will handle the EFLAGS-based
- // hardening.
- assert(!MBB.isLiveIn(X86::EFLAGS) &&
- "Cannot check within a block that already has live-in EFLAGS!");
- // We can't handle having non-indirect edges into this block unless this is
- // the only successor and we can synthesize the necessary target address.
- for (MachineBasicBlock *Pred : MBB.predecessors()) {
- // If we've already handled this by extracting the target directly,
- // nothing to do.
- if (IndirectTerminatedMBBs.count(Pred))
- continue;
- // Otherwise, we have to be the only successor. We generally expect this
- // to be true as conditional branches should have had a critical edge
- // split already. We don't however need to worry about EH pad successors
- // as they'll happily ignore the target and their hardening strategy is
- // resilient to all ways in which they could be reached speculatively.
- if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
- return Succ->isEHPad() || Succ == &MBB;
- })) {
- LLVM_DEBUG({
- dbgs() << "ERROR: Found conditional entry to target of indirect "
- "branch!\n";
- Pred->dump();
- MBB.dump();
- });
- report_fatal_error("Cannot harden a conditional entry to a target of "
- "an indirect branch!");
- }
- // Now we need to compute the address of this block and install it as a
- // synthetic target in the predecessor. We do this at the bottom of the
- // predecessor.
- auto InsertPt = Pred->getFirstTerminator();
- Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
- if (MF.getTarget().getCodeModel() == CodeModel::Small &&
- !Subtarget->isPositionIndependent()) {
- // Directly materialize it into an immediate.
- auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(),
- TII->get(X86::MOV64ri32), TargetReg)
- .addMBB(&MBB);
- ++NumInstsInserted;
- (void)AddrI;
- LLVM_DEBUG(dbgs() << " Inserting mov: "; AddrI->dump();
- dbgs() << "\n");
- } else {
- auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r),
- TargetReg)
- .addReg(/*Base*/ X86::RIP)
- .addImm(/*Scale*/ 1)
- .addReg(/*Index*/ 0)
- .addMBB(&MBB)
- .addReg(/*Segment*/ 0);
- ++NumInstsInserted;
- (void)AddrI;
- LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump();
- dbgs() << "\n");
- }
- // And make this available.
- TargetAddrSSA.AddAvailableValue(Pred, TargetReg);
- }
- // Materialize the needed SSA value of the target. Note that we need the
- // middle of the block as this block might at the bottom have an indirect
- // branch back to itself. We can do this here because at this point, every
- // predecessor of this block has an available value. This is basically just
- // automating the construction of a PHI node for this target.
- Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
- // Insert a comparison of the incoming target register with this block's
- // address. This also requires us to mark the block as having its address
- // taken explicitly.
- MBB.setHasAddressTaken();
- auto InsertPt = MBB.SkipPHIsLabelsAndDebug(MBB.begin());
- if (MF.getTarget().getCodeModel() == CodeModel::Small &&
- !Subtarget->isPositionIndependent()) {
- // Check directly against a relocated immediate when we can.
- auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32))
- .addReg(TargetReg, RegState::Kill)
- .addMBB(&MBB);
- ++NumInstsInserted;
- (void)CheckI;
- LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
- } else {
- // Otherwise compute the address into a register first.
- Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
- auto AddrI =
- BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
- .addReg(/*Base*/ X86::RIP)
- .addImm(/*Scale*/ 1)
- .addReg(/*Index*/ 0)
- .addMBB(&MBB)
- .addReg(/*Segment*/ 0);
- ++NumInstsInserted;
- (void)AddrI;
- LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); dbgs() << "\n");
- auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
- .addReg(TargetReg, RegState::Kill)
- .addReg(AddrReg, RegState::Kill);
- ++NumInstsInserted;
- (void)CheckI;
- LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
- }
- // Now cmov over the predicate if the comparison wasn't equal.
- int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
- auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
- Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
- auto CMovI =
- BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
- .addReg(PS->InitialReg)
- .addReg(PS->PoisonReg)
- .addImm(X86::COND_NE);
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
- CMovs.push_back(&*CMovI);
- // And put the new value into the available values for SSA form of our
- // predicate state.
- PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
- }
- // Return all the newly inserted cmov instructions of the predicate state.
- return CMovs;
- }
- // Returns true if the MI has EFLAGS as a register def operand and it's live;
- // otherwise it returns false.
- static bool isEFLAGSDefLive(const MachineInstr &MI) {
- if (const MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
- return !DefOp->isDead();
- }
- return false;
- }
- static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- const TargetRegisterInfo &TRI) {
- // Check if EFLAGS are alive by seeing if there is a def of them or they
- // live-in, and then seeing if that def is in turn used.
- for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
- if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
- // If the def is dead, then EFLAGS is not live.
- if (DefOp->isDead())
- return false;
- // Otherwise we've def'ed it, and it is live.
- return true;
- }
- // While at this instruction, also check if we use and kill EFLAGS
- // which means it isn't live.
- if (MI.killsRegister(X86::EFLAGS, &TRI))
- return false;
- }
- // If we didn't find anything conclusive (neither definitely alive nor
- // definitely dead) return whether it lives into the block.
- return MBB.isLiveIn(X86::EFLAGS);
- }
- /// Trace the predicate state through each of the blocks in the function,
- /// hardening everything necessary along the way.
- ///
- /// We call this routine once the initial predicate state has been established
- /// for each basic block in the function in the SSA updater. This routine traces
- /// it through the instructions within each basic block, and for non-returning
- /// blocks informs the SSA updater about the final state that lives out of the
- /// block. Along the way, it hardens any vulnerable instruction using the
- /// currently valid predicate state. We have to do these two things together
- /// because the SSA updater only works across blocks. Within a block, we track
- /// the current predicate state directly and update it as it changes.
- ///
- /// This operates in two passes over each block. First, we analyze the loads in
- /// the block to determine which strategy will be used to harden them: hardening
- /// the address or hardening the loaded value when loaded into a register
- /// amenable to hardening. We have to process these first because the two
- /// strategies may interact -- later hardening may change what strategy we wish
- /// to use. We also will analyze data dependencies between loads and avoid
- /// hardening those loads that are data dependent on a load with a hardened
- /// address. We also skip hardening loads already behind an LFENCE as that is
- /// sufficient to harden them against misspeculation.
- ///
- /// Second, we actively trace the predicate state through the block, applying
- /// the hardening steps we determined necessary in the first pass as we go.
- ///
- /// These two passes are applied to each basic block. We operate one block at a
- /// time to simplify reasoning about reachability and sequencing.
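- ///
- /// The two strategies look roughly like (illustrative registers):
- ///   orq  %state, %rsi        # address hardening: mask the pointer, then
- ///   movq (%rsi), %rax        # load through it
- /// versus
- ///   movq (%rsi), %rax        # post-load hardening: load first, then
- ///   orq  %state, %rax        # mask the loaded value
- /// When misspeculating, %state is all-ones, so either the address or the
- /// loaded value is forced to all-ones and cannot leak secret data.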
- void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
- MachineFunction &MF) {
- SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
- SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;
- SmallSet<unsigned, 16> HardenedAddrRegs;
- SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;
- // Track the set of load-dependent registers through the basic block. Because
- // the values of these registers have an existing data dependency on a loaded
- // value which we would have checked, we can omit any checks on them.
- SparseBitVector<> LoadDepRegs;
- for (MachineBasicBlock &MBB : MF) {
- // The first pass over the block: collect all the loads which can have their
- // loaded value hardened and all the loads that instead need their address
- // hardened. During this walk we propagate load dependence for address
- // hardened loads and also look for LFENCE to stop hardening wherever
- // possible. When deciding whether or not to harden the loaded value,
- // we check to see if any registers used in the address will have been
- // hardened at this point and if so, harden any remaining address registers
- // as that often successfully re-uses hardened addresses and minimizes
- // instructions.
- //
- // FIXME: We should consider an aggressive mode where we keep as many loads
- // value-hardened as possible, even when some address register hardening
- // would be free (due to reuse).
- //
- // Note that we only need this pass if we are actually hardening loads.
- if (HardenLoads)
- for (MachineInstr &MI : MBB) {
- // We naively assume that all def'ed registers of an instruction have
- // a data dependency on all of their operands.
- // FIXME: Do a more careful analysis of x86 to build a conservative
- // model here.
- if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
- return Op.isReg() && LoadDepRegs.test(Op.getReg());
- }))
- for (MachineOperand &Def : MI.defs())
- if (Def.isReg())
- LoadDepRegs.set(Def.getReg());
- // Both Intel and AMD have issued guidance that they will change the semantics
- // of LFENCE to be a speculation barrier, so if we see an LFENCE, there is
- // no more need to guard things in this block.
- if (MI.getOpcode() == X86::LFENCE)
- break;
- // If this instruction cannot load, nothing to do.
- if (!MI.mayLoad())
- continue;
- // Some instructions which "load" are trivially safe or unimportant.
- if (MI.getOpcode() == X86::MFENCE)
- continue;
- // Extract the memory operand information about this instruction.
- // FIXME: This doesn't handle loading pseudo instructions which we often
- // could handle with similarly generic logic. We probably need to add an
- // MI-layer routine similar to the MC-layer one we use here which maps
- // pseudos much like this maps real instructions.
- const MCInstrDesc &Desc = MI.getDesc();
- int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
- if (MemRefBeginIdx < 0) {
- LLVM_DEBUG(dbgs()
- << "WARNING: unable to harden loading instruction: ";
- MI.dump());
- continue;
- }
- MemRefBeginIdx += X86II::getOperandBias(Desc);
- MachineOperand &BaseMO =
- MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
- MachineOperand &IndexMO =
- MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
- // If we have at least one (non-frame-index, non-RIP) register operand,
- // and neither operand is load-dependent, we need to check the load.
- unsigned BaseReg = 0, IndexReg = 0;
- if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
- BaseMO.getReg() != X86::NoRegister)
- BaseReg = BaseMO.getReg();
- if (IndexMO.getReg() != X86::NoRegister)
- IndexReg = IndexMO.getReg();
- if (!BaseReg && !IndexReg)
- // No register operands!
- continue;
- // If any register operand is dependent, this load is dependent and we
- // needn't check it.
- // FIXME: Is this true in the case where we are hardening loads after
- // they complete? Unclear, need to investigate.
- if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
- (IndexReg && LoadDepRegs.test(IndexReg)))
- continue;
- // If post-load hardening is enabled, this load is compatible with
- // post-load hardening, and we aren't already going to harden one of the
- // address registers, queue it up to be hardened post-load. Notably,
- // even once hardened this won't introduce a useful dependency that
- // could prune out subsequent loads.
- if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
- !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
- MI.getOperand(0).isReg() &&
- canHardenRegister(MI.getOperand(0).getReg()) &&
- !HardenedAddrRegs.count(BaseReg) &&
- !HardenedAddrRegs.count(IndexReg)) {
- HardenPostLoad.insert(&MI);
- HardenedAddrRegs.insert(MI.getOperand(0).getReg());
- continue;
- }
- // Record this instruction for address hardening and record its register
- // operands as being address-hardened.
- HardenLoadAddr.insert(&MI);
- if (BaseReg)
- HardenedAddrRegs.insert(BaseReg);
- if (IndexReg)
- HardenedAddrRegs.insert(IndexReg);
- for (MachineOperand &Def : MI.defs())
- if (Def.isReg())
- LoadDepRegs.set(Def.getReg());
- }
- // Now re-walk the instructions in the basic block, and apply whichever
- // hardening strategy we have elected. Note that we do this in a second
- // pass specifically so that we have the complete set of instructions for
- // which we will do post-load hardening and can defer it in certain
- // circumstances.
- for (MachineInstr &MI : MBB) {
- if (HardenLoads) {
- // We cannot both require hardening the def of a load and its address.
- assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
- "Requested to harden both the address and def of a load!");
- // Check if this is a load whose address needs to be hardened.
- if (HardenLoadAddr.erase(&MI)) {
- const MCInstrDesc &Desc = MI.getDesc();
- int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
- assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");
- MemRefBeginIdx += X86II::getOperandBias(Desc);
- MachineOperand &BaseMO =
- MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
- MachineOperand &IndexMO =
- MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
- hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
- continue;
- }
- // Test if this instruction is one of our post load instructions (and
- // remove it from the set if so).
- if (HardenPostLoad.erase(&MI)) {
- assert(!MI.isCall() && "Must not try to post-load harden a call!");
- // If this is a data-invariant load and there is no EFLAGS
- // interference, we want to try and sink any hardening as far as
- // possible.
- if (X86InstrInfo::isDataInvariantLoad(MI) && !isEFLAGSDefLive(MI)) {
- // Sink the instruction we'll need to harden as far as we can down
- // the graph.
- MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);
- // If we managed to sink this instruction, update everything so we
- // harden that instruction when we reach it in the instruction
- // sequence.
- if (SunkMI != &MI) {
- // If in sinking there was no instruction needing to be hardened,
- // we're done.
- if (!SunkMI)
- continue;
- // Otherwise, add this to the set of defs we harden.
- HardenPostLoad.insert(SunkMI);
- continue;
- }
- }
- unsigned HardenedReg = hardenPostLoad(MI);
- // Mark the resulting hardened register as such so we don't re-harden.
- AddrRegToHardenedReg[HardenedReg] = HardenedReg;
- continue;
- }
- // Check for an indirect call or branch that may need its input hardened
- // even if we couldn't find the specific load used, or were able to
- // avoid hardening it for some reason. Note that here we cannot break
- // out afterward as we may still need to handle any call aspect of this
- // instruction.
- if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
- hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
- }
- // After we finish hardening loads we handle interprocedural hardening if
- // enabled and relevant for this instruction.
- if (!HardenInterprocedurally)
- continue;
- if (!MI.isCall() && !MI.isReturn())
- continue;
- // If this is a direct return (i.e., not a tail call) just directly harden
- // it.
- if (MI.isReturn() && !MI.isCall()) {
- hardenReturnInstr(MI);
- continue;
- }
- // Otherwise we have a call. We need to handle transferring the predicate
- // state into a call and recovering it after the call returns (unless this
- // is a tail call).
- assert(MI.isCall() && "Should only reach here for calls!");
- tracePredStateThroughCall(MI);
- }
- HardenPostLoad.clear();
- HardenLoadAddr.clear();
- HardenedAddrRegs.clear();
- AddrRegToHardenedReg.clear();
- // Currently, we only track data-dependent loads within a basic block.
- // FIXME: We should see if this is necessary or if we could be more
- // aggressive here without opening up attack avenues.
- LoadDepRegs.clear();
- }
- }
- /// Save EFLAGS into the returned GPR. This can in turn be restored with
- /// `restoreEFLAGS`.
- ///
- /// Note that LLVM can only lower very simple patterns of saved and restored
- /// EFLAGS registers. The restore should always be within the same basic block
- /// as the save so that no PHI nodes are inserted.
- unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
- DebugLoc Loc) {
- // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
- // what instruction selection does.
- Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
- // We directly copy the FLAGS register and rely on later lowering to clean
- // this up into the appropriate setCC instructions.
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
- ++NumInstsInserted;
- return Reg;
- }
- /// Restore EFLAGS from the provided GPR. This should be produced by
- /// `saveEFLAGS`.
- ///
- /// This must be done within the same basic block as the save in order to
- /// reliably lower.
- void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
- Register Reg) {
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
- ++NumInstsInserted;
- }
- /// Takes the current predicate state (in a register) and merges it into the
- /// stack pointer. The state is essentially a single bit, but we merge this in
- /// a way that won't form non-canonical pointers and also will be preserved
- /// across normal stack adjustments.
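- ///
- /// For illustration only (register names are placeholders, and the sequence is
- /// a rough sketch of what the code below emits), merging looks like:
- ///   shlq $47, %state   # place the all-zeros or all-ones state in the high bits
- ///   orq  %state, %rsp  # merge it into the stack pointer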
- void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
- unsigned PredStateReg) {
- Register TmpReg = MRI->createVirtualRegister(PS->RC);
- // FIXME: This hard codes a shift distance based on the number of bits needed
- // to stay canonical on 64-bit. We should compute this somehow and support
- // 32-bit as part of that.
- auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
- .addReg(PredStateReg, RegState::Kill)
- .addImm(47);
- ShiftI->addRegisterDead(X86::EFLAGS, TRI);
- ++NumInstsInserted;
- auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
- .addReg(X86::RSP)
- .addReg(TmpReg, RegState::Kill);
- OrI->addRegisterDead(X86::EFLAGS, TRI);
- ++NumInstsInserted;
- }
- /// Extracts the predicate state stored in the high bits of the stack pointer.
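- ///
- /// For illustration only (register names are placeholders), the emitted
- /// sequence is roughly:
- ///   movq %rsp, %tmp
- ///   sarq $63, %tmp     # smear the high bit so the state is all zeros or all ones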
- unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
- DebugLoc Loc) {
- Register PredStateReg = MRI->createVirtualRegister(PS->RC);
- Register TmpReg = MRI->createVirtualRegister(PS->RC);
- // We know that the stack pointer will have any preserved predicate state in
- // its high bit. We just want to smear this across the other bits. Turns out,
- // this is exactly what an arithmetic right shift does.
- BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
- .addReg(X86::RSP);
- auto ShiftI =
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
- .addReg(TmpReg, RegState::Kill)
- .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
- ShiftI->addRegisterDead(X86::EFLAGS, TRI);
- ++NumInstsInserted;
- return PredStateReg;
- }
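- /// Harden a load by merging the current predicate state into the registers
- /// used to form its address (the base and/or index register). Under
- /// misspeculation the state is all ones, so the merged address no longer
- /// points at an attacker-chosen location and the load cannot leak secret data.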
- void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
- MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
- SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
- MachineBasicBlock &MBB = *MI.getParent();
- const DebugLoc &Loc = MI.getDebugLoc();
- // Check if EFLAGS are alive by seeing if there is a def of them or they
- // live-in, and then seeing if that def is in turn used.
- bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
- SmallVector<MachineOperand *, 2> HardenOpRegs;
- if (BaseMO.isFI()) {
- // A frame index is never a dynamically controllable load, so only
- // harden it if we're covering fixed address loads as well.
- LLVM_DEBUG(
- dbgs() << " Skipping hardening base of explicit stack frame load: ";
- MI.dump(); dbgs() << "\n");
- } else if (BaseMO.getReg() == X86::RSP) {
- // Some idempotent atomic operations are lowered directly to a locked
- // OR with 0 to the top of the stack (or slightly offset from the top), which
- // uses an explicit RSP register as the base.
- assert(IndexMO.getReg() == X86::NoRegister &&
- "Explicit RSP access with dynamic index!");
- LLVM_DEBUG(
- dbgs() << " Cannot harden base of explicit RSP offset in a load!");
- } else if (BaseMO.getReg() == X86::RIP ||
- BaseMO.getReg() == X86::NoRegister) {
- // For both RIP-relative addressed loads or absolute loads, we cannot
- // meaningfully harden them because the address being loaded has no
- // dynamic component.
- //
- // FIXME: When using a segment base (like TLS does) we end up with the
- // dynamic address being the base plus -1 because we can't mutate the
- // segment register here. This allows the signed 32-bit offset to point at
- // valid segment-relative addresses and load them successfully.
- LLVM_DEBUG(
- dbgs() << " Cannot harden base of "
- << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
- << " address in a load!");
- } else {
- assert(BaseMO.isReg() &&
- "Only allowed to have a frame index or register base.");
- HardenOpRegs.push_back(&BaseMO);
- }
- if (IndexMO.getReg() != X86::NoRegister &&
- (HardenOpRegs.empty() ||
- HardenOpRegs.front()->getReg() != IndexMO.getReg()))
- HardenOpRegs.push_back(&IndexMO);
- assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
- "Should have exactly one or two registers to harden!");
- assert((HardenOpRegs.size() == 1 ||
- HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
- "Should not have two of the same registers!");
- // Remove any registers that have already been checked.
- llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
- // See if this operand's register has already been checked.
- auto It = AddrRegToHardenedReg.find(Op->getReg());
- if (It == AddrRegToHardenedReg.end())
- // Not checked, so retain this one.
- return false;
- // Otherwise, we can directly update this operand and remove it.
- Op->setReg(It->second);
- return true;
- });
- // If there are none left, we're done.
- if (HardenOpRegs.empty())
- return;
- // Compute the current predicate state.
- Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
- auto InsertPt = MI.getIterator();
- // If EFLAGS are live and we don't have access to instructions that avoid
- // clobbering EFLAGS we need to save and restore them. This in turn makes
- // the EFLAGS no longer live.
- unsigned FlagsReg = 0;
- if (EFLAGSLive && !Subtarget->hasBMI2()) {
- EFLAGSLive = false;
- FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
- }
- for (MachineOperand *Op : HardenOpRegs) {
- Register OpReg = Op->getReg();
- auto *OpRC = MRI->getRegClass(OpReg);
- Register TmpReg = MRI->createVirtualRegister(OpRC);
- // If this is a vector register, we'll need somewhat custom logic to handle
- // hardening it.
- if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
- OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
- assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
- bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
- // Move our state into a vector register.
- // FIXME: We could skip this at the cost of longer encodings with AVX-512
- // but that doesn't seem likely to be worth it.
- Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
- auto MovI =
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
- .addReg(StateReg);
- (void)MovI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
- // Broadcast it across the vector register.
- Register VBStateReg = MRI->createVirtualRegister(OpRC);
- auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
- TII->get(Is128Bit ? X86::VPBROADCASTQrr
- : X86::VPBROADCASTQYrr),
- VBStateReg)
- .addReg(VStateReg);
- (void)BroadcastI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
- dbgs() << "\n");
- // Merge our potential poison state into the value with a vector or.
- auto OrI =
- BuildMI(MBB, InsertPt, Loc,
- TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
- .addReg(VBStateReg)
- .addReg(OpReg);
- (void)OrI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
- } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
- OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
- OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
- assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
- bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
- bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
- if (Is128Bit || Is256Bit)
- assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
- // Broadcast our state into a vector register.
- Register VStateReg = MRI->createVirtualRegister(OpRC);
- unsigned BroadcastOp = Is128Bit ? X86::VPBROADCASTQrZ128rr
- : Is256Bit ? X86::VPBROADCASTQrZ256rr
- : X86::VPBROADCASTQrZrr;
- auto BroadcastI =
- BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
- .addReg(StateReg);
- (void)BroadcastI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
- dbgs() << "\n");
- // Merge our potential poison state into the value with a vector or.
- unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
- : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
- auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
- .addReg(VStateReg)
- .addReg(OpReg);
- (void)OrI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
- } else {
- // FIXME: Need to support GR32 here for 32-bit code.
- assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
- "Not a supported register class for address hardening!");
- if (!EFLAGSLive) {
- // Merge our potential poison state into the value with an or.
- auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
- .addReg(StateReg)
- .addReg(OpReg);
- OrI->addRegisterDead(X86::EFLAGS, TRI);
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
- } else {
- // We need to avoid touching EFLAGS so shift out all but the least
- // significant bit using the instruction that doesn't update flags.
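- // The state register is zero on the architectural path, so the shift leaves
- // the address untouched; when poisoned it is all ones, and the masked shift
- // count of 63 collapses the address to a single bit.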
- auto ShiftI =
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
- .addReg(OpReg)
- .addReg(StateReg);
- (void)ShiftI;
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
- dbgs() << "\n");
- }
- }
- // Record this register as checked and update the operand.
- assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
- "Should not have checked this register yet!");
- AddrRegToHardenedReg[Op->getReg()] = TmpReg;
- Op->setReg(TmpReg);
- ++NumAddrRegsHardened;
- }
- // And restore the flags if needed.
- if (FlagsReg)
- restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
- }
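- /// Try to sink the post-load hardening of `InitialMI` through chains of single,
- /// data-invariant uses within the same basic block. Returns the instruction
- /// whose defined register should be hardened instead, or null if every use is
- /// already checked and no hardening is needed.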
- MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
- MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
- assert(X86InstrInfo::isDataInvariantLoad(InitialMI) &&
- "Cannot get here with a non-invariant load!");
- assert(!isEFLAGSDefLive(InitialMI) &&
- "Cannot get here with a data invariant load "
- "that interferes with EFLAGS!");
- // See if we can sink hardening the loaded value.
- auto SinkCheckToSingleUse =
- [&](MachineInstr &MI) -> Optional<MachineInstr *> {
- Register DefReg = MI.getOperand(0).getReg();
- // We need to find a single use to which we can sink the check. We can
- // primarily do this because many uses may already end up checked on their
- // own.
- MachineInstr *SingleUseMI = nullptr;
- for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
- // If we're already going to harden this use, it is data invariant, it
- // does not interfere with EFLAGS, and it is within our block.
- if (HardenedInstrs.count(&UseMI)) {
- if (!X86InstrInfo::isDataInvariantLoad(UseMI) || isEFLAGSDefLive(UseMI)) {
- // If we've already decided to harden a non-load, we must have sunk
- // some other post-load hardened instruction to it and it must itself
- // be data-invariant.
- assert(X86InstrInfo::isDataInvariant(UseMI) &&
- "Data variant instruction being hardened!");
- continue;
- }
- // Otherwise, this is a load and the load component can't be data
- // invariant so check how this register is being used.
- const MCInstrDesc &Desc = UseMI.getDesc();
- int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
- assert(MemRefBeginIdx >= 0 &&
- "Should always have mem references here!");
- MemRefBeginIdx += X86II::getOperandBias(Desc);
- MachineOperand &BaseMO =
- UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
- MachineOperand &IndexMO =
- UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
- if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
- (IndexMO.isReg() && IndexMO.getReg() == DefReg))
- // The load uses the register as part of its address making it not
- // invariant.
- return {};
- continue;
- }
- if (SingleUseMI)
- // We already have a single use, this would make two. Bail.
- return {};
- // If this single use isn't data invariant, isn't in this block, or has
- // interfering EFLAGS, we can't sink the hardening to it.
- if (!X86InstrInfo::isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent() ||
- isEFLAGSDefLive(UseMI))
- return {};
- // If this instruction defines multiple registers bail as we won't harden
- // all of them.
- if (UseMI.getDesc().getNumDefs() > 1)
- return {};
- // If this register isn't a virtual register we can't sanely walk its uses,
- // so just bail. Also check that its register class is one of the ones we
- // can harden.
- Register UseDefReg = UseMI.getOperand(0).getReg();
- if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
- return {};
- SingleUseMI = &UseMI;
- }
- // If SingleUseMI is still null, there is no use that needs its own
- // checking. Otherwise, it is the single use that needs checking.
- return {SingleUseMI};
- };
- MachineInstr *MI = &InitialMI;
- while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
- // Update which MI we're checking now.
- MI = *SingleUse;
- if (!MI)
- break;
- }
- return MI;
- }
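- /// Returns true if `Reg` can be hardened directly with an `or` against the
- /// predicate state: a general purpose register of at most eight bytes that is
- /// not constrained to one of the NOREX register classes.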
- bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
- auto *RC = MRI->getRegClass(Reg);
- int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
- if (RegBytes > 8)
- // We don't support post-load hardening of vectors.
- return false;
- unsigned RegIdx = Log2_32(RegBytes);
- assert(RegIdx < 4 && "Unsupported register size");
- // If this register class is explicitly constrained to a class that doesn't
- // require REX prefix, we may not be able to satisfy that constraint when
- // emitting the hardening instructions, so bail out here.
- // FIXME: This seems like a pretty lame hack. The way this comes up is when we
- // end up with both a NOREX and a REX-only register as operands to the hardening
- // instructions. It would be better to fix that code to handle this situation
- // rather than hack around it in this way.
- const TargetRegisterClass *NOREXRegClasses[] = {
- &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
- &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
- if (RC == NOREXRegClasses[RegIdx])
- return false;
- const TargetRegisterClass *GPRRegClasses[] = {
- &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
- &X86::GR64RegClass};
- return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
- }
- /// Harden a value in a register.
- ///
- /// This is the low-level logic to fully harden a value sitting in a register
- /// against leaking during speculative execution.
- ///
- /// Unlike hardening an address that is used by a load, this routine is required
- /// to hide *all* incoming bits in the register.
- ///
- /// `Reg` must be a virtual register. Currently, it is required to be a GPR no
- /// larger than the predicate state register. FIXME: We should support vector
- /// registers here by broadcasting the predicate state.
- ///
- /// The new, hardened virtual register is returned. It will have the same
- /// register class as `Reg`.
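- ///
- /// For illustration only (register names are placeholders), hardening an
- /// 8-byte value amounts to roughly:
- ///   orq %state, %value   # all ones under misspeculation, hiding the value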
- unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
- Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
- DebugLoc Loc) {
- assert(canHardenRegister(Reg) && "Cannot harden this register!");
- assert(Reg.isVirtual() && "Cannot harden a physical register!");
- auto *RC = MRI->getRegClass(Reg);
- int Bytes = TRI->getRegSizeInBits(*RC) / 8;
- Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
- assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
- "Unknown register size");
- // FIXME: Need to teach this about 32-bit mode.
- if (Bytes != 8) {
- unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
- unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
- Register NarrowStateReg = MRI->createVirtualRegister(RC);
- BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
- .addReg(StateReg, 0, SubRegImm);
- StateReg = NarrowStateReg;
- }
- unsigned FlagsReg = 0;
- if (isEFLAGSLive(MBB, InsertPt, *TRI))
- FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
- Register NewReg = MRI->createVirtualRegister(RC);
- unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
- unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
- auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
- .addReg(StateReg)
- .addReg(Reg);
- OrI->addRegisterDead(X86::EFLAGS, TRI);
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
- if (FlagsReg)
- restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
- return NewReg;
- }
- /// Harden a load by hardening the loaded value in the defined register.
- ///
- /// We can harden a non-leaking load into a register without touching the
- /// address by just hiding all of the loaded bits during misspeculation. We use
- /// an `or` instruction to do this because we set up our poison value as all
- /// ones. The goal is just that the loaded bits are not exposed to speculative
- /// execution, and coercing them all to one is sufficient for that.
- ///
- /// Returns the newly hardened register.
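- ///
- /// For illustration only (registers are placeholders), a post-load hardened
- /// load looks roughly like:
- ///   movq (%rsi), %rax
- ///   orq  %state, %rax   # inserted after the load; subsequent uses see this value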
- unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
- MachineBasicBlock &MBB = *MI.getParent();
- const DebugLoc &Loc = MI.getDebugLoc();
- auto &DefOp = MI.getOperand(0);
- Register OldDefReg = DefOp.getReg();
- auto *DefRC = MRI->getRegClass(OldDefReg);
- // Because we want to completely replace the uses of this def'ed value with
- // the hardened value, create a dedicated new register that will only be used
- // to communicate the unhardened value to the hardening.
- Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
- DefOp.setReg(UnhardenedReg);
- // Now harden this register's value, getting a hardened reg that is safe to
- // use. Note that we insert the instructions to compute this *after* the
- // defining instruction, not before it.
- unsigned HardenedReg = hardenValueInRegister(
- UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);
- // Finally, replace the old register (which now only has the uses of the
- // original def) with the hardened register.
- MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);
- ++NumPostLoadRegsHardened;
- return HardenedReg;
- }
- /// Harden a return instruction.
- ///
- /// Returns implicitly perform a load which we need to harden. Without hardening
- /// this load, an attacker may speculatively write over the return address to
- /// steer speculation of the return to an attacker-controlled address. This is
- /// called Spectre v1.1 or Bounds Check Bypass Store (BCBS) and is described in
- /// this paper:
- /// https://people.csail.mit.edu/vlk/spectre11.pdf
- ///
- /// We can harden this by introducing an LFENCE that will delay any load of the
- /// return address until prior instructions have retired (and thus are not being
- /// speculated), or we can harden the address used by the implicit load: the
- /// stack pointer.
- ///
- /// If we are not using an LFENCE, hardening the stack pointer has an additional
- /// benefit: it allows us to pass the predicate state accumulated in this
- /// function back to the caller. In the absence of a BCBS attack on the return,
- /// the caller will typically be resumed and speculatively executed due to the
- /// Return Stack Buffer (RSB) prediction which is very accurate and has a high
- /// priority. It is possible that some code from the caller will be executed
- /// speculatively even during a BCBS-attacked return until the steering takes
- /// effect. Whenever this happens, the caller can recover the (poisoned)
- /// predicate state from the stack pointer and continue to harden loads.
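- ///
- /// For illustration only (register names are placeholders), a hardened return
- /// is roughly:
- ///   shlq $47, %state
- ///   orq  %state, %rsp
- ///   retq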
- void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
- MachineBasicBlock &MBB = *MI.getParent();
- const DebugLoc &Loc = MI.getDebugLoc();
- auto InsertPt = MI.getIterator();
- if (FenceCallAndRet)
- // No need to fence here as we'll fence at the return site itself. That
- // handles more cases than we can handle here.
- return;
- // Take our predicate state, shift it to the high 17 bits (so that we keep
- // pointers canonical) and merge it into RSP. This will allow the caller to
- // extract it when we return (speculatively).
- mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
- }
- /// Trace the predicate state through a call.
- ///
- /// There are several layers of this needed to handle the full complexity of
- /// calls.
- ///
- /// First, we need to send the predicate state into the called function. We do
- /// this by merging it into the high bits of the stack pointer.
- ///
- /// For tail calls, this is all we need to do.
- ///
- /// For calls where we might return and resume the control flow, we need to
- /// extract the predicate state from the high bits of the stack pointer after
- /// control returns from the called function.
- ///
- /// We also need to verify that we intended to return to this location in the
- /// code. An attacker might arrange for the processor to mispredict the return
- /// to this valid but incorrect return address in the program rather than the
- /// correct one. See the paper on this attack, called "ret2spec" by the
- /// researchers, here:
- /// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
- ///
- /// The way we verify that we returned to the correct location is by preserving
- /// the expected return address across the call. One technique involves taking
- /// advantage of the red zone to load the return address from `-8(%rsp)`, where
- /// it was left by the RET instruction when it popped `%rsp`. Alternatively, we can
- /// directly save the address into a register that will be preserved across the
- /// call. We compare this intended return address against the address
- /// immediately following the call (the observed return address). If these
- /// mismatch, we have detected misspeculation and can poison our predicate
- /// state.
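- ///
- /// For illustration only (assuming the small, non-PIC code model with the red
- /// zone available; register and label names are placeholders), the emitted
- /// sequence is roughly:
- ///   shlq  $47, %state
- ///   orq   %state, %rsp            # pass the state to the callee
- ///   callq fn
- /// .Lslh_ret_addr:
- ///   movq  -8(%rsp), %expected     # return address the `ret` actually used
- ///   movq  %rsp, %state
- ///   sarq  $63, %state             # recover the predicate state
- ///   cmpq  $.Lslh_ret_addr, %expected
- ///   cmovneq %poison, %state       # poison the state on a mispredicted return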
- void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
- MachineInstr &MI) {
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- auto InsertPt = MI.getIterator();
- const DebugLoc &Loc = MI.getDebugLoc();
- if (FenceCallAndRet) {
- if (MI.isReturn())
- // Tail call, we don't return to this function.
- // FIXME: We should also handle noreturn calls.
- return;
- // We don't need to fence before the call because the function should fence
- // in its entry. However, we do need to fence after the call returns.
- // Fencing before the return doesn't correctly handle cases where the return
- // itself is mispredicted.
- BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
- ++NumInstsInserted;
- ++NumLFENCEsInserted;
- return;
- }
- // First, we transfer the predicate state into the called function by merging
- // it into the stack pointer. This will kill the current def of the state.
- Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
- mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
- // If this call is also a return, it is a tail call and we don't need anything
- // else to handle it so just return. Also, if there are no further
- // instructions and no successors, this call does not return so we can also
- // bail.
- if (MI.isReturn() || (std::next(InsertPt) == MBB.end() && MBB.succ_empty()))
- return;
- // Create a symbol to track the return address and attach it to the call
- // machine instruction. We will lower extra symbols attached to call
- // instructions as a label immediately following the call.
- MCSymbol *RetSymbol =
- MF.getContext().createTempSymbol("slh_ret_addr",
- /*AlwaysAddSuffix*/ true);
- MI.setPostInstrSymbol(MF, RetSymbol);
- const TargetRegisterClass *AddrRC = &X86::GR64RegClass;
- unsigned ExpectedRetAddrReg = 0;
- // If we have no red zones or if the function returns twice (possibly without
- // using the `ret` instruction) like setjmp, we need to save the expected
- // return address prior to the call.
- if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) ||
- MF.exposesReturnsTwice()) {
- // If we don't have red zones, we need to compute the expected return
- // address prior to the call and store it in a register that lives across
- // the call.
- //
- // In some ways, this is doubly satisfying as a mitigation because it will
- // also successfully detect stack smashing bugs in some cases (typically,
- // when a callee-saved register is used and the callee doesn't push it onto
- // the stack). But that isn't our primary goal, so we only use it as
- // a fallback.
- //
- // FIXME: It isn't clear that this is reliable in the face of
- // rematerialization in the register allocator. We somehow need to force
- // that to not occur for this particular instruction, and instead to spill
- // or otherwise preserve the value computed *prior* to the call.
- //
- // FIXME: It is even less clear why MachineCSE can't just fold this when we
- // end up having to use identical instructions both before and after the
- // call to feed the comparison.
- ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
- if (MF.getTarget().getCodeModel() == CodeModel::Small &&
- !Subtarget->isPositionIndependent()) {
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
- .addSym(RetSymbol);
- } else {
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
- .addReg(/*Base*/ X86::RIP)
- .addImm(/*Scale*/ 1)
- .addReg(/*Index*/ 0)
- .addSym(RetSymbol)
- .addReg(/*Segment*/ 0);
- }
- }
- // Step past the call to handle when it returns.
- ++InsertPt;
- // If we didn't pre-compute the expected return address into a register, then
- // red zones are enabled and the return address is still available on the
- // stack immediately after the call. As the very first instruction after the
- // call, we load it into a register.
- if (!ExpectedRetAddrReg) {
- ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
- .addReg(/*Base*/ X86::RSP)
- .addImm(/*Scale*/ 1)
- .addReg(/*Index*/ 0)
- .addImm(/*Displacement*/ -8) // The stack pointer has been popped, so
- // the return address is 8 bytes below it.
- .addReg(/*Segment*/ 0);
- }
- // Now we extract the callee's predicate state from the stack pointer.
- unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
- // Test the expected return address against our actual address. If we can
- // form this basic block's address as an immediate, this is easy. Otherwise
- // we compute it.
- if (MF.getTarget().getCodeModel() == CodeModel::Small &&
- !Subtarget->isPositionIndependent()) {
- // FIXME: Could we fold this with the load? It would require careful EFLAGS
- // management.
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
- .addReg(ExpectedRetAddrReg, RegState::Kill)
- .addSym(RetSymbol);
- } else {
- Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
- .addReg(/*Base*/ X86::RIP)
- .addImm(/*Scale*/ 1)
- .addReg(/*Index*/ 0)
- .addSym(RetSymbol)
- .addReg(/*Segment*/ 0);
- BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
- .addReg(ExpectedRetAddrReg, RegState::Kill)
- .addReg(ActualRetAddrReg, RegState::Kill);
- }
- // Now conditionally update the predicate state we just extracted if we ended
- // up at a different return address than expected.
- int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
- auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
- Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
- auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
- .addReg(NewStateReg, RegState::Kill)
- .addReg(PS->PoisonReg)
- .addImm(X86::COND_NE);
- CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
- ++NumInstsInserted;
- LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
- PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
- }
- /// An attacker may speculatively store over a value that is then speculatively
- /// loaded and used as the target of an indirect call or jump instruction. This
- /// is a variant of the Bounds Check Bypass Store (BCBS) attack and is described
- /// in this paper:
- /// https://people.csail.mit.edu/vlk/spectre11.pdf
- ///
- /// When this happens, the speculative execution of the call or jump will end up
- /// being steered to this attacker controlled address. While most such loads
- /// will be adequately hardened already, we want to ensure that they are
- /// definitively treated as needing post-load hardening. While address hardening
- /// is sufficient to prevent secret data from leaking to the attacker, it may
- /// not be sufficient to prevent an attacker from steering speculative
- /// execution. We forcibly unfolded all relevant loads above and so will always
- /// have an opportunity to post-load harden here; we just need to scan for cases
- /// not already flagged and add them.
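- ///
- /// For illustration only (registers are placeholders), `jmpq *%rax` becomes
- /// roughly:
- ///   orq  %state, %rax
- ///   jmpq *%rax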
- void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
- MachineInstr &MI,
- SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
- switch (MI.getOpcode()) {
- case X86::FARCALL16m:
- case X86::FARCALL32m:
- case X86::FARCALL64m:
- case X86::FARJMP16m:
- case X86::FARJMP32m:
- case X86::FARJMP64m:
- // We don't need to harden either far calls or far jumps as they are
- // safe from Spectre.
- return;
- default:
- break;
- }
- // We should never see a loading instruction at this point, as those should
- // have been unfolded.
- assert(!MI.mayLoad() && "Found a lingering loading instruction!");
- // If the first operand isn't a register, this is a branch or call
- // instruction with an immediate operand which doesn't need to be hardened.
- if (!MI.getOperand(0).isReg())
- return;
- // For all of these, the target register is the first operand of the
- // instruction.
- auto &TargetOp = MI.getOperand(0);
- Register OldTargetReg = TargetOp.getReg();
- // Try to lookup a hardened version of this register. We retain a reference
- // here as we want to update the map to track any newly computed hardened
- // register.
- unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];
- // If we don't have a hardened register yet, compute one. Otherwise, just use
- // the already hardened register.
- //
- // FIXME: It is a little suspect that we use partially hardened registers that
- // only feed addresses. The complexity of partial hardening with SHRX
- // continues to pile up. Should definitively measure its value and consider
- // eliminating it.
- if (!HardenedTargetReg)
- HardenedTargetReg = hardenValueInRegister(
- OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());
- // Set the target operand to the hardened register.
- TargetOp.setReg(HardenedTargetReg);
- ++NumCallsOrJumpsHardened;
- }
- INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, PASS_KEY,
- "X86 speculative load hardener", false, false)
- INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass, PASS_KEY,
- "X86 speculative load hardener", false, false)
- FunctionPass *llvm::createX86SpeculativeLoadHardeningPass() {
- return new X86SpeculativeLoadHardeningPass();
- }