NoInferenceModelRunner.h

#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- NoInferenceModelRunner.h ---- noop ML model runner ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#include "llvm/Config/llvm-config.h"

/// While not strictly necessary to conditionally compile this, it really
/// has no use case outside the 'development' mode.
#ifdef LLVM_HAVE_TF_API
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
namespace llvm {

/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, in 'development' mode, but never ask it to
/// 'run'.
class NoInferenceModelRunner : public MLModelRunner {
public:
  NoInferenceModelRunner(LLVMContext &Ctx,
                         const std::vector<TensorSpec> &Inputs);

  static bool classof(const MLModelRunner *R) {
    return R->getKind() == MLModelRunner::Kind::NoOp;
  }

private:
  void *evaluateUntyped() override {
    llvm_unreachable("We shouldn't call run on this model runner.");
  }
  void *getTensorUntyped(size_t Index) override;
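  // Backing storage for the input tensors; each TensorSpec passed to the
  // constructor presumably gets its own buffer here, which getTensorUntyped()
  // hands back by index.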
  std::vector<std::unique_ptr<char[]>> ValuesBuffer;
};

} // namespace llvm

#endif // defined(LLVM_HAVE_TF_API)
#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
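
For reference, here is a minimal usage sketch (not part of the header). It assumes LLVM_HAVE_TF_API is defined, that MLModelRunner exposes the typed getTensor<T>() wrapper over getTensorUntyped(), and that TensorSpec::createSpec<T>() from TFUtils.h is available; the feature names, shapes, and the helper function are purely illustrative. The point is that this runner only stores feature values for log collection, so evaluate() must never be called on it.

// Usage sketch only -- feature names, shapes, and collectFeaturesForLogging()
// are hypothetical, not taken from the LLVM tree.
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <vector>

using namespace llvm;

void collectFeaturesForLogging() {
  LLVMContext Ctx;
  // Describe the input tensors whose values should end up in the training log.
  std::vector<TensorSpec> Inputs{
      TensorSpec::createSpec<int64_t>("instruction_count", {1}),
      TensorSpec::createSpec<float>("block_frequency", {1})};

  NoInferenceModelRunner Runner(Ctx, Inputs);

  // Store feature values through the typed accessor; the index is the
  // position of the corresponding TensorSpec in Inputs.
  *Runner.getTensor<int64_t>(0) = 42;
  *Runner.getTensor<float>(1) = 1.5f;

  // classof() lets LLVM-style RTTI recognize the runner as the NoOp kind
  // when only an MLModelRunner* is available.
  MLModelRunner *Generic = &Runner;
  assert(isa<NoInferenceModelRunner>(Generic));

  // Never call Generic->evaluate<T>() here: evaluateUntyped() is
  // llvm_unreachable for this runner by design.
}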