call_op_set.h

//
//
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#ifndef GRPCPP_IMPL_CALL_OP_SET_H
#define GRPCPP_IMPL_CALL_OP_SET_H

#include <cstring>
#include <map>
#include <memory>

#include <grpc/grpc.h>
#include <grpc/impl/compression_types.h>
#include <grpc/impl/grpc_types.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpcpp/client_context.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/call_hook.h>
#include <grpcpp/impl/call_op_set_interface.h>
#include <grpcpp/impl/codegen/intercepted_channel.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/interceptor_common.h>
#include <grpcpp/impl/serialization_traits.h>
#include <grpcpp/support/byte_buffer.h>
#include <grpcpp/support/config.h>
#include <grpcpp/support/slice.h>
#include <grpcpp/support/string_ref.h>

namespace grpc {
namespace internal {

class Call;
class CallHook;

// TODO(yangg) if the map is changed before we send, the pointers will be a
// mess. Make sure it does not happen.
inline grpc_metadata* FillMetadataArray(
    const std::multimap<TString, TString>& metadata,
    size_t* metadata_count, const TString& optional_error_details) {
  *metadata_count = metadata.size() + (optional_error_details.empty() ? 0 : 1);
  if (*metadata_count == 0) {
    return nullptr;
  }
  grpc_metadata* metadata_array = static_cast<grpc_metadata*>(
      gpr_malloc((*metadata_count) * sizeof(grpc_metadata)));
  size_t i = 0;
  for (auto iter = metadata.cbegin(); iter != metadata.cend(); ++iter, ++i) {
    metadata_array[i].key = SliceReferencingString(iter->first);
    metadata_array[i].value = SliceReferencingString(iter->second);
  }
  if (!optional_error_details.empty()) {
    metadata_array[i].key = grpc_slice_from_static_buffer(
        kBinaryErrorDetailsKey, sizeof(kBinaryErrorDetailsKey) - 1);
    metadata_array[i].value = SliceReferencingString(optional_error_details);
  }
  return metadata_array;
}
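
// Usage sketch (illustrative; the metadata contents below are hypothetical).
// FillMetadataArray borrows the key/value storage via SliceReferencingString
// rather than copying it, so the source multimap must outlive the returned
// array, and the caller frees the array itself with gpr_free, as the ops
// further down in this header do:
//
//   std::multimap<TString, TString> md{{"x-trace-id", "abc"}};
//   size_t count = 0;
//   grpc_metadata* arr = FillMetadataArray(md, &count, "");
//   // ... point a grpc_op's metadata/count fields at arr/count ...
//   gpr_free(arr);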
}  // namespace internal

/// Per-message write options.
class WriteOptions {
 public:
  WriteOptions() : flags_(0), last_message_(false) {}

  /// Clear all flags.
  inline void Clear() { flags_ = 0; }

  /// Returns raw flags bitset.
  inline uint32_t flags() const { return flags_; }

  /// Sets flag for the disabling of compression for the next message write.
  ///
  /// \sa GRPC_WRITE_NO_COMPRESS
  inline WriteOptions& set_no_compression() {
    SetBit(GRPC_WRITE_NO_COMPRESS);
    return *this;
  }

  /// Clears flag for the disabling of compression for the next message write.
  ///
  /// \sa GRPC_WRITE_NO_COMPRESS
  inline WriteOptions& clear_no_compression() {
    ClearBit(GRPC_WRITE_NO_COMPRESS);
    return *this;
  }

  /// Get value for the flag indicating whether compression for the next
  /// message write is forcefully disabled.
  ///
  /// \sa GRPC_WRITE_NO_COMPRESS
  inline bool get_no_compression() const {
    return GetBit(GRPC_WRITE_NO_COMPRESS);
  }

  /// Sets flag indicating that the write may be buffered and need not go out
  /// on the wire immediately.
  ///
  /// \sa GRPC_WRITE_BUFFER_HINT
  inline WriteOptions& set_buffer_hint() {
    SetBit(GRPC_WRITE_BUFFER_HINT);
    return *this;
  }

  /// Clears flag indicating that the write may be buffered and need not go
  /// out on the wire immediately.
  ///
  /// \sa GRPC_WRITE_BUFFER_HINT
  inline WriteOptions& clear_buffer_hint() {
    ClearBit(GRPC_WRITE_BUFFER_HINT);
    return *this;
  }

  /// Get value for the flag indicating that the write may be buffered and
  /// need not go out on the wire immediately.
  ///
  /// \sa GRPC_WRITE_BUFFER_HINT
  inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); }

  /// corked bit: aliases set_buffer_hint currently, with the intent that
  /// set_buffer_hint will be removed in the future
  inline WriteOptions& set_corked() {
    SetBit(GRPC_WRITE_BUFFER_HINT);
    return *this;
  }

  inline WriteOptions& clear_corked() {
    ClearBit(GRPC_WRITE_BUFFER_HINT);
    return *this;
  }

  inline bool is_corked() const { return GetBit(GRPC_WRITE_BUFFER_HINT); }

  /// last-message bit: indicates this is the last message in a stream
  /// client-side: makes Write the equivalent of performing Write, WritesDone
  /// in a single step
  /// server-side: hold the Write until the service handler returns (sync api)
  /// or until Finish is called (async api)
  inline WriteOptions& set_last_message() {
    last_message_ = true;
    return *this;
  }

  /// Clears flag indicating that this is the last message in a stream,
  /// disabling coalescing.
  inline WriteOptions& clear_last_message() {
    last_message_ = false;
    return *this;
  }

  /// Get value for the flag indicating that this is the last message, and
  /// should be coalesced with trailing metadata.
  ///
  /// \sa GRPC_WRITE_LAST_MESSAGE
  bool is_last_message() const { return last_message_; }

  /// Guarantee that all bytes have been written to the socket before
  /// completing this write (usually writes are completed when they pass flow
  /// control).
  inline WriteOptions& set_write_through() {
    SetBit(GRPC_WRITE_THROUGH);
    return *this;
  }

  inline WriteOptions& clear_write_through() {
    ClearBit(GRPC_WRITE_THROUGH);
    return *this;
  }

  inline bool is_write_through() const { return GetBit(GRPC_WRITE_THROUGH); }

 private:
  void SetBit(const uint32_t mask) { flags_ |= mask; }

  void ClearBit(const uint32_t mask) { flags_ &= ~mask; }

  bool GetBit(const uint32_t mask) const { return (flags_ & mask) != 0; }

  uint32_t flags_;
  bool last_message_;
};
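
// Usage sketch for WriteOptions (illustrative; `stream` and `msg` are
// hypothetical, not defined in this header). The setters return *this, so
// options are typically chained, and the flags apply only to the write they
// are passed to:
//
//   grpc::WriteOptions opts;
//   opts.set_no_compression().set_buffer_hint();
//   // stream->Write(msg, opts);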
namespace internal {

/// Default argument for CallOpSet. The Unused parameter is unused by
/// the class, but can be used for generating multiple names for the
/// same thing.
template <int Unused>
class CallNoOp {
 protected:
  void AddOp(grpc_op* /*ops*/, size_t* /*nops*/) {}
  void FinishOp(bool* /*status*/) {}
  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {}
  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {}
  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) {
  }
};

class CallOpSendInitialMetadata {
 public:
  CallOpSendInitialMetadata() : send_(false) {
    maybe_compression_level_.is_set = false;
  }

  void SendInitialMetadata(std::multimap<TString, TString>* metadata,
                           uint32_t flags) {
    maybe_compression_level_.is_set = false;
    send_ = true;
    flags_ = flags;
    metadata_map_ = metadata;
  }

  void set_compression_level(grpc_compression_level level) {
    maybe_compression_level_.is_set = true;
    maybe_compression_level_.level = level;
  }

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_ || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_INITIAL_METADATA;
    op->flags = flags_;
    op->reserved = nullptr;
    initial_metadata_ =
        FillMetadataArray(*metadata_map_, &initial_metadata_count_, "");
    op->data.send_initial_metadata.count = initial_metadata_count_;
    op->data.send_initial_metadata.metadata = initial_metadata_;
    op->data.send_initial_metadata.maybe_compression_level.is_set =
        maybe_compression_level_.is_set;
    if (maybe_compression_level_.is_set) {
      op->data.send_initial_metadata.maybe_compression_level.level =
          maybe_compression_level_.level;
    }
  }

  void FinishOp(bool* /*status*/) {
    if (!send_ || hijacked_) return;
    gpr_free(initial_metadata_);
    send_ = false;
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (!send_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA);
    interceptor_methods->SetSendInitialMetadata(metadata_map_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {}

  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) {
    hijacked_ = true;
  }

  bool hijacked_ = false;
  bool send_;
  uint32_t flags_;
  size_t initial_metadata_count_;
  std::multimap<TString, TString>* metadata_map_;
  grpc_metadata* initial_metadata_;
  struct {
    bool is_set;
    grpc_compression_level level;
  } maybe_compression_level_;
};

class CallOpSendMessage {
 public:
  CallOpSendMessage() : send_buf_() {}

  /// Send \a message using \a options for the write. The \a options are
  /// cleared after use.
  template <class M>
  Status SendMessage(const M& message,
                     WriteOptions options) GRPC_MUST_USE_RESULT;

  template <class M>
  Status SendMessage(const M& message) GRPC_MUST_USE_RESULT;

  /// Send \a message using \a options for the write. The \a options are
  /// cleared after use. This form of SendMessage allows gRPC to reference
  /// \a message beyond the lifetime of SendMessage.
  template <class M>
  Status SendMessagePtr(const M* message,
                        WriteOptions options) GRPC_MUST_USE_RESULT;

  /// This form of SendMessage allows gRPC to reference \a message beyond the
  /// lifetime of SendMessage.
  template <class M>
  Status SendMessagePtr(const M* message) GRPC_MUST_USE_RESULT;

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (msg_ == nullptr && !send_buf_.Valid()) return;
    if (hijacked_) {
      serializer_ = nullptr;
      return;
    }
    if (msg_ != nullptr) {
      GPR_ASSERT(serializer_(msg_).ok());
    }
    serializer_ = nullptr;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_MESSAGE;
    op->flags = write_options_.flags();
    op->reserved = nullptr;
    op->data.send_message.send_message = send_buf_.c_buffer();
    // Flags are per-message: clear them after use.
    write_options_.Clear();
  }

  void FinishOp(bool* status) {
    if (msg_ == nullptr && !send_buf_.Valid()) return;
    send_buf_.Clear();
    if (hijacked_ && failed_send_) {
      // Hijacking interceptor failed this Op
      *status = false;
    } else if (!*status) {
      // This Op was passed down to core and the Op failed
      failed_send_ = true;
    }
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (msg_ == nullptr && !send_buf_.Valid()) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_MESSAGE);
    interceptor_methods->SetSendMessage(&send_buf_, &msg_, &failed_send_,
                                        serializer_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (msg_ != nullptr || send_buf_.Valid()) {
      interceptor_methods->AddInterceptionHookPoint(
          experimental::InterceptionHookPoints::POST_SEND_MESSAGE);
    }
    send_buf_.Clear();
    msg_ = nullptr;
    // The contents of the SendMessage value that was previously set
    // has had its references stolen by core's operations
    interceptor_methods->SetSendMessage(nullptr, nullptr, &failed_send_,
                                        nullptr);
  }

  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) {
    hijacked_ = true;
  }

 private:
  const void* msg_ = nullptr;  // The original non-serialized message
  bool hijacked_ = false;
  bool failed_send_ = false;
  ByteBuffer send_buf_;
  WriteOptions write_options_;
  std::function<Status(const void*)> serializer_;
};

template <class M>
Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) {
  write_options_ = options;
  // Serialize immediately since we do not have access to the message pointer
  bool own_buf;
  Status result = SerializationTraits<M>::Serialize(
      message, send_buf_.bbuf_ptr(), &own_buf);
  if (!own_buf) {
    send_buf_.Duplicate();
  }
  return result;
}

template <class M>
Status CallOpSendMessage::SendMessage(const M& message) {
  return SendMessage(message, WriteOptions());
}

template <class M>
Status CallOpSendMessage::SendMessagePtr(const M* message,
                                         WriteOptions options) {
  msg_ = message;
  write_options_ = options;
  // Store the serializer for later since we have access to the message
  serializer_ = [this](const void* message) {
    bool own_buf;
    // TODO(vjpai): Remove the void below when possible
    // The void in the template parameter below should not be needed
    // (since it should be implicit) but is needed due to an observed
    // difference in behavior between clang and gcc for certain internal users
    Status result = SerializationTraits<M>::Serialize(
        *static_cast<const M*>(message), send_buf_.bbuf_ptr(), &own_buf);
    if (!own_buf) {
      send_buf_.Duplicate();
    }
    return result;
  };
  return Status();
}

template <class M>
Status CallOpSendMessage::SendMessagePtr(const M* message) {
  return SendMessagePtr(message, WriteOptions());
}
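
// Illustrative contrast between the two send paths (`op_set` and `MyProto`
// are hypothetical names). SendMessage serializes immediately and duplicates
// the buffer if it does not own it, so the message may go out of scope
// afterwards; SendMessagePtr only records a serializer that runs later in
// AddOp, so the pointed-to message must stay alive until the batch starts:
//
//   MyProto msg;
//   GPR_ASSERT(op_set.SendMessage(msg).ok());      // serialized right here
//   GPR_ASSERT(op_set.SendMessagePtr(&msg).ok());  // serialized in AddOp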
template <class R>
class CallOpRecvMessage {
 public:
  void RecvMessage(R* message) { message_ = message; }

  // Do not change status if no message is received.
  void AllowNoMessage() { allow_not_getting_message_ = true; }

  bool got_message = false;

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (message_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_MESSAGE;
    op->flags = 0;
    op->reserved = nullptr;
    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
  }

  void FinishOp(bool* status) {
    if (message_ == nullptr) return;
    if (recv_buf_.Valid()) {
      if (*status) {
        got_message = *status =
            SerializationTraits<R>::Deserialize(recv_buf_.bbuf_ptr(), message_)
                .ok();
        recv_buf_.Release();
      } else {
        got_message = false;
        recv_buf_.Clear();
      }
    } else if (hijacked_) {
      if (hijacked_recv_message_failed_) {
        FinishOpRecvMessageFailureHandler(status);
      } else {
        // The op was hijacked and it was successful. There is no further
        // action to be performed since the message is already in its
        // non-serialized form.
      }
    } else {
      FinishOpRecvMessageFailureHandler(status);
    }
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (message_ == nullptr) return;
    interceptor_methods->SetRecvMessage(message_,
                                        &hijacked_recv_message_failed_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (message_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
    if (!got_message) interceptor_methods->SetRecvMessage(nullptr, nullptr);
  }

  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) {
    hijacked_ = true;
    if (message_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE);
    got_message = true;
  }

 private:
  // Sets got_message and \a status for a failed recv message op
  void FinishOpRecvMessageFailureHandler(bool* status) {
    got_message = false;
    if (!allow_not_getting_message_) {
      *status = false;
    }
  }

  R* message_ = nullptr;
  ByteBuffer recv_buf_;
  bool allow_not_getting_message_ = false;
  bool hijacked_ = false;
  bool hijacked_recv_message_failed_ = false;
};

class DeserializeFunc {
 public:
  virtual Status Deserialize(ByteBuffer* buf) = 0;
  virtual ~DeserializeFunc() {}
};

template <class R>
class DeserializeFuncType final : public DeserializeFunc {
 public:
  explicit DeserializeFuncType(R* message) : message_(message) {}
  Status Deserialize(ByteBuffer* buf) override {
    return SerializationTraits<R>::Deserialize(buf->bbuf_ptr(), message_);
  }

  ~DeserializeFuncType() override {}

 private:
  R* message_;  // Not a managed pointer because management is external to this
};

class CallOpGenericRecvMessage {
 public:
  template <class R>
  void RecvMessage(R* message) {
    // Use an explicit base class pointer to avoid resolution error in the
    // following unique_ptr::reset for some old implementations.
    DeserializeFunc* func = new DeserializeFuncType<R>(message);
    deserialize_.reset(func);
    message_ = message;
  }

  // Do not change status if no message is received.
  void AllowNoMessage() { allow_not_getting_message_ = true; }

  bool got_message = false;

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!deserialize_ || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_MESSAGE;
    op->flags = 0;
    op->reserved = nullptr;
    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
  }

  void FinishOp(bool* status) {
    if (!deserialize_) return;
    if (recv_buf_.Valid()) {
      if (*status) {
        got_message = true;
        *status = deserialize_->Deserialize(&recv_buf_).ok();
        recv_buf_.Release();
      } else {
        got_message = false;
        recv_buf_.Clear();
      }
    } else if (hijacked_) {
      if (hijacked_recv_message_failed_) {
        FinishOpRecvMessageFailureHandler(status);
      } else {
        // The op was hijacked and it was successful. There is no further
        // action to be performed since the message is already in its
        // non-serialized form.
      }
    } else {
      got_message = false;
      if (!allow_not_getting_message_) {
        *status = false;
      }
    }
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (!deserialize_) return;
    interceptor_methods->SetRecvMessage(message_,
                                        &hijacked_recv_message_failed_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (!deserialize_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
    if (!got_message) interceptor_methods->SetRecvMessage(nullptr, nullptr);
    deserialize_.reset();
  }

  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) {
    hijacked_ = true;
    if (!deserialize_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE);
    got_message = true;
  }

 private:
  // Sets got_message and \a status for a failed recv message op
  void FinishOpRecvMessageFailureHandler(bool* status) {
    got_message = false;
    if (!allow_not_getting_message_) {
      *status = false;
    }
  }

  void* message_ = nullptr;
  std::unique_ptr<DeserializeFunc> deserialize_;
  ByteBuffer recv_buf_;
  bool allow_not_getting_message_ = false;
  bool hijacked_ = false;
  bool hijacked_recv_message_failed_ = false;
};

class CallOpClientSendClose {
 public:
  CallOpClientSendClose() : send_(false) {}

  void ClientSendClose() { send_ = true; }

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_ || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    op->flags = 0;
    op->reserved = nullptr;
  }

  void FinishOp(bool* /*status*/) { send_ = false; }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (!send_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_CLOSE);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {}

  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) {
    hijacked_ = true;
  }

 private:
  bool hijacked_ = false;
  bool send_;
};

class CallOpServerSendStatus {
 public:
  CallOpServerSendStatus() : send_status_available_(false) {}

  void ServerSendStatus(
      std::multimap<TString, TString>* trailing_metadata,
      const Status& status) {
    send_error_details_ = status.error_details();
    metadata_map_ = trailing_metadata;
    send_status_available_ = true;
    send_status_code_ = static_cast<grpc_status_code>(status.error_code());
    send_error_message_ = status.error_message();
  }

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_status_available_ || hijacked_) return;
    trailing_metadata_ = FillMetadataArray(
        *metadata_map_, &trailing_metadata_count_, send_error_details_);
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
    op->data.send_status_from_server.trailing_metadata_count =
        trailing_metadata_count_;
    op->data.send_status_from_server.trailing_metadata = trailing_metadata_;
    op->data.send_status_from_server.status = send_status_code_;
    error_message_slice_ = SliceReferencingString(send_error_message_);
    op->data.send_status_from_server.status_details =
        send_error_message_.empty() ? nullptr : &error_message_slice_;
    op->flags = 0;
    op->reserved = nullptr;
  }

  void FinishOp(bool* /*status*/) {
    if (!send_status_available_ || hijacked_) return;
    gpr_free(trailing_metadata_);
    send_status_available_ = false;
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (!send_status_available_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_STATUS);
    interceptor_methods->SetSendTrailingMetadata(metadata_map_);
    interceptor_methods->SetSendStatus(&send_status_code_,
                                       &send_error_details_,
                                       &send_error_message_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* /*interceptor_methods*/) {}

  void SetHijackingState(InterceptorBatchMethodsImpl* /*interceptor_methods*/) {
    hijacked_ = true;
  }

 private:
  bool hijacked_ = false;
  bool send_status_available_;
  grpc_status_code send_status_code_;
  TString send_error_details_;
  TString send_error_message_;
  size_t trailing_metadata_count_;
  std::multimap<TString, TString>* metadata_map_;
  grpc_metadata* trailing_metadata_;
  grpc_slice error_message_slice_;
};
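
// Rough server-side sketch of CallOpServerSendStatus (illustrative; the
// trailer contents and status below are made up). ServerSendStatus captures
// the code, message and error details; AddOp later folds them, together with
// the trailing metadata, into a single GRPC_OP_SEND_STATUS_FROM_SERVER entry:
//
//   std::multimap<TString, TString> trailers{{"x-retry-hint", "no"}};
//   CallOpServerSendStatus status_op;
//   status_op.ServerSendStatus(
//       &trailers, grpc::Status(grpc::StatusCode::NOT_FOUND, "no such key"));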
class CallOpRecvInitialMetadata {
 public:
  CallOpRecvInitialMetadata() : metadata_map_(nullptr) {}

  void RecvInitialMetadata(grpc::ClientContext* context) {
    context->initial_metadata_received_ = true;
    metadata_map_ = &context->recv_initial_metadata_;
  }

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (metadata_map_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_INITIAL_METADATA;
    op->data.recv_initial_metadata.recv_initial_metadata = metadata_map_->arr();
    op->flags = 0;
    op->reserved = nullptr;
  }

  void FinishOp(bool* /*status*/) {
    if (metadata_map_ == nullptr || hijacked_) return;
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    interceptor_methods->SetRecvInitialMetadata(metadata_map_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (metadata_map_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    metadata_map_ = nullptr;
  }

  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) {
    hijacked_ = true;
    if (metadata_map_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA);
  }

 private:
  bool hijacked_ = false;
  MetadataMap* metadata_map_;
};

class CallOpClientRecvStatus {
 public:
  CallOpClientRecvStatus()
      : recv_status_(nullptr), debug_error_string_(nullptr) {}

  void ClientRecvStatus(grpc::ClientContext* context, Status* status) {
    client_context_ = context;
    metadata_map_ = &client_context_->trailing_metadata_;
    recv_status_ = status;
    error_message_ = grpc_empty_slice();
  }

 protected:
  void AddOp(grpc_op* ops, size_t* nops) {
    if (recv_status_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
    op->data.recv_status_on_client.trailing_metadata = metadata_map_->arr();
    op->data.recv_status_on_client.status = &status_code_;
    op->data.recv_status_on_client.status_details = &error_message_;
    op->data.recv_status_on_client.error_string = &debug_error_string_;
    op->flags = 0;
    op->reserved = nullptr;
  }

  void FinishOp(bool* /*status*/) {
    if (recv_status_ == nullptr || hijacked_) return;
    if (static_cast<StatusCode>(status_code_) == StatusCode::OK) {
      *recv_status_ = Status();
      GPR_DEBUG_ASSERT(debug_error_string_ == nullptr);
    } else {
      *recv_status_ = Status(
          static_cast<StatusCode>(status_code_),
          GRPC_SLICE_IS_EMPTY(error_message_)
              ? TString()
              : TString(reinterpret_cast<const char*>(
                            GRPC_SLICE_START_PTR(error_message_)),
                        reinterpret_cast<const char*>(
                            GRPC_SLICE_END_PTR(error_message_))),
          metadata_map_->GetBinaryErrorDetails());
      if (debug_error_string_ != nullptr) {
        client_context_->set_debug_error_string(debug_error_string_);
        gpr_free(const_cast<char*>(debug_error_string_));
      }
    }
    // TODO(soheil): Find callers that set debug string even for status OK,
    // and fix them.
    grpc_slice_unref(error_message_);
  }

  void SetInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    interceptor_methods->SetRecvStatus(recv_status_);
    interceptor_methods->SetRecvTrailingMetadata(metadata_map_);
  }

  void SetFinishInterceptionHookPoint(
      InterceptorBatchMethodsImpl* interceptor_methods) {
    if (recv_status_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_STATUS);
    recv_status_ = nullptr;
  }

  void SetHijackingState(InterceptorBatchMethodsImpl* interceptor_methods) {
    hijacked_ = true;
    if (recv_status_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_STATUS);
  }

 private:
  bool hijacked_ = false;
  grpc::ClientContext* client_context_;
  MetadataMap* metadata_map_;
  Status* recv_status_;
  const char* debug_error_string_;
  grpc_status_code status_code_;
  grpc_slice error_message_;
};

template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
          class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
          class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class CallOpSet;

/// Primary implementation of CallOpSetInterface.
/// Since we cannot use variadic templates, we declare slots up to
/// the maximum count of ops we'll need in a set. We leverage the
/// empty base class optimization to slim this class (especially
/// when there are many unused slots). To avoid duplicate base classes,
/// the template parameter for CallNoOp is varied by argument position.
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
class CallOpSet : public CallOpSetInterface,
                  public Op1,
                  public Op2,
                  public Op3,
                  public Op4,
                  public Op5,
                  public Op6 {
 public:
  CallOpSet() : core_cq_tag_(this), return_tag_(this) {}

  // The copy constructor and assignment operator reset the value of
  // core_cq_tag_, return_tag_, done_intercepting_ and interceptor_methods_
  // since those are only meaningful on a specific object, not across objects.
  CallOpSet(const CallOpSet& other)
      : core_cq_tag_(this),
        return_tag_(this),
        call_(other.call_),
        done_intercepting_(false),
        interceptor_methods_(InterceptorBatchMethodsImpl()) {}

  CallOpSet& operator=(const CallOpSet& other) {
    if (&other == this) {
      return *this;
    }
    core_cq_tag_ = this;
    return_tag_ = this;
    call_ = other.call_;
    done_intercepting_ = false;
    interceptor_methods_ = InterceptorBatchMethodsImpl();
    return *this;
  }

  void FillOps(Call* call) override {
    done_intercepting_ = false;
    grpc_call_ref(call->call());
    call_ =
        *call;  // It's fine to create a copy of call since it's just pointers
    if (RunInterceptors()) {
      ContinueFillOpsAfterInterception();
    } else {
      // After the interceptors are run, ContinueFillOpsAfterInterception will
      // be run
    }
  }

  bool FinalizeResult(void** tag, bool* status) override {
    if (done_intercepting_) {
      // Complete the avalanching since we are done with this batch of ops
      call_.cq()->CompleteAvalanching();
      // We have already finished intercepting and filling in the results. This
      // round trip from the core needed to be made because interceptors were
      // run
      *tag = return_tag_;
      *status = saved_status_;
      grpc_call_unref(call_.call());
      return true;
    }

    this->Op1::FinishOp(status);
    this->Op2::FinishOp(status);
    this->Op3::FinishOp(status);
    this->Op4::FinishOp(status);
    this->Op5::FinishOp(status);
    this->Op6::FinishOp(status);
    saved_status_ = *status;
    if (RunInterceptorsPostRecv()) {
      *tag = return_tag_;
      grpc_call_unref(call_.call());
      return true;
    }
    // Interceptors are going to be run, so we can't return the tag just yet.
    // After the interceptors are run, ContinueFinalizeResultAfterInterception
    return false;
  }

  void set_output_tag(void* return_tag) { return_tag_ = return_tag; }

  void* core_cq_tag() override { return core_cq_tag_; }

  /// set_core_cq_tag is used to provide a different core CQ tag than "this".
  /// This is used for callback-based tags, where the core tag is the core
  /// callback function. It does not change the use or behavior of any other
  /// function (such as FinalizeResult)
  void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }

  // This will be called while interceptors are run if the RPC is a hijacked
  // RPC. This should set hijacking state for each of the ops.
  void SetHijackingState() override {
    this->Op1::SetHijackingState(&interceptor_methods_);
    this->Op2::SetHijackingState(&interceptor_methods_);
    this->Op3::SetHijackingState(&interceptor_methods_);
    this->Op4::SetHijackingState(&interceptor_methods_);
    this->Op5::SetHijackingState(&interceptor_methods_);
    this->Op6::SetHijackingState(&interceptor_methods_);
  }

  // Should be called after interceptors are done running
  void ContinueFillOpsAfterInterception() override {
    static const size_t MAX_OPS = 6;
    grpc_op ops[MAX_OPS];
    size_t nops = 0;
    this->Op1::AddOp(ops, &nops);
    this->Op2::AddOp(ops, &nops);
    this->Op3::AddOp(ops, &nops);
    this->Op4::AddOp(ops, &nops);
    this->Op5::AddOp(ops, &nops);
    this->Op6::AddOp(ops, &nops);
    grpc_call_error err =
        grpc_call_start_batch(call_.call(), ops, nops, core_cq_tag(), nullptr);
    if (err != GRPC_CALL_OK) {
      // A failure here indicates an API misuse; for example, doing a Write
      // while another Write is already pending on the same RPC or invoking
      // WritesDone multiple times
      gpr_log(GPR_ERROR, "API misuse of type %s observed",
              grpc_call_error_to_string(err));
      GPR_ASSERT(false);
    }
  }

  // Should be called after interceptors are done running on the finalize
  // result path
  void ContinueFinalizeResultAfterInterception() override {
    done_intercepting_ = true;
    // The following call_start_batch is internally-generated so no need for an
    // explanatory log on failure.
    GPR_ASSERT(grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag(),
                                     nullptr) == GRPC_CALL_OK);
  }

 private:
  // Returns true if no interceptors need to be run
  bool RunInterceptors() {
    interceptor_methods_.ClearState();
    interceptor_methods_.SetCallOpSetInterface(this);
    interceptor_methods_.SetCall(&call_);
    this->Op1::SetInterceptionHookPoint(&interceptor_methods_);
    this->Op2::SetInterceptionHookPoint(&interceptor_methods_);
    this->Op3::SetInterceptionHookPoint(&interceptor_methods_);
    this->Op4::SetInterceptionHookPoint(&interceptor_methods_);
    this->Op5::SetInterceptionHookPoint(&interceptor_methods_);
    this->Op6::SetInterceptionHookPoint(&interceptor_methods_);
    if (interceptor_methods_.InterceptorsListEmpty()) {
      return true;
    }
    // This call will go through interceptors and would need to
    // schedule new batches, so delay completion queue shutdown
    call_.cq()->RegisterAvalanching();
    return interceptor_methods_.RunInterceptors();
  }

  // Returns true if no interceptors need to be run
  bool RunInterceptorsPostRecv() {
    // Call and OpSet have already been set on the interceptor state.
    // SetReverse also clears previously set hook points
    interceptor_methods_.SetReverse();
    this->Op1::SetFinishInterceptionHookPoint(&interceptor_methods_);
    this->Op2::SetFinishInterceptionHookPoint(&interceptor_methods_);
    this->Op3::SetFinishInterceptionHookPoint(&interceptor_methods_);
    this->Op4::SetFinishInterceptionHookPoint(&interceptor_methods_);
    this->Op5::SetFinishInterceptionHookPoint(&interceptor_methods_);
    this->Op6::SetFinishInterceptionHookPoint(&interceptor_methods_);
    return interceptor_methods_.RunInterceptors();
  }

  void* core_cq_tag_;
  void* return_tag_;
  Call call_;
  bool done_intercepting_ = false;
  InterceptorBatchMethodsImpl interceptor_methods_;
  bool saved_status_;
};
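
// For orientation, the rough shape of a client-side unary-call op set
// (illustrative only; `Response` stands in for a concrete response message
// type, and the alias name is made up):
//
//   using UnaryCallOps =
//       CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
//                 CallOpRecvInitialMetadata, CallOpRecvMessage<Response>,
//                 CallOpClientSendClose, CallOpClientRecvStatus>;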
}  // namespace internal
}  // namespace grpc

#endif  // GRPCPP_IMPL_CALL_OP_SET_H