  1. /*
  2. *
  3. * Copyright 2018 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. package binarylog_test
  19. import (
  20. "context"
  21. "fmt"
  22. "io"
  23. "net"
  24. "sort"
  25. "sync"
  26. "testing"
  27. "time"
  28. "github.com/golang/protobuf/proto"
  29. "google.golang.org/grpc"
  30. "google.golang.org/grpc/binarylog"
  31. "google.golang.org/grpc/credentials/insecure"
  32. "google.golang.org/grpc/grpclog"
  33. iblog "google.golang.org/grpc/internal/binarylog"
  34. "google.golang.org/grpc/internal/grpctest"
  35. "google.golang.org/grpc/metadata"
  36. "google.golang.org/grpc/status"
  37. binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
  38. testgrpc "google.golang.org/grpc/interop/grpc_testing"
  39. testpb "google.golang.org/grpc/interop/grpc_testing"
  40. )
// grpclogLogger is the component logger used for diagnostics in this test file.
var grpclogLogger = grpclog.Component("binarylog")
// s embeds grpctest.Tester so its Test* methods are discovered and run by
// grpctest.RunSubTests.
type s struct {
	grpctest.Tester
}
// Test is the single testing entry point; it runs every (s) method as a subtest.
func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}
func init() {
	// Setting environment variable in tests doesn't work because of the init
	// orders. Set the loggers directly here.
	iblog.SetLogger(iblog.AllLogger)
	// Route all binary log output into the in-memory test sink so tests can
	// inspect the entries.
	binarylog.SetSink(testSink)
}
// testSink is the process-wide sink installed in init; every binary log entry
// produced during these tests is captured in its buffer.
var testSink = &testBinLogSink{}

// testBinLogSink is an in-memory binarylog sink; mu guards buf.
type testBinLogSink struct {
	mu  sync.Mutex
	buf []*binlogpb.GrpcLogEntry
}
  59. func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error {
  60. s.mu.Lock()
  61. s.buf = append(s.buf, e)
  62. s.mu.Unlock()
  63. return nil
  64. }
  65. func (s *testBinLogSink) Close() error { return nil }
// logEntries returns a snapshot of all buffered client entries if client is
// true, otherwise all buffered server entries.
func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_SERVER
	if client {
		logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	}
	var ret []*binlogpb.GrpcLogEntry
	s.mu.Lock()
	for _, e := range s.buf {
		if e.Logger == logger {
			ret = append(ret, e)
		}
	}
	s.mu.Unlock()
	return ret
}
  83. func (s *testBinLogSink) clear() {
  84. s.mu.Lock()
  85. s.buf = nil
  86. s.mu.Unlock()
  87. }
var (
	// testMetadata is attached as outgoing headers on every RPC the tests make.
	testMetadata = metadata.MD{
		"key1": []string{"value1"},
		"key2": []string{"value2"},
	}
	// testTrailerMetadata is set as trailers by every test service handler.
	testTrailerMetadata = metadata.MD{
		"tkey1": []string{"trailerValue1"},
		"tkey2": []string{"trailerValue2"},
	}
	// errorID is the payload id for which the service handler should return error.
	errorID int32 = 32202
	// globalRPCID is the expected binarylog call id. RPC id starts with 1, but
	// we do ++ at the beginning of each test.
	globalRPCID uint64
)
  103. func idToPayload(id int32) *testpb.Payload {
  104. return &testpb.Payload{Body: []byte{byte(id), byte(id >> 8), byte(id >> 16), byte(id >> 24)}}
  105. }
  106. func payloadToID(p *testpb.Payload) int32 {
  107. if p == nil || len(p.Body) != 4 {
  108. panic("invalid payload")
  109. }
  110. return int32(p.Body[0]) + int32(p.Body[1])<<8 + int32(p.Body[2])<<16 + int32(p.Body[3])<<24
  111. }
// testServer implements the TestService used by these tests; te gives the
// handlers access to the enclosing test fixture.
type testServer struct {
	testgrpc.UnimplementedTestServiceServer
	te *test
}
// UnaryCall echoes the request payload. If incoming metadata is present it is
// echoed back as headers and testTrailerMetadata is set as trailers, so both
// sides produce header/trailer log entries. A payload id equal to errorID
// makes the handler return a plain (non-status) error.
func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok {
		if err := grpc.SendHeader(ctx, md); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want <nil>", md, err)
		}
		if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil {
			return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want <nil>", testTrailerMetadata, err)
		}
	}
	if id := payloadToID(in.Payload); id == errorID {
		return nil, fmt.Errorf("got error id: %v", id)
	}
	return &testpb.SimpleResponse{Payload: in.Payload}, nil
}
// FullDuplexCall echoes each received message back to the client. Incoming
// metadata, when present, is echoed as headers (sent before the first read)
// and testTrailerMetadata is set as trailers. A payload id equal to errorID
// fails the RPC mid-stream with a plain error.
func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			return err
		}
		if id := payloadToID(in.Payload); id == errorID {
			return fmt.Errorf("got error id: %v", id)
		}
		if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: in.Payload}); err != nil {
			return err
		}
	}
}
// StreamingInputCall drains the client stream and replies with a single
// aggregate response on EOF. Incoming metadata is echoed as headers and
// testTrailerMetadata is set as trailers. A payload id equal to errorID
// fails the RPC with a plain error.
func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return stream.SendAndClose(&testpb.StreamingInputCallResponse{AggregatedPayloadSize: 0})
		}
		if err != nil {
			return err
		}
		if id := payloadToID(in.Payload); id == errorID {
			return fmt.Errorf("got error id: %v", id)
		}
	}
}
// StreamingOutputCall sends the request payload back five times. Incoming
// metadata is echoed as headers and testTrailerMetadata is set as trailers.
// A payload id equal to errorID fails the RPC before any response is sent.
func (s *testServer) StreamingOutputCall(in *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error {
	md, ok := metadata.FromIncomingContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	if id := payloadToID(in.Payload); id == errorID {
		return fmt.Errorf("got error id: %v", id)
	}
	for i := 0; i < 5; i++ {
		if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: in.Payload}); err != nil {
			return err
		}
	}
	return nil
}
// test is an end-to-end test. It should be created with the newTest
// func, modified as needed, and then started with its startServer method.
// It should be cleaned up with the tearDown method.
type test struct {
	t           *testing.T
	testService testgrpc.TestServiceServer // nil means none

	// srv and srvAddr are set once startServer is called.
	srv     *grpc.Server
	srvAddr string // Server address as host:port (from the listener).
	srvIP   net.IP
	srvPort int

	cc *grpc.ClientConn // nil until requested via clientConn

	// Fields for client address. Set by the listener wrapper on Accept.
	clientAddrMu sync.Mutex
	clientIP     net.IP
	clientPort   int
}
  213. func (te *test) tearDown() {
  214. if te.cc != nil {
  215. te.cc.Close()
  216. te.cc = nil
  217. }
  218. te.srv.Stop()
  219. }
  220. // newTest returns a new test using the provided testing.T and
  221. // environment. It is returned with default values. Tests should
  222. // modify it before calling its startServer and clientConn methods.
  223. func newTest(t *testing.T) *test {
  224. te := &test{
  225. t: t,
  226. }
  227. return te
  228. }
// listenerWrapper wraps a net.Listener so the test can record the remote
// (client) address of each accepted connection.
type listenerWrapper struct {
	net.Listener
	te *test
}
  233. func (lw *listenerWrapper) Accept() (net.Conn, error) {
  234. conn, err := lw.Listener.Accept()
  235. if err != nil {
  236. return nil, err
  237. }
  238. lw.te.clientAddrMu.Lock()
  239. lw.te.clientIP = conn.RemoteAddr().(*net.TCPAddr).IP
  240. lw.te.clientPort = conn.RemoteAddr().(*net.TCPAddr).Port
  241. lw.te.clientAddrMu.Unlock()
  242. return conn, nil
  243. }
  244. // startServer starts a gRPC server listening. Callers should defer a
  245. // call to te.tearDown to clean up.
  246. func (te *test) startServer(ts testgrpc.TestServiceServer) {
  247. te.testService = ts
  248. lis, err := net.Listen("tcp", "localhost:0")
  249. lis = &listenerWrapper{
  250. Listener: lis,
  251. te: te,
  252. }
  253. if err != nil {
  254. te.t.Fatalf("Failed to listen: %v", err)
  255. }
  256. var opts []grpc.ServerOption
  257. s := grpc.NewServer(opts...)
  258. te.srv = s
  259. if te.testService != nil {
  260. testgrpc.RegisterTestServiceServer(s, te.testService)
  261. }
  262. go s.Serve(lis)
  263. te.srvAddr = lis.Addr().String()
  264. te.srvIP = lis.Addr().(*net.TCPAddr).IP
  265. te.srvPort = lis.Addr().(*net.TCPAddr).Port
  266. }
// clientConn lazily dials the test server (blocking until connected, with
// insecure credentials) and caches the connection. A dial failure aborts the
// test.
func (te *test) clientConn() *grpc.ClientConn {
	if te.cc != nil {
		return te.cc
	}
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()}
	var err error
	te.cc, err = grpc.Dial(te.srvAddr, opts...)
	if err != nil {
		te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err)
	}
	return te.cc
}
// rpcType enumerates the RPC shapes exercised by these tests.
type rpcType int

const (
	unaryRPC rpcType = iota
	clientStreamRPC
	serverStreamRPC
	fullDuplexStreamRPC
	cancelRPC // a full-duplex RPC that is canceled right after it starts.
)
// rpcConfig describes one RPC to run and the outcome to expect.
type rpcConfig struct {
	count    int     // Number of requests and responses for streaming RPCs.
	success  bool    // Whether the RPC should succeed or return error.
	callType rpcType // Type of RPC.
}
  292. func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) {
  293. var (
  294. resp *testpb.SimpleResponse
  295. req *testpb.SimpleRequest
  296. err error
  297. )
  298. tc := testgrpc.NewTestServiceClient(te.clientConn())
  299. if c.success {
  300. req = &testpb.SimpleRequest{Payload: idToPayload(errorID + 1)}
  301. } else {
  302. req = &testpb.SimpleRequest{Payload: idToPayload(errorID)}
  303. }
  304. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  305. defer cancel()
  306. ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  307. resp, err = tc.UnaryCall(ctx, req)
  308. return req, resp, err
  309. }
// doFullDuplexCallRoundtrip drives a full-duplex stream: it sends c.count
// requests, reading the echoed response after each send, then half-closes
// and waits for the final EOF. For cancelRPC it cancels the context right
// after the stream is created. It returns the requests sent, the responses
// received, and the final error.
func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]proto.Message, []proto.Message, error) {
	var (
		reqs  []proto.Message
		resps []proto.Message
		err   error
	)
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		return reqs, resps, err
	}
	if c.callType == cancelRPC {
		cancel()
		return reqs, resps, context.Canceled
	}
	// Start payload ids at errorID in the failure case so the server errors
	// on the first message.
	var startID int32
	if !c.success {
		startID = errorID
	}
	for i := 0; i < c.count; i++ {
		req := &testpb.StreamingOutputCallRequest{
			Payload: idToPayload(int32(i) + startID),
		}
		reqs = append(reqs, req)
		if err = stream.Send(req); err != nil {
			return reqs, resps, err
		}
		var resp *testpb.StreamingOutputCallResponse
		if resp, err = stream.Recv(); err != nil {
			return reqs, resps, err
		}
		resps = append(resps, resp)
	}
	if err = stream.CloseSend(); err != nil && err != io.EOF {
		return reqs, resps, err
	}
	// Drain the stream; anything other than EOF here is a real error.
	if _, err = stream.Recv(); err != io.EOF {
		return reqs, resps, err
	}
	return reqs, resps, nil
}
// doClientStreamCall sends c.count requests on a client stream (payload ids
// start at errorID when the RPC should fail), then closes the send side and
// waits for the single response. It returns the requests, the response, and
// the final error.
func (te *test) doClientStreamCall(c *rpcConfig) ([]proto.Message, proto.Message, error) {
	var (
		reqs []proto.Message
		resp *testpb.StreamingInputCallResponse
		err  error
	)
	tc := testgrpc.NewTestServiceClient(te.clientConn())
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		return reqs, resp, err
	}
	var startID int32
	if !c.success {
		startID = errorID
	}
	for i := 0; i < c.count; i++ {
		req := &testpb.StreamingInputCallRequest{
			Payload: idToPayload(int32(i) + startID),
		}
		reqs = append(reqs, req)
		if err = stream.Send(req); err != nil {
			return reqs, resp, err
		}
	}
	resp, err = stream.CloseAndRecv()
	return reqs, resp, err
}
  384. func (te *test) doServerStreamCall(c *rpcConfig) (proto.Message, []proto.Message, error) {
  385. var (
  386. req *testpb.StreamingOutputCallRequest
  387. resps []proto.Message
  388. err error
  389. )
  390. tc := testgrpc.NewTestServiceClient(te.clientConn())
  391. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
  392. defer cancel()
  393. ctx = metadata.NewOutgoingContext(ctx, testMetadata)
  394. var startID int32
  395. if !c.success {
  396. startID = errorID
  397. }
  398. req = &testpb.StreamingOutputCallRequest{Payload: idToPayload(startID)}
  399. stream, err := tc.StreamingOutputCall(ctx, req)
  400. if err != nil {
  401. return req, resps, err
  402. }
  403. for {
  404. var resp *testpb.StreamingOutputCallResponse
  405. resp, err := stream.Recv()
  406. if err == io.EOF {
  407. return req, resps, nil
  408. } else if err != nil {
  409. return req, resps, err
  410. }
  411. resps = append(resps, resp)
  412. }
  413. }
// expectedData captures everything needed to reconstruct the log entries an
// RPC should have produced: the fixture, the RPC config, the full method
// name, the messages exchanged, and the final error.
type expectedData struct {
	te *test
	cc *rpcConfig

	method    string
	requests  []proto.Message
	responses []proto.Message
	err       error
}
// newClientHeaderEntry builds the expected CLIENT_HEADER entry. client picks
// the logger side; on the server side the entry also carries the peer
// (client) address recorded by the listener wrapper.
func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT
	var peer *binlogpb.Address
	if !client {
		logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
		ed.te.clientAddrMu.Lock()
		peer = &binlogpb.Address{
			Address: ed.te.clientIP.String(),
			IpPort:  uint32(ed.te.clientPort),
		}
		if ed.te.clientIP.To4() != nil {
			peer.Type = binlogpb.Address_TYPE_IPV4
		} else {
			peer.Type = binlogpb.Address_TYPE_IPV6
		}
		ed.te.clientAddrMu.Unlock()
	}
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               logger,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{
				Metadata:   iblog.MdToMetadataProto(testMetadata),
				MethodName: ed.method,
				Authority:  ed.te.srvAddr,
			},
		},
		Peer: peer,
	}
}
// newServerHeaderEntry builds the expected SERVER_HEADER entry. On the
// client side the entry also carries the server's address as peer.
func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_SERVER
	var peer *binlogpb.Address
	if client {
		logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
		peer = &binlogpb.Address{
			Address: ed.te.srvIP.String(),
			IpPort:  uint32(ed.te.srvPort),
		}
		if ed.te.srvIP.To4() != nil {
			peer.Type = binlogpb.Address_TYPE_IPV4
		} else {
			peer.Type = binlogpb.Address_TYPE_IPV6
		}
	}
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Logger:               logger,
		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
			ServerHeader: &binlogpb.ServerHeader{
				Metadata: iblog.MdToMetadataProto(testMetadata),
			},
		},
		Peer: peer,
	}
}
// newClientMessageEntry builds the expected CLIENT_MESSAGE entry carrying the
// serialized request message.
func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT
	if !client {
		logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	data, err := proto.Marshal(msg)
	if err != nil {
		grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
	}
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Logger:               logger,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
}
// newServerMessageEntry builds the expected SERVER_MESSAGE entry carrying the
// serialized response message.
func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT
	if !client {
		logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	data, err := proto.Marshal(msg)
	if err != nil {
		grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
	}
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Logger:               logger,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
}
  530. func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry {
  531. logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT
  532. if !client {
  533. logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
  534. }
  535. return &binlogpb.GrpcLogEntry{
  536. Timestamp: nil,
  537. CallId: rpcID,
  538. SequenceIdWithinCall: inRPCID,
  539. Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
  540. Payload: nil, // No payload here.
  541. Logger: logger,
  542. }
  543. }
// newServerTrailerEntry builds the expected SERVER_TRAILER entry, deriving
// the status code/message/details from stErr. On the client side the entry
// also carries the server's address as peer.
func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *binlogpb.GrpcLogEntry {
	logger := binlogpb.GrpcLogEntry_LOGGER_SERVER
	var peer *binlogpb.Address
	if client {
		logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
		peer = &binlogpb.Address{
			Address: ed.te.srvIP.String(),
			IpPort:  uint32(ed.te.srvPort),
		}
		if ed.te.srvIP.To4() != nil {
			peer.Type = binlogpb.Address_TYPE_IPV4
		} else {
			peer.Type = binlogpb.Address_TYPE_IPV6
		}
	}
	st, ok := status.FromError(stErr)
	if !ok {
		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
	}
	stProto := st.Proto()
	var (
		detailsBytes []byte
		err          error
	)
	if stProto != nil && len(stProto.Details) != 0 {
		detailsBytes, err = proto.Marshal(stProto)
		if err != nil {
			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
		}
	}
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Logger:               logger,
		Payload: &binlogpb.GrpcLogEntry_Trailer{
			Trailer: &binlogpb.Trailer{
				Metadata: iblog.MdToMetadataProto(testTrailerMetadata),
				// st will be nil if err was not a status error, but nil is ok.
				StatusCode:    uint32(st.Code()),
				StatusMessage: st.Message(),
				StatusDetails: detailsBytes,
			},
		},
		Peer: peer,
	}
}
// newCancelEntry builds the expected CANCEL entry; cancellation is logged on
// the client side only.
func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry {
	return &binlogpb.GrpcLogEntry{
		Timestamp:            nil,
		CallId:               rpcID,
		SequenceIdWithinCall: inRPCID,
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload:              nil,
	}
}
// toClientLogEntries builds, in order, every log entry the client side is
// expected to have produced for this RPC.
func (ed *expectedData) toClientLogEntries() []*binlogpb.GrpcLogEntry {
	var (
		ret     []*binlogpb.GrpcLogEntry
		idInRPC uint64 = 1
	)
	ret = append(ret, ed.newClientHeaderEntry(true, globalRPCID, idInRPC))
	idInRPC++
	switch ed.cc.callType {
	case unaryRPC, fullDuplexStreamRPC:
		// Request/response pairs alternate; the server header arrives after
		// the first request.
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
			if i == 0 {
				// First message, append ServerHeader.
				ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC))
				idInRPC++
			}
			if !ed.cc.success {
				// There is no response in the RPC error case.
				continue
			}
			ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[i]))
			idInRPC++
		}
		if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC))
			idInRPC++
		}
	case clientStreamRPC, serverStreamRPC:
		// All requests are sent before any response is received.
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
		}
		if ed.cc.callType == clientStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC))
			idInRPC++
		}
		ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC))
		idInRPC++
		if ed.cc.success {
			// responses[0] is used for every response: the server-stream
			// handler echoes the same payload for each message.
			for i := 0; i < len(ed.responses); i++ {
				ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[0]))
				idInRPC++
			}
		}
	}
	// A canceled RPC ends with a CANCEL entry instead of a server trailer.
	if ed.cc.callType == cancelRPC {
		ret = append(ret, ed.newCancelEntry(globalRPCID, idInRPC))
		idInRPC++
	} else {
		ret = append(ret, ed.newServerTrailerEntry(true, globalRPCID, idInRPC, ed.err))
		idInRPC++
	}
	return ret
}
// toServerLogEntries builds, in order, every log entry the server side is
// expected to have produced for this RPC.
func (ed *expectedData) toServerLogEntries() []*binlogpb.GrpcLogEntry {
	var (
		ret     []*binlogpb.GrpcLogEntry
		idInRPC uint64 = 1
	)
	ret = append(ret, ed.newClientHeaderEntry(false, globalRPCID, idInRPC))
	idInRPC++
	switch ed.cc.callType {
	case unaryRPC:
		ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0]))
		idInRPC++
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		if ed.cc.success {
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0]))
			idInRPC++
		}
	case fullDuplexStreamRPC:
		// The handler sends headers before reading, so the server header
		// precedes the first client message on this side.
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
			if !ed.cc.success {
				// There is no response in the RPC error case.
				continue
			}
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[i]))
			idInRPC++
		}
		if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC {
			ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC))
			idInRPC++
		}
	case clientStreamRPC:
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		for i := 0; i < len(ed.requests); i++ {
			ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i]))
			idInRPC++
		}
		if ed.cc.success {
			ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC))
			idInRPC++
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0]))
			idInRPC++
		}
	case serverStreamRPC:
		ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0]))
		idInRPC++
		ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC))
		idInRPC++
		// responses[0] is used for every response: the server-stream handler
		// echoes the same payload for each message.
		for i := 0; i < len(ed.responses); i++ {
			ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0]))
			idInRPC++
		}
	}
	// The server side always ends with a trailer entry, including on error.
	ret = append(ret, ed.newServerTrailerEntry(false, globalRPCID, idInRPC, ed.err))
	idInRPC++
	return ret
}
// runRPCs starts a fresh server with the test service, performs the RPC
// described by cc, and returns the data needed to build the expected log
// entries. The client connection is closed and the server gracefully stopped
// before returning so all log entries have been produced.
func runRPCs(t *testing.T, cc *rpcConfig) *expectedData {
	te := newTest(t)
	te.startServer(&testServer{te: te})
	defer te.tearDown()
	expect := &expectedData{
		te: te,
		cc: cc,
	}
	switch cc.callType {
	case unaryRPC:
		expect.method = "/grpc.testing.TestService/UnaryCall"
		req, resp, err := te.doUnaryCall(cc)
		expect.requests = []proto.Message{req}
		expect.responses = []proto.Message{resp}
		expect.err = err
	case clientStreamRPC:
		expect.method = "/grpc.testing.TestService/StreamingInputCall"
		reqs, resp, err := te.doClientStreamCall(cc)
		expect.requests = reqs
		expect.responses = []proto.Message{resp}
		expect.err = err
	case serverStreamRPC:
		expect.method = "/grpc.testing.TestService/StreamingOutputCall"
		req, resps, err := te.doServerStreamCall(cc)
		expect.responses = resps
		expect.requests = []proto.Message{req}
		expect.err = err
	case fullDuplexStreamRPC, cancelRPC:
		expect.method = "/grpc.testing.TestService/FullDuplexCall"
		expect.requests, expect.responses, expect.err = te.doFullDuplexCallRoundtrip(cc)
	}
	// Sanity-check that the RPC outcome matches what the config demanded.
	if cc.success != (expect.err == nil) {
		t.Fatalf("cc.success: %v, got error: %v", cc.success, expect.err)
	}
	te.cc.Close()
	te.srv.GracefulStop() // Wait for the server to stop.
	return expect
}
// equalLogEntry reports whether all given entries are proto-equal after
// ignoring timestamps and call ids (both cleared in place) and sorting each
// entry's metadata by key so metadata comparison is order-insensitive.
//
// This function is typically called with only two entries. It's written in this
// way so the code can be put in a for loop instead of copied twice.
func equalLogEntry(entries ...*binlogpb.GrpcLogEntry) (equal bool) {
	for i, e := range entries {
		// Clear out some fields we don't compare.
		e.Timestamp = nil
		e.CallId = 0 // CallID is global to the binary, hard to compare.
		if h := e.GetClientHeader(); h != nil {
			h.Timeout = nil
			tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...)
			h.Metadata.Entry = tmp
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}
		if h := e.GetServerHeader(); h != nil {
			tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...)
			h.Metadata.Entry = tmp
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}
		if h := e.GetTrailer(); h != nil {
			sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key })
		}
		// Compare each entry with its predecessor.
		if i > 0 && !proto.Equal(e, entries[i-1]) {
			return false
		}
	}
	return true
}
// testClientBinaryLog runs the RPC described by c and verifies that the
// client-side log entries captured by testSink match the expected entries.
// The sink is cleared when the check finishes.
func testClientBinaryLog(t *testing.T, c *rpcConfig) error {
	defer testSink.clear()
	expect := runRPCs(t, c)
	want := expect.toClientLogEntries()
	var got []*binlogpb.GrpcLogEntry
	// In racy cases, some entries are not logged when the RPC is finished (e.g.
	// context.Cancel).
	//
	// Check 10 times, with a sleep of 1/10 second between each check. Makes
	// it a 1-second wait in total.
	for i := 0; i < 10; i++ {
		got = testSink.logEntries(true) // all client entries.
		if len(want) == len(got) {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if len(want) != len(got) {
		// Dump both sequences of entry types to make the mismatch debuggable.
		for i, e := range want {
			t.Errorf("in want: %d, %s", i, e.GetType())
		}
		for i, e := range got {
			t.Errorf("in got: %d, %s", i, e.GetType())
		}
		return fmt.Errorf("didn't get same amount of log entries, want: %d, got: %d", len(want), len(got))
	}
	var errored bool
	for i := 0; i < len(got); i++ {
		if !equalLogEntry(want[i], got[i]) {
			t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i])
			errored = true
		}
	}
	if errored {
		return fmt.Errorf("test failed")
	}
	return nil
}
  823. func (s) TestClientBinaryLogUnaryRPC(t *testing.T) {
  824. if err := testClientBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
  825. t.Fatal(err)
  826. }
  827. }
  828. func (s) TestClientBinaryLogUnaryRPCError(t *testing.T) {
  829. if err := testClientBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
  830. t.Fatal(err)
  831. }
  832. }
  833. func (s) TestClientBinaryLogClientStreamRPC(t *testing.T) {
  834. count := 5
  835. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
  836. t.Fatal(err)
  837. }
  838. }
  839. func (s) TestClientBinaryLogClientStreamRPCError(t *testing.T) {
  840. count := 1
  841. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
  842. t.Fatal(err)
  843. }
  844. }
  845. func (s) TestClientBinaryLogServerStreamRPC(t *testing.T) {
  846. count := 5
  847. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
  848. t.Fatal(err)
  849. }
  850. }
  851. func (s) TestClientBinaryLogServerStreamRPCError(t *testing.T) {
  852. count := 5
  853. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
  854. t.Fatal(err)
  855. }
  856. }
  857. func (s) TestClientBinaryLogFullDuplexRPC(t *testing.T) {
  858. count := 5
  859. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
  860. t.Fatal(err)
  861. }
  862. }
  863. func (s) TestClientBinaryLogFullDuplexRPCError(t *testing.T) {
  864. count := 5
  865. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
  866. t.Fatal(err)
  867. }
  868. }
  869. func (s) TestClientBinaryLogCancel(t *testing.T) {
  870. count := 5
  871. if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: cancelRPC}); err != nil {
  872. t.Fatal(err)
  873. }
  874. }
  875. func testServerBinaryLog(t *testing.T, c *rpcConfig) error {
  876. defer testSink.clear()
  877. expect := runRPCs(t, c)
  878. want := expect.toServerLogEntries()
  879. var got []*binlogpb.GrpcLogEntry
  880. // In racy cases, some entries are not logged when the RPC is finished (e.g.
  881. // context.Cancel). This is unlikely to happen on server side, but it does
  882. // no harm to retry.
  883. //
  884. // Check 10 times, with a sleep of 1/100 seconds between each check. Makes
  885. // it an 1-second wait in total.
  886. for i := 0; i < 10; i++ {
  887. got = testSink.logEntries(false) // all server entries.
  888. if len(want) == len(got) {
  889. break
  890. }
  891. time.Sleep(100 * time.Millisecond)
  892. }
  893. if len(want) != len(got) {
  894. for i, e := range want {
  895. t.Errorf("in want: %d, %s", i, e.GetType())
  896. }
  897. for i, e := range got {
  898. t.Errorf("in got: %d, %s", i, e.GetType())
  899. }
  900. return fmt.Errorf("didn't get same amount of log entries, want: %d, got: %d", len(want), len(got))
  901. }
  902. var errored bool
  903. for i := 0; i < len(got); i++ {
  904. if !equalLogEntry(want[i], got[i]) {
  905. t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i])
  906. errored = true
  907. }
  908. }
  909. if errored {
  910. return fmt.Errorf("test failed")
  911. }
  912. return nil
  913. }
  914. func (s) TestServerBinaryLogUnaryRPC(t *testing.T) {
  915. if err := testServerBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil {
  916. t.Fatal(err)
  917. }
  918. }
  919. func (s) TestServerBinaryLogUnaryRPCError(t *testing.T) {
  920. if err := testServerBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil {
  921. t.Fatal(err)
  922. }
  923. }
  924. func (s) TestServerBinaryLogClientStreamRPC(t *testing.T) {
  925. count := 5
  926. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil {
  927. t.Fatal(err)
  928. }
  929. }
  930. func (s) TestServerBinaryLogClientStreamRPCError(t *testing.T) {
  931. count := 1
  932. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil {
  933. t.Fatal(err)
  934. }
  935. }
  936. func (s) TestServerBinaryLogServerStreamRPC(t *testing.T) {
  937. count := 5
  938. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil {
  939. t.Fatal(err)
  940. }
  941. }
  942. func (s) TestServerBinaryLogServerStreamRPCError(t *testing.T) {
  943. count := 5
  944. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil {
  945. t.Fatal(err)
  946. }
  947. }
  948. func (s) TestServerBinaryLogFullDuplex(t *testing.T) {
  949. count := 5
  950. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil {
  951. t.Fatal(err)
  952. }
  953. }
  954. func (s) TestServerBinaryLogFullDuplexError(t *testing.T) {
  955. count := 5
  956. if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil {
  957. t.Fatal(err)
  958. }
  959. }