/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
Package main provides a client used for benchmarking. Before running the
client, the user must first launch the gRPC server.

To start the server before running the client, look for the command under
the following file:

	benchmark/server/main.go

After starting the server, the client can be run. An example of how to run
this command is:

	go run benchmark/client/main.go -test_name=grpc_test

If the server is running on a port other than 50051, use the -port flag so
the client hits the server on the correct port. An example of how to run
this command on a different port:

	go run benchmark/client/main.go -test_name=grpc_test -port=8080
*/
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/syscall"

	testgrpc "google.golang.org/grpc/interop/grpc_testing"
	testpb "google.golang.org/grpc/interop/grpc_testing"
)

var (
	port      = flag.String("port", "50051", "Localhost port to connect to.")
	numRPC    = flag.Int("r", 1, "The number of concurrent RPCs on each connection.")
	numConn   = flag.Int("c", 1, "The number of parallel connections.")
	warmupDur = flag.Int("w", 10, "Warm-up duration in seconds")
	duration  = flag.Int("d", 60, "Benchmark duration in seconds")
	rqSize    = flag.Int("req", 1, "Request message size in bytes.")
	rspSize   = flag.Int("resp", 1, "Response message size in bytes.")
	rpcType   = flag.String("rpc_type", "unary",
		`Configure different client rpc type. Valid options are:
		   unary;
		   streaming.`)
	testName = flag.String("test_name", "", "Name of the test used for creating profiles.")

	wg sync.WaitGroup

	hopts = stats.HistogramOptions{
		NumBuckets:   2495,
		GrowthFactor: .01,
	}

	mu    sync.Mutex
	hists []*stats.Histogram

	logger = grpclog.Component("benchmark")
)

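// main connects to the benchmark server, runs a timed warm-up followed by
// the measured benchmark on every connection, writes CPU and heap profiles
// to /tmp, and prints QPS, latency percentiles, and client CPU usage.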
func main() {
	flag.Parse()
	if *testName == "" {
		logger.Fatal("-test_name not set")
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(*rspSize),
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: make([]byte, *rqSize),
		},
	}
	connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer connectCancel()
	ccs := buildConnections(connectCtx)
	warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second)
	endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
	cf, err := os.Create("/tmp/" + *testName + ".cpu")
	if err != nil {
		logger.Fatalf("Error creating file: %v", err)
	}
	defer cf.Close()
	if err := pprof.StartCPUProfile(cf); err != nil {
		logger.Fatalf("Error starting CPU profile: %v", err)
	}
	cpuBeg := syscall.GetCPUTime()
	for _, cc := range ccs {
		runWithConn(cc, req, warmDeadline, endDeadline)
	}
	wg.Wait()
	cpu := time.Duration(syscall.GetCPUTime() - cpuBeg)
	pprof.StopCPUProfile()
	mf, err := os.Create("/tmp/" + *testName + ".mem")
	if err != nil {
		logger.Fatalf("Error creating file: %v", err)
	}
	defer mf.Close()
	runtime.GC() // materialize all statistics
	if err := pprof.WriteHeapProfile(mf); err != nil {
		logger.Fatalf("Error writing memory profile: %v", err)
	}
	hist := stats.NewHistogram(hopts)
	for _, h := range hists {
		hist.Merge(h)
	}
	parseHist(hist)
	fmt.Println("Client CPU utilization:", cpu)
	fmt.Println("Client CPU profile:", cf.Name())
	fmt.Println("Client Mem Profile:", mf.Name())
}

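// buildConnections opens numConn blocking connections to the local
// benchmark server, failing the dial if a connection is not ready before
// the provided context expires.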
func buildConnections(ctx context.Context) []*grpc.ClientConn {
	ccs := make([]*grpc.ClientConn, *numConn)
	for i := range ccs {
		ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
	}
	return ccs
}

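// runWithConn starts numRPC goroutines on the connection. Each goroutine
// issues RPCs back to back until endDeadline, recording per-RPC latency in
// its own histogram once warmDeadline has passed, then appends that
// histogram to the shared hists slice before exiting.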
func runWithConn(cc *grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) {
	for i := 0; i < *numRPC; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			caller := makeCaller(cc, req)
			hist := stats.NewHistogram(hopts)
			for {
				start := time.Now()
				if start.After(endDeadline) {
					mu.Lock()
					hists = append(hists, hist)
					mu.Unlock()
					return
				}
				caller()
				elapsed := time.Since(start)
				if start.After(warmDeadline) {
					hist.Add(elapsed.Nanoseconds())
				}
			}
		}()
	}
}

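// makeCaller returns a closure that performs a single RPC: one unary call
// per invocation when -rpc_type=unary, or one send/receive round trip on a
// single long-lived stream otherwise.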
func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
	client := testgrpc.NewBenchmarkServiceClient(cc)
	if *rpcType == "unary" {
		return func() {
			if _, err := client.UnaryCall(context.Background(), req); err != nil {
				logger.Fatalf("RPC failed: %v", err)
			}
		}
	}
	stream, err := client.StreamingCall(context.Background())
	if err != nil {
		logger.Fatalf("RPC failed: %v", err)
	}
	return func() {
		if err := stream.Send(req); err != nil {
			logger.Fatalf("Streaming RPC failed to send: %v", err)
		}
		if _, err := stream.Recv(); err != nil {
			logger.Fatalf("Streaming RPC failed to read: %v", err)
		}
	}
}

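// parseHist prints the benchmark results derived from the merged
// histogram: queries per second over the measured duration and the
// 50th/90th/99th percentile latencies.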
func parseHist(hist *stats.Histogram) {
	fmt.Println("qps:", float64(hist.Count)/float64(*duration))
	fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n",
		time.Duration(median(.5, hist)),
		time.Duration(median(.9, hist)),
		time.Duration(median(.99, hist)))
}

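// median estimates the given percentile of the histogram. It walks the
// buckets until the cumulative count reaches the target rank, then
// interpolates linearly between the bucket's lower bound and the next
// bucket's lower bound, LowBound * (1 + GrowthFactor).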
func median(percentile float64, h *stats.Histogram) int64 {
	need := int64(float64(h.Count) * percentile)
	have := int64(0)
	for _, bucket := range h.Buckets {
		count := bucket.Count
		if have+count >= need {
			percent := float64(need-have) / float64(count)
			return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor))
		}
		have += bucket.Count
	}
	panic("should have found a bound")
}