// metrics.proto — OpenTelemetry (OTLP) metric data model definitions.
  1. // Copyright 2019, OpenTelemetry Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. syntax = "proto3";
  15. package opentelemetry.proto.metrics.v1;
  16. import "opentelemetry/proto/common/v1/common.proto";
  17. import "opentelemetry/proto/resource/v1/resource.proto";
  18. option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
  19. option java_multiple_files = true;
  20. option java_package = "io.opentelemetry.proto.metrics.v1";
  21. option java_outer_classname = "MetricsProto";
  22. option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1";
  23. // MetricsData represents the metrics data that can be stored in a persistent
  24. // storage, OR can be embedded by other protocols that transfer OTLP metrics
  25. // data but do not implement the OTLP protocol.
  26. //
  27. // MetricsData
  28. // └─── ResourceMetrics
  29. // ├── Resource
  30. // ├── SchemaURL
  31. // └── ScopeMetrics
  32. // ├── Scope
  33. // ├── SchemaURL
  34. // └── Metric
  35. // ├── Name
  36. // ├── Description
  37. // ├── Unit
  38. // └── data
  39. // ├── Gauge
  40. // ├── Sum
  41. // ├── Histogram
  42. // ├── ExponentialHistogram
  43. // └── Summary
  44. //
  45. // The main difference between this message and collector protocol is that
  46. // in this message there will not be any "control" or "metadata" specific to
  47. // OTLP protocol.
  48. //
  49. // When new fields are added into this message, the OTLP request MUST be updated
  50. // as well.
  51. message MetricsData {
  52. // An array of ResourceMetrics.
  53. // For data coming from a single resource this array will typically contain
  54. // one element. Intermediary nodes that receive data from multiple origins
  55. // typically batch the data before forwarding further and in that case this
  56. // array will contain multiple elements.
  57. repeated ResourceMetrics resource_metrics = 1;
  58. }
  59. // A collection of ScopeMetrics from a Resource.
  60. message ResourceMetrics {
  61. reserved 1000;
  62. // The resource for the metrics in this message.
  63. // If this field is not set then no resource info is known.
  64. opentelemetry.proto.resource.v1.Resource resource = 1;
  65. // A list of metrics that originate from a resource.
  66. repeated ScopeMetrics scope_metrics = 2;
  67. // The Schema URL, if known. This is the identifier of the Schema that the resource data
  68. // is recorded in. Notably, the last part of the URL path is the version number of the
  69. // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
  70. // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
  71. // This schema_url applies to the data in the "resource" field. It does not apply
  72. // to the data in the "scope_metrics" field which have their own schema_url field.
  73. string schema_url = 3;
  74. }
  75. // A collection of Metrics produced by an Scope.
  76. message ScopeMetrics {
  77. // The instrumentation scope information for the metrics in this message.
  78. // Semantically when InstrumentationScope isn't set, it is equivalent with
  79. // an empty instrumentation scope name (unknown).
  80. opentelemetry.proto.common.v1.InstrumentationScope scope = 1;
  81. // A list of metrics that originate from an instrumentation library.
  82. repeated Metric metrics = 2;
  83. // The Schema URL, if known. This is the identifier of the Schema that the metric data
  84. // is recorded in. Notably, the last part of the URL path is the version number of the
  85. // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
  86. // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
  87. // This schema_url applies to all metrics in the "metrics" field.
  88. string schema_url = 3;
  89. }
  90. // Defines a Metric which has one or more timeseries. The following is a
  91. // brief summary of the Metric data model. For more details, see:
  92. //
  93. // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
  94. //
  95. // The data model and relation between entities is shown in the
  96. // diagram below. Here, "DataPoint" is the term used to refer to any
  97. // one of the specific data point value types, and "points" is the term used
  98. // to refer to any one of the lists of points contained in the Metric.
  99. //
  100. // - Metric is composed of a metadata and data.
  101. // - Metadata part contains a name, description, unit.
  102. // - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
  103. // - DataPoint contains timestamps, attributes, and one of the possible value type
  104. // fields.
  105. //
  106. // Metric
  107. // +------------+
  108. // |name |
  109. // |description |
  110. // |unit | +------------------------------------+
  111. // |data |---> |Gauge, Sum, Histogram, Summary, ... |
  112. // +------------+ +------------------------------------+
  113. //
  114. // Data [One of Gauge, Sum, Histogram, Summary, ...]
  115. // +-----------+
  116. // |... | // Metadata about the Data.
  117. // |points |--+
  118. // +-----------+ |
  119. // | +---------------------------+
  120. // | |DataPoint 1 |
  121. // v |+------+------+ +------+ |
  122. // +-----+ ||label |label |...|label | |
  123. // | 1 |-->||value1|value2|...|valueN| |
  124. // +-----+ |+------+------+ +------+ |
  125. // | . | |+-----+ |
  126. // | . | ||value| |
  127. // | . | |+-----+ |
  128. // | . | +---------------------------+
  129. // | . | .
  130. // | . | .
  131. // | . | .
  132. // | . | +---------------------------+
  133. // | . | |DataPoint M |
  134. // +-----+ |+------+------+ +------+ |
  135. // | M |-->||label |label |...|label | |
  136. // +-----+ ||value1|value2|...|valueN| |
  137. // |+------+------+ +------+ |
  138. // |+-----+ |
  139. // ||value| |
  140. // |+-----+ |
  141. // +---------------------------+
  142. //
  143. // Each distinct type of DataPoint represents the output of a specific
  144. // aggregation function, the result of applying the DataPoint's
  145. // associated function of to one or more measurements.
  146. //
  147. // All DataPoint types have three common fields:
  148. // - Attributes includes key-value pairs associated with the data point
  149. // - TimeUnixNano is required, set to the end time of the aggregation
  150. // - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
  151. // having an AggregationTemporality field, as discussed below.
  152. //
  153. // Both TimeUnixNano and StartTimeUnixNano values are expressed as
  154. // UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
  155. //
  156. // # TimeUnixNano
  157. //
  158. // This field is required, having consistent interpretation across
  159. // DataPoint types. TimeUnixNano is the moment corresponding to when
  160. // the data point's aggregate value was captured.
  161. //
  162. // Data points with the 0 value for TimeUnixNano SHOULD be rejected
  163. // by consumers.
  164. //
  165. // # StartTimeUnixNano
  166. //
  167. // StartTimeUnixNano in general allows detecting when a sequence of
  168. // observations is unbroken. This field indicates to consumers the
  169. // start time for points with cumulative and delta
  170. // AggregationTemporality, and it should be included whenever possible
  171. // to support correct rate calculation. Although it may be omitted
  172. // when the start time is truly unknown, setting StartTimeUnixNano is
  173. // strongly encouraged.
  174. message Metric {
  175. reserved 4, 6, 8;
  176. // name of the metric.
  177. string name = 1;
  178. // description of the metric, which can be used in documentation.
  179. string description = 2;
  180. // unit in which the metric value is reported. Follows the format
  181. // described by http://unitsofmeasure.org/ucum.html.
  182. string unit = 3;
  183. // Data determines the aggregation type (if any) of the metric, what is the
  184. // reported value type for the data points, as well as the relatationship to
  185. // the time interval over which they are reported.
  186. oneof data {
  187. Gauge gauge = 5;
  188. Sum sum = 7;
  189. Histogram histogram = 9;
  190. ExponentialHistogram exponential_histogram = 10;
  191. Summary summary = 11;
  192. }
  193. // Additional metadata attributes that describe the metric. [Optional].
  194. // Attributes are non-identifying.
  195. // Consumers SHOULD NOT need to be aware of these attributes.
  196. // These attributes MAY be used to encode information allowing
  197. // for lossless roundtrip translation to / from another data model.
  198. // Attribute keys MUST be unique (it is not allowed to have more than one
  199. // attribute with the same key).
  200. repeated opentelemetry.proto.common.v1.KeyValue metadata = 12;
  201. }
  202. // Gauge represents the type of a scalar metric that always exports the
  203. // "current value" for every data point. It should be used for an "unknown"
  204. // aggregation.
  205. //
  206. // A Gauge does not support different aggregation temporalities. Given the
  207. // aggregation is unknown, points cannot be combined using the same
  208. // aggregation, regardless of aggregation temporalities. Therefore,
  209. // AggregationTemporality is not included. Consequently, this also means
  210. // "StartTimeUnixNano" is ignored for all data points.
  211. message Gauge {
  212. repeated NumberDataPoint data_points = 1;
  213. }
  214. // Sum represents the type of a scalar metric that is calculated as a sum of all
  215. // reported measurements over a time interval.
  216. message Sum {
  217. repeated NumberDataPoint data_points = 1;
  218. // aggregation_temporality describes if the aggregator reports delta changes
  219. // since last report time, or cumulative changes since a fixed start time.
  220. AggregationTemporality aggregation_temporality = 2;
  221. // If "true" means that the sum is monotonic.
  222. bool is_monotonic = 3;
  223. }
  224. // Histogram represents the type of a metric that is calculated by aggregating
  225. // as a Histogram of all reported measurements over a time interval.
  226. message Histogram {
  227. repeated HistogramDataPoint data_points = 1;
  228. // aggregation_temporality describes if the aggregator reports delta changes
  229. // since last report time, or cumulative changes since a fixed start time.
  230. AggregationTemporality aggregation_temporality = 2;
  231. }
  232. // ExponentialHistogram represents the type of a metric that is calculated by aggregating
  233. // as a ExponentialHistogram of all reported double measurements over a time interval.
  234. message ExponentialHistogram {
  235. repeated ExponentialHistogramDataPoint data_points = 1;
  236. // aggregation_temporality describes if the aggregator reports delta changes
  237. // since last report time, or cumulative changes since a fixed start time.
  238. AggregationTemporality aggregation_temporality = 2;
  239. }
  240. // Summary metric data are used to convey quantile summaries,
  241. // a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
  242. // and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
  243. // data type. These data points cannot always be merged in a meaningful way.
  244. // While they can be useful in some applications, histogram data points are
  245. // recommended for new applications.
  246. // Summary metrics do not have an aggregation temporality field. This is
  247. // because the count and sum fields of a SummaryDataPoint are assumed to be
  248. // cumulative values.
  249. message Summary {
  250. repeated SummaryDataPoint data_points = 1;
  251. }
  252. // AggregationTemporality defines how a metric aggregator reports aggregated
  253. // values. It describes how those values relate to the time interval over
  254. // which they are aggregated.
  255. enum AggregationTemporality {
  256. // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
  257. AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;
  258. // DELTA is an AggregationTemporality for a metric aggregator which reports
  259. // changes since last report time. Successive metrics contain aggregation of
  260. // values from continuous and non-overlapping intervals.
  261. //
  262. // The values for a DELTA metric are based only on the time interval
  263. // associated with one measurement cycle. There is no dependency on
  264. // previous measurements like is the case for CUMULATIVE metrics.
  265. //
  266. // For example, consider a system measuring the number of requests that
  267. // it receives and reports the sum of these requests every second as a
  268. // DELTA metric:
  269. //
  270. // 1. The system starts receiving at time=t_0.
  271. // 2. A request is received, the system measures 1 request.
  272. // 3. A request is received, the system measures 1 request.
  273. // 4. A request is received, the system measures 1 request.
  274. // 5. The 1 second collection cycle ends. A metric is exported for the
  275. // number of requests received over the interval of time t_0 to
  276. // t_0+1 with a value of 3.
  277. // 6. A request is received, the system measures 1 request.
  278. // 7. A request is received, the system measures 1 request.
  279. // 8. The 1 second collection cycle ends. A metric is exported for the
  280. // number of requests received over the interval of time t_0+1 to
  281. // t_0+2 with a value of 2.
  282. AGGREGATION_TEMPORALITY_DELTA = 1;
  283. // CUMULATIVE is an AggregationTemporality for a metric aggregator which
  284. // reports changes since a fixed start time. This means that current values
  285. // of a CUMULATIVE metric depend on all previous measurements since the
  286. // start time. Because of this, the sender is required to retain this state
  287. // in some form. If this state is lost or invalidated, the CUMULATIVE metric
  288. // values MUST be reset and a new fixed start time following the last
  289. // reported measurement time sent MUST be used.
  290. //
  291. // For example, consider a system measuring the number of requests that
  292. // it receives and reports the sum of these requests every second as a
  293. // CUMULATIVE metric:
  294. //
  295. // 1. The system starts receiving at time=t_0.
  296. // 2. A request is received, the system measures 1 request.
  297. // 3. A request is received, the system measures 1 request.
  298. // 4. A request is received, the system measures 1 request.
  299. // 5. The 1 second collection cycle ends. A metric is exported for the
  300. // number of requests received over the interval of time t_0 to
  301. // t_0+1 with a value of 3.
  302. // 6. A request is received, the system measures 1 request.
  303. // 7. A request is received, the system measures 1 request.
  304. // 8. The 1 second collection cycle ends. A metric is exported for the
  305. // number of requests received over the interval of time t_0 to
  306. // t_0+2 with a value of 5.
  307. // 9. The system experiences a fault and loses state.
  308. // 10. The system recovers and resumes receiving at time=t_1.
  309. // 11. A request is received, the system measures 1 request.
  310. // 12. The 1 second collection cycle ends. A metric is exported for the
  311. // number of requests received over the interval of time t_1 to
  312. // t_0+1 with a value of 1.
  313. //
  314. // Note: Even though, when reporting changes since last report time, using
  315. // CUMULATIVE is valid, it is not recommended. This may cause problems for
  316. // systems that do not use start_time to determine when the aggregation
  317. // value was reset (e.g. Prometheus).
  318. AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
  319. }
  320. // DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
  321. // bit-field representing 32 distinct boolean flags. Each flag defined in this
  322. // enum is a bit-mask. To test the presence of a single flag in the flags of
  323. // a data point, for example, use an expression like:
  324. //
  325. // (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
  326. //
  327. enum DataPointFlags {
  328. // The zero value for the enum. Should not be used for comparisons.
  329. // Instead use bitwise "and" with the appropriate mask as shown above.
  330. DATA_POINT_FLAGS_DO_NOT_USE = 0;
  331. // This DataPoint is valid but has no recorded value. This value
  332. // SHOULD be used to reflect explicitly missing data in a series, as
  333. // for an equivalent to the Prometheus "staleness marker".
  334. DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1;
  335. // Bits 2-31 are reserved for future use.
  336. }
  337. // NumberDataPoint is a single data point in a timeseries that describes the
  338. // time-varying scalar value of a metric.
  339. message NumberDataPoint {
  340. reserved 1;
  341. // The set of key/value pairs that uniquely identify the timeseries from
  342. // where this point belongs. The list may be empty (may contain 0 elements).
  343. // Attribute keys MUST be unique (it is not allowed to have more than one
  344. // attribute with the same key).
  345. repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
  346. // StartTimeUnixNano is optional but strongly encouraged, see the
  347. // the detailed comments above Metric.
  348. //
  349. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  350. // 1970.
  351. fixed64 start_time_unix_nano = 2;
  352. // TimeUnixNano is required, see the detailed comments above Metric.
  353. //
  354. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  355. // 1970.
  356. fixed64 time_unix_nano = 3;
  357. // The value itself. A point is considered invalid when one of the recognized
  358. // value fields is not present inside this oneof.
  359. oneof value {
  360. double as_double = 4;
  361. sfixed64 as_int = 6;
  362. }
  363. // (Optional) List of exemplars collected from
  364. // measurements that were used to form the data point
  365. repeated Exemplar exemplars = 5;
  366. // Flags that apply to this specific data point. See DataPointFlags
  367. // for the available flags and their meaning.
  368. uint32 flags = 8;
  369. }
  370. // HistogramDataPoint is a single data point in a timeseries that describes the
  371. // time-varying values of a Histogram. A Histogram contains summary statistics
  372. // for a population of values, it may optionally contain the distribution of
  373. // those values across a set of buckets.
  374. //
  375. // If the histogram contains the distribution of values, then both
  376. // "explicit_bounds" and "bucket counts" fields must be defined.
  377. // If the histogram does not contain the distribution of values, then both
  378. // "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
  379. // "sum" are known.
  380. message HistogramDataPoint {
  381. reserved 1;
  382. // The set of key/value pairs that uniquely identify the timeseries from
  383. // where this point belongs. The list may be empty (may contain 0 elements).
  384. // Attribute keys MUST be unique (it is not allowed to have more than one
  385. // attribute with the same key).
  386. repeated opentelemetry.proto.common.v1.KeyValue attributes = 9;
  387. // StartTimeUnixNano is optional but strongly encouraged, see the
  388. // the detailed comments above Metric.
  389. //
  390. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  391. // 1970.
  392. fixed64 start_time_unix_nano = 2;
  393. // TimeUnixNano is required, see the detailed comments above Metric.
  394. //
  395. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  396. // 1970.
  397. fixed64 time_unix_nano = 3;
  398. // count is the number of values in the population. Must be non-negative. This
  399. // value must be equal to the sum of the "count" fields in buckets if a
  400. // histogram is provided.
  401. fixed64 count = 4;
  402. // sum of the values in the population. If count is zero then this field
  403. // must be zero.
  404. //
  405. // Note: Sum should only be filled out when measuring non-negative discrete
  406. // events, and is assumed to be monotonic over the values of these events.
  407. // Negative events *can* be recorded, but sum should not be filled out when
  408. // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  409. // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
  410. optional double sum = 5;
  411. // bucket_counts is an optional field contains the count values of histogram
  412. // for each bucket.
  413. //
  414. // The sum of the bucket_counts must equal the value in the count field.
  415. //
  416. // The number of elements in bucket_counts array must be by one greater than
  417. // the number of elements in explicit_bounds array.
  418. repeated fixed64 bucket_counts = 6;
  419. // explicit_bounds specifies buckets with explicitly defined bounds for values.
  420. //
  421. // The boundaries for bucket at index i are:
  422. //
  423. // (-infinity, explicit_bounds[i]] for i == 0
  424. // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
  425. // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
  426. //
  427. // The values in the explicit_bounds array must be strictly increasing.
  428. //
  429. // Histogram buckets are inclusive of their upper boundary, except the last
  430. // bucket where the boundary is at infinity. This format is intentionally
  431. // compatible with the OpenMetrics histogram definition.
  432. repeated double explicit_bounds = 7;
  433. // (Optional) List of exemplars collected from
  434. // measurements that were used to form the data point
  435. repeated Exemplar exemplars = 8;
  436. // Flags that apply to this specific data point. See DataPointFlags
  437. // for the available flags and their meaning.
  438. uint32 flags = 10;
  439. // min is the minimum value over (start_time, end_time].
  440. optional double min = 11;
  441. // max is the maximum value over (start_time, end_time].
  442. optional double max = 12;
  443. }
  444. // ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
  445. // time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
  446. // summary statistics for a population of values, it may optionally contain the
  447. // distribution of those values across a set of buckets.
  448. //
  449. message ExponentialHistogramDataPoint {
  450. // The set of key/value pairs that uniquely identify the timeseries from
  451. // where this point belongs. The list may be empty (may contain 0 elements).
  452. // Attribute keys MUST be unique (it is not allowed to have more than one
  453. // attribute with the same key).
  454. repeated opentelemetry.proto.common.v1.KeyValue attributes = 1;
  455. // StartTimeUnixNano is optional but strongly encouraged, see the
  456. // the detailed comments above Metric.
  457. //
  458. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  459. // 1970.
  460. fixed64 start_time_unix_nano = 2;
  461. // TimeUnixNano is required, see the detailed comments above Metric.
  462. //
  463. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  464. // 1970.
  465. fixed64 time_unix_nano = 3;
  466. // count is the number of values in the population. Must be
  467. // non-negative. This value must be equal to the sum of the "bucket_counts"
  468. // values in the positive and negative Buckets plus the "zero_count" field.
  469. fixed64 count = 4;
  470. // sum of the values in the population. If count is zero then this field
  471. // must be zero.
  472. //
  473. // Note: Sum should only be filled out when measuring non-negative discrete
  474. // events, and is assumed to be monotonic over the values of these events.
  475. // Negative events *can* be recorded, but sum should not be filled out when
  476. // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  477. // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
  478. optional double sum = 5;
  479. // scale describes the resolution of the histogram. Boundaries are
  480. // located at powers of the base, where:
  481. //
  482. // base = (2^(2^-scale))
  483. //
  484. // The histogram bucket identified by `index`, a signed integer,
  485. // contains values that are greater than (base^index) and
  486. // less than or equal to (base^(index+1)).
  487. //
  488. // The positive and negative ranges of the histogram are expressed
  489. // separately. Negative values are mapped by their absolute value
  490. // into the negative range using the same scale as the positive range.
  491. //
  492. // scale is not restricted by the protocol, as the permissible
  493. // values depend on the range of the data.
  494. sint32 scale = 6;
  495. // zero_count is the count of values that are either exactly zero or
  496. // within the region considered zero by the instrumentation at the
  497. // tolerated degree of precision. This bucket stores values that
  498. // cannot be expressed using the standard exponential formula as
  499. // well as values that have been rounded to zero.
  500. //
  501. // Implementations MAY consider the zero bucket to have probability
  502. // mass equal to (zero_count / count).
  503. fixed64 zero_count = 7;
  504. // positive carries the positive range of exponential bucket counts.
  505. Buckets positive = 8;
  506. // negative carries the negative range of exponential bucket counts.
  507. Buckets negative = 9;
  508. // Buckets are a set of bucket counts, encoded in a contiguous array
  509. // of counts.
  510. message Buckets {
  511. // Offset is the bucket index of the first entry in the bucket_counts array.
  512. //
  513. // Note: This uses a varint encoding as a simple form of compression.
  514. sint32 offset = 1;
  515. // bucket_counts is an array of count values, where bucket_counts[i] carries
  516. // the count of the bucket at index (offset+i). bucket_counts[i] is the count
  517. // of values greater than base^(offset+i) and less than or equal to
  518. // base^(offset+i+1).
  519. //
  520. // Note: By contrast, the explicit HistogramDataPoint uses
  521. // fixed64. This field is expected to have many buckets,
  522. // especially zeros, so uint64 has been selected to ensure
  523. // varint encoding.
  524. repeated uint64 bucket_counts = 2;
  525. }
  526. // Flags that apply to this specific data point. See DataPointFlags
  527. // for the available flags and their meaning.
  528. uint32 flags = 10;
  529. // (Optional) List of exemplars collected from
  530. // measurements that were used to form the data point
  531. repeated Exemplar exemplars = 11;
  532. // min is the minimum value over (start_time, end_time].
  533. optional double min = 12;
  534. // max is the maximum value over (start_time, end_time].
  535. optional double max = 13;
  536. // ZeroThreshold may be optionally set to convey the width of the zero
  537. // region. Where the zero region is defined as the closed interval
  538. // [-ZeroThreshold, ZeroThreshold].
  539. // When ZeroThreshold is 0, zero count bucket stores values that cannot be
  540. // expressed using the standard exponential formula as well as values that
  541. // have been rounded to zero.
  542. double zero_threshold = 14;
  543. }
  544. // SummaryDataPoint is a single data point in a timeseries that describes the
  545. // time-varying values of a Summary metric. The count and sum fields represent
  546. // cumulative values.
  547. message SummaryDataPoint {
  548. reserved 1;
  549. // The set of key/value pairs that uniquely identify the timeseries from
  550. // where this point belongs. The list may be empty (may contain 0 elements).
  551. // Attribute keys MUST be unique (it is not allowed to have more than one
  552. // attribute with the same key).
  553. repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
  554. // StartTimeUnixNano is optional but strongly encouraged, see the
  555. // the detailed comments above Metric.
  556. //
  557. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  558. // 1970.
  559. fixed64 start_time_unix_nano = 2;
  560. // TimeUnixNano is required, see the detailed comments above Metric.
  561. //
  562. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  563. // 1970.
  564. fixed64 time_unix_nano = 3;
  565. // count is the number of values in the population. Must be non-negative.
  566. fixed64 count = 4;
  567. // sum of the values in the population. If count is zero then this field
  568. // must be zero.
  569. //
  570. // Note: Sum should only be filled out when measuring non-negative discrete
  571. // events, and is assumed to be monotonic over the values of these events.
  572. // Negative events *can* be recorded, but sum should not be filled out when
  573. // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  574. // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
  575. double sum = 5;
  576. // Represents the value at a given quantile of a distribution.
  577. //
  578. // To record Min and Max values following conventions are used:
  579. // - The 1.0 quantile is equivalent to the maximum value observed.
  580. // - The 0.0 quantile is equivalent to the minimum value observed.
  581. //
  582. // See the following issue for more context:
  583. // https://github.com/open-telemetry/opentelemetry-proto/issues/125
  584. message ValueAtQuantile {
  585. // The quantile of a distribution. Must be in the interval
  586. // [0.0, 1.0].
  587. double quantile = 1;
  588. // The value at the given quantile of a distribution.
  589. //
  590. // Quantile values must NOT be negative.
  591. double value = 2;
  592. }
  593. // (Optional) list of values at different quantiles of the distribution calculated
  594. // from the current snapshot. The quantiles must be strictly increasing.
  595. repeated ValueAtQuantile quantile_values = 6;
  596. // Flags that apply to this specific data point. See DataPointFlags
  597. // for the available flags and their meaning.
  598. uint32 flags = 8;
  599. }
  600. // A representation of an exemplar, which is a sample input measurement.
  601. // Exemplars also hold information about the environment when the measurement
  602. // was recorded, for example the span and trace ID of the active span when the
  603. // exemplar was recorded.
  604. message Exemplar {
  605. reserved 1;
  606. // The set of key/value pairs that were filtered out by the aggregator, but
  607. // recorded alongside the original measurement. Only key/value pairs that were
  608. // filtered out by the aggregator should be included
  609. repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7;
  610. // time_unix_nano is the exact time when this exemplar was recorded
  611. //
  612. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  613. // 1970.
  614. fixed64 time_unix_nano = 2;
  615. // The value of the measurement that was recorded. An exemplar is
  616. // considered invalid when one of the recognized value fields is not present
  617. // inside this oneof.
  618. oneof value {
  619. double as_double = 3;
  620. sfixed64 as_int = 6;
  621. }
  622. // (Optional) Span ID of the exemplar trace.
  623. // span_id may be missing if the measurement is not recorded inside a trace
  624. // or if the trace is not sampled.
  625. bytes span_id = 4;
  626. // (Optional) Trace ID of the exemplar trace.
  627. // trace_id may be missing if the measurement is not recorded inside a trace
  628. // or if the trace is not sampled.
  629. bytes trace_id = 5;
  630. }