- // Copyright 2019, OpenTelemetry Authors
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- syntax = "proto3";
- package opentelemetry.proto.metrics.v1;
- import "opentelemetry/proto/common/v1/common.proto";
- import "opentelemetry/proto/resource/v1/resource.proto";
- option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
- option java_multiple_files = true;
- option java_package = "io.opentelemetry.proto.metrics.v1";
- option java_outer_classname = "MetricsProto";
- option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1";
- // MetricsData represents the metrics data that can be stored in a persistent
- // storage, OR can be embedded by other protocols that transfer OTLP metrics
- // data but do not implement the OTLP protocol.
- //
- // The main difference between this message and collector protocol is that
- // in this message there will not be any "control" or "metadata" specific to
- // OTLP protocol.
- //
- // When new fields are added into this message, the OTLP request MUST be updated
- // as well.
- message MetricsData {
- // An array of ResourceMetrics.
- // For data coming from a single resource this array will typically contain
- // one element. Intermediary nodes that receive data from multiple origins
- // typically batch the data before forwarding further and in that case this
- // array will contain multiple elements.
- repeated ResourceMetrics resource_metrics = 1;
- }
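- // A minimal Go sketch of persisting a MetricsData as bytes, assuming the
- // generated bindings and the google.golang.org/protobuf/proto runtime
- // (illustrative only):
- //
- //   payload, err := proto.Marshal(md) // md is a *metricspb.MetricsData
- //   if err != nil {
- //       // handle the error
- //   }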
- // A collection of ScopeMetrics from a Resource.
- message ResourceMetrics {
- reserved 1000;
- // The resource for the metrics in this message.
- // If this field is not set then no resource info is known.
- opentelemetry.proto.resource.v1.Resource resource = 1;
- // A list of metrics that originate from a resource.
- repeated ScopeMetrics scope_metrics = 2;
- // The Schema URL, if known. This is the identifier of the Schema that the resource data
- // is recorded in. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to the data in the "resource" field. It does not apply
- // to the data in the "scope_metrics" field which have their own schema_url field.
- string schema_url = 3;
- }
- // A collection of Metrics produced by a Scope.
- message ScopeMetrics {
- // The instrumentation scope information for the metrics in this message.
- // Semantically, when InstrumentationScope isn't set, it is equivalent to
- // an empty instrumentation scope name (unknown).
- opentelemetry.proto.common.v1.InstrumentationScope scope = 1;
- // A list of metrics that originate from an instrumentation library.
- repeated Metric metrics = 2;
- // The Schema URL, if known. This is the identifier of the Schema that the metric data
- // is recorded in. To learn more about Schema URL see
- // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all metrics in the "metrics" field.
- string schema_url = 3;
- }
- // Defines a Metric which has one or more timeseries. The following is a
- // brief summary of the Metric data model. For more details, see:
- //
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
- //
- //
- // The data model and relation between entities is shown in the
- // diagram below. Here, "DataPoint" is the term used to refer to any
- // one of the specific data point value types, and "points" is the term used
- // to refer to any one of the lists of points contained in the Metric.
- //
- // - Metric is composed of metadata and data.
- // - The metadata part contains a name, description, and unit.
- // - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
- // - DataPoint contains timestamps, attributes, and one of the possible value type
- // fields.
- //
- // Metric
- // +------------+
- // |name |
- // |description |
- // |unit | +------------------------------------+
- // |data |---> |Gauge, Sum, Histogram, Summary, ... |
- // +------------+ +------------------------------------+
- //
- // Data [One of Gauge, Sum, Histogram, Summary, ...]
- // +-----------+
- // |... | // Metadata about the Data.
- // |points |--+
- // +-----------+ |
- // | +---------------------------+
- // | |DataPoint 1 |
- // v |+------+------+ +------+ |
- // +-----+ ||label |label |...|label | |
- // | 1 |-->||value1|value2|...|valueN| |
- // +-----+ |+------+------+ +------+ |
- // | . | |+-----+ |
- // | . | ||value| |
- // | . | |+-----+ |
- // | . | +---------------------------+
- // | . | .
- // | . | .
- // | . | .
- // | . | +---------------------------+
- // | . | |DataPoint M |
- // +-----+ |+------+------+ +------+ |
- // | M |-->||label |label |...|label | |
- // +-----+ ||value1|value2|...|valueN| |
- // |+------+------+ +------+ |
- // |+-----+ |
- // ||value| |
- // |+-----+ |
- // +---------------------------+
- //
- // Each distinct type of DataPoint represents the output of a specific
- // aggregation function, the result of applying the DataPoint's
- // associated function to one or more measurements.
- //
- // All DataPoint types have three common fields:
- // - Attributes includes key-value pairs associated with the data point
- // - TimeUnixNano is required, set to the end time of the aggregation
- // - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
- // having an AggregationTemporality field, as discussed below.
- //
- // Both TimeUnixNano and StartTimeUnixNano values are expressed as
- // UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
- //
- // # TimeUnixNano
- //
- // This field is required, having consistent interpretation across
- // DataPoint types. TimeUnixNano is the moment corresponding to when
- // the data point's aggregate value was captured.
- //
- // Data points with the 0 value for TimeUnixNano SHOULD be rejected
- // by consumers.
- //
- // # StartTimeUnixNano
- //
- // StartTimeUnixNano in general allows detecting when a sequence of
- // observations is unbroken. This field indicates to consumers the
- // start time for points with cumulative and delta
- // AggregationTemporality, and it should be included whenever possible
- // to support correct rate calculation. Although it may be omitted
- // when the start time is truly unknown, setting StartTimeUnixNano is
- // strongly encouraged.
- message Metric {
- reserved 4, 6, 8;
- // name of the metric.
- string name = 1;
- // description of the metric, which can be used in documentation.
- string description = 2;
- // unit in which the metric value is reported. Follows the format
- // described by http://unitsofmeasure.org/ucum.html.
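- // For example, typical UCUM units are "s" (seconds), "ms" (milliseconds),
- // "By" (bytes) and "1" (dimensionless), with curly-brace annotations such as
- // "{request}" used for pure counts.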
- string unit = 3;
- // Data determines the aggregation type (if any) of the metric, the reported
- // value type for the data points, and the relationship to the time interval
- // over which they are reported.
- oneof data {
- Gauge gauge = 5;
- Sum sum = 7;
- Histogram histogram = 9;
- ExponentialHistogram exponential_histogram = 10;
- Summary summary = 11;
- }
- // Additional metadata attributes that describe the metric. [Optional].
- // Attributes are non-identifying.
- // Consumers SHOULD NOT need to be aware of these attributes.
- // These attributes MAY be used to encode information allowing
- // for lossless roundtrip translation to / from another data model.
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- repeated opentelemetry.proto.common.v1.KeyValue metadata = 12;
- }
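- // As an illustration of the nesting described above (MetricsData ->
- // ResourceMetrics -> ScopeMetrics -> Metric -> data points), a producer could
- // build a single Gauge point as in the following Go sketch. It assumes the
- // generated bindings (go.opentelemetry.io/proto/otlp), with metricspb,
- // commonpb and resourcepb aliasing the metrics, common and resource packages;
- // the metric name and value are illustrative.
- //
- //   md := &metricspb.MetricsData{
- //       ResourceMetrics: []*metricspb.ResourceMetrics{{
- //           Resource: &resourcepb.Resource{},
- //           ScopeMetrics: []*metricspb.ScopeMetrics{{
- //               Scope: &commonpb.InstrumentationScope{Name: "example.scope"},
- //               Metrics: []*metricspb.Metric{{
- //                   Name: "process.memory.usage",
- //                   Unit: "By",
- //                   Data: &metricspb.Metric_Gauge{Gauge: &metricspb.Gauge{
- //                       DataPoints: []*metricspb.NumberDataPoint{{
- //                           TimeUnixNano: uint64(time.Now().UnixNano()),
- //                           Value: &metricspb.NumberDataPoint_AsDouble{AsDouble: 42e6},
- //                       }},
- //                   }},
- //               }},
- //           }},
- //       }},
- //   }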
- // Gauge represents the type of a scalar metric that always exports the
- // "current value" for every data point. It should be used for an "unknown"
- // aggregation.
- //
- // A Gauge does not support different aggregation temporalities. Given the
- // aggregation is unknown, points cannot be combined using the same
- // aggregation, regardless of aggregation temporalities. Therefore,
- // AggregationTemporality is not included. Consequently, this also means
- // "StartTimeUnixNano" is ignored for all data points.
- message Gauge {
- repeated NumberDataPoint data_points = 1;
- }
- // Sum represents the type of a scalar metric that is calculated as a sum of all
- // reported measurements over a time interval.
- message Sum {
- repeated NumberDataPoint data_points = 1;
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality aggregation_temporality = 2;
- // If "true" means that the sum is monotonic.
- bool is_monotonic = 3;
- }
- // Histogram represents the type of a metric that is calculated by aggregating
- // as a Histogram of all reported measurements over a time interval.
- message Histogram {
- repeated HistogramDataPoint data_points = 1;
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality aggregation_temporality = 2;
- }
- // ExponentialHistogram represents the type of a metric that is calculated by aggregating
- // as an ExponentialHistogram of all reported double measurements over a time interval.
- message ExponentialHistogram {
- repeated ExponentialHistogramDataPoint data_points = 1;
- // aggregation_temporality describes if the aggregator reports delta changes
- // since last report time, or cumulative changes since a fixed start time.
- AggregationTemporality aggregation_temporality = 2;
- }
- // Summary metric data are used to convey quantile summaries,
- // a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
- // and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
- // data type. These data points cannot always be merged in a meaningful way.
- // While they can be useful in some applications, histogram data points are
- // recommended for new applications.
- message Summary {
- repeated SummaryDataPoint data_points = 1;
- }
- // AggregationTemporality defines how a metric aggregator reports aggregated
- // values. It describes how those values relate to the time interval over
- // which they are aggregated.
- enum AggregationTemporality {
- // UNSPECIFIED is the default AggregationTemporality; it MUST NOT be used.
- AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;
- // DELTA is an AggregationTemporality for a metric aggregator which reports
- // changes since last report time. Successive metrics contain aggregation of
- // values from continuous and non-overlapping intervals.
- //
- // The values for a DELTA metric are based only on the time interval
- // associated with one measurement cycle. There is no dependency on
- // previous measurements like is the case for CUMULATIVE metrics.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // DELTA metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0+1 to
- // t_0+2 with a value of 2.
- AGGREGATION_TEMPORALITY_DELTA = 1;
- // CUMULATIVE is an AggregationTemporality for a metric aggregator which
- // reports changes since a fixed start time. This means that current values
- // of a CUMULATIVE metric depend on all previous measurements since the
- // start time. Because of this, the sender is required to retain this state
- // in some form. If this state is lost or invalidated, the CUMULATIVE metric
- // values MUST be reset and a new fixed start time following the last
- // reported measurement time sent MUST be used.
- //
- // For example, consider a system measuring the number of requests that
- // it receives and reports the sum of these requests every second as a
- // CUMULATIVE metric:
- //
- // 1. The system starts receiving at time=t_0.
- // 2. A request is received, the system measures 1 request.
- // 3. A request is received, the system measures 1 request.
- // 4. A request is received, the system measures 1 request.
- // 5. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+1 with a value of 3.
- // 6. A request is received, the system measures 1 request.
- // 7. A request is received, the system measures 1 request.
- // 8. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_0 to
- // t_0+2 with a value of 5.
- // 9. The system experiences a fault and loses state.
- // 10. The system recovers and resumes receiving at time=t_1.
- // 11. A request is received, the system measures 1 request.
- // 12. The 1 second collection cycle ends. A metric is exported for the
- // number of requests received over the interval of time t_1 to
- // t_1+1 with a value of 1.
- //
- // Note: Even though it is valid to use CUMULATIVE when reporting changes
- // since the last report time, it is not recommended. This may cause problems for
- // systems that do not use start_time to determine when the aggregation
- // value was reset (e.g. Prometheus).
- AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
- }
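- // StartTimeUnixNano also lets a consumer derive per-interval deltas from a
- // CUMULATIVE stream. A minimal Go sketch, not part of this protocol, assuming
- // the generated bindings and double-valued points of one timeseries (`prev`
- // is the previously seen point, if any):
- //
- //   func cumulativeToDelta(prev, cur *metricspb.NumberDataPoint) float64 {
- //       if prev == nil || cur.GetStartTimeUnixNano() != prev.GetStartTimeUnixNano() {
- //           // First point of the stream, or the stream was reset: the
- //           // cumulative value is the change since the (new) start time.
- //           return cur.GetAsDouble()
- //       }
- //       return cur.GetAsDouble() - prev.GetAsDouble()
- //   }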
- // DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
- // bit-field representing 32 distinct boolean flags. Each flag defined in this
- // enum is a bit-mask. To test the presence of a single flag in the flags of
- // a data point, for example, use an expression like:
- //
- // (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
- //
- enum DataPointFlags {
- // The zero value for the enum. Should not be used for comparisons.
- // Instead use bitwise "and" with the appropriate mask as shown above.
- DATA_POINT_FLAGS_DO_NOT_USE = 0;
- // This DataPoint is valid but has no recorded value. This value
- // SHOULD be used to reflect explicitly missing data in a series, as
- // an equivalent to the Prometheus "staleness marker".
- DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1;
- // Bits 2-31 are reserved for future use.
- }
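- // For example, a Go consumer using the generated bindings could evaluate the
- // expression shown above like this (a sketch):
- //
- //   mask := uint32(metricspb.DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
- //   if point.GetFlags()&mask == mask {
- //       // The point is valid but carries no recorded value (staleness marker).
- //   }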
- // NumberDataPoint is a single data point in a timeseries that describes the
- // time-varying scalar value of a metric.
- message NumberDataPoint {
- reserved 1;
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 start_time_unix_nano = 2;
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 time_unix_nano = 3;
- // The value itself. A point is considered invalid when one of the recognized
- // value fields is not present inside this oneof.
- oneof value {
- double as_double = 4;
- sfixed64 as_int = 6;
- }
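- // A Go consumer could read the oneof through the generated bindings roughly
- // as follows (a sketch; `numberValue` is an illustrative helper):
- //
- //   func numberValue(point *metricspb.NumberDataPoint) (float64, bool) {
- //       switch v := point.GetValue().(type) {
- //       case *metricspb.NumberDataPoint_AsDouble:
- //           return v.AsDouble, true
- //       case *metricspb.NumberDataPoint_AsInt:
- //           return float64(v.AsInt), true
- //       default:
- //           // Neither recognized value field is set: the point is invalid.
- //           return 0, false
- //       }
- //   }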
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- repeated Exemplar exemplars = 5;
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- uint32 flags = 8;
- }
- // HistogramDataPoint is a single data point in a timeseries that describes the
- // time-varying values of a Histogram. A Histogram contains summary statistics
- // for a population of values; it may optionally contain the distribution of
- // those values across a set of buckets.
- //
- // If the histogram contains the distribution of values, then both
- // "explicit_bounds" and "bucket counts" fields must be defined.
- // If the histogram does not contain the distribution of values, then both
- // "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
- // "sum" are known.
- message HistogramDataPoint {
- reserved 1;
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 9;
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 start_time_unix_nano = 2;
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 time_unix_nano = 3;
- // count is the number of values in the population. Must be non-negative. This
- // value must be equal to the sum of the values in "bucket_counts" if the
- // histogram buckets are provided.
- fixed64 count = 4;
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
- optional double sum = 5;
- // bucket_counts is an optional field that contains the count values of the
- // histogram for each bucket.
- //
- // The sum of the bucket_counts must equal the value in the count field.
- //
- // The number of elements in the bucket_counts array must be one greater than
- // the number of elements in the explicit_bounds array.
- repeated fixed64 bucket_counts = 6;
- // explicit_bounds specifies buckets with explicitly defined bounds for values.
- //
- // The boundaries for bucket at index i are:
- //
- // (-infinity, explicit_bounds[i]] for i == 0
- // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
- // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
- //
- // The values in the explicit_bounds array must be strictly increasing.
- //
- // Histogram buckets are inclusive of their upper boundary, except the last
- // bucket where the boundary is at infinity. This format is intentionally
- // compatible with the OpenMetrics histogram definition.
- repeated double explicit_bounds = 7;
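- //
- // For example, explicit_bounds of [0, 5, 10] describe four buckets:
- // (-infinity, 0], (0, 5], (5, 10] and (10, +infinity), so bucket_counts must
- // then hold exactly four values. A Go sketch (illustrative only) of locating
- // the bucket for a value under this upper-inclusive scheme:
- //
- //   func bucketIndex(value float64, explicitBounds []float64) int {
- //       // Returns the smallest i with explicitBounds[i] >= value, i.e. the
- //       // bucket (explicitBounds[i-1], explicitBounds[i]]; len(explicitBounds)
- //       // selects the overflow bucket.
- //       return sort.SearchFloat64s(explicitBounds, value)
- //   }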
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- repeated Exemplar exemplars = 8;
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- uint32 flags = 10;
- // min is the minimum value over (start_time, end_time].
- optional double min = 11;
- // max is the maximum value over (start_time, end_time].
- optional double max = 12;
- }
- // ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
- // time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
- // summary statistics for a population of values; it may optionally contain the
- // distribution of those values across a set of buckets.
- //
- message ExponentialHistogramDataPoint {
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 1;
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 start_time_unix_nano = 2;
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 time_unix_nano = 3;
- // count is the number of values in the population. Must be
- // non-negative. This value must be equal to the sum of the "bucket_counts"
- // values in the positive and negative Buckets plus the "zero_count" field.
- fixed64 count = 4;
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
- optional double sum = 5;
-
- // scale describes the resolution of the histogram. Boundaries are
- // located at powers of the base, where:
- //
- // base = (2^(2^-scale))
- //
- // The histogram bucket identified by `index`, a signed integer,
- // contains values that are greater than (base^index) and
- // less than or equal to (base^(index+1)).
- //
- // The positive and negative ranges of the histogram are expressed
- // separately. Negative values are mapped by their absolute value
- // into the negative range using the same scale as the positive range.
- //
- // scale is not restricted by the protocol, as the permissible
- // values depend on the range of the data.
- sint32 scale = 6;
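- //
- // For instance, at scale 3 the base is 2^(1/8) (about 1.09), giving buckets
- // roughly 9% wide. A Go sketch of the value-to-index mapping (illustrative;
- // it ignores the special handling real implementations apply to exact powers
- // of two and non-finite values):
- //
- //   func expoBucketIndex(value float64, scale int32) int64 {
- //       scaleFactor := math.Ldexp(math.Log2E, int(scale)) // 2^scale / ln(2)
- //       return int64(math.Ceil(math.Log(value)*scaleFactor)) - 1
- //   }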
- // zero_count is the count of values that are either exactly zero or
- // within the region considered zero by the instrumentation at the
- // tolerated degree of precision. This bucket stores values that
- // cannot be expressed using the standard exponential formula as
- // well as values that have been rounded to zero.
- //
- // Implementations MAY consider the zero bucket to have probability
- // mass equal to (zero_count / count).
- fixed64 zero_count = 7;
- // positive carries the positive range of exponential bucket counts.
- Buckets positive = 8;
- // negative carries the negative range of exponential bucket counts.
- Buckets negative = 9;
- // Buckets are a set of bucket counts, encoded in a contiguous array
- // of counts.
- message Buckets {
- // Offset is the bucket index of the first entry in the bucket_counts array.
- //
- // Note: This uses a varint encoding as a simple form of compression.
- sint32 offset = 1;
- // bucket_counts is an array of count values, where bucket_counts[i] carries
- // the count of the bucket at index (offset+i). bucket_counts[i] is the count
- // of values greater than base^(offset+i) and less than or equal to
- // base^(offset+i+1).
- //
- // Note: By contrast, the explicit HistogramDataPoint uses
- // fixed64. This field is expected to have many buckets,
- // especially zeros, so uint64 has been selected to ensure
- // varint encoding.
- repeated uint64 bucket_counts = 2;
- }
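- // For example, with base as defined above for the point's scale, entry i of
- // bucket_counts counts values v with base^(offset+i) < v <= base^(offset+i+1),
- // so a reader could recover the boundaries as (Go sketch, illustrative only):
- //
- //   lower := math.Pow(base, float64(offset)+float64(i))
- //   upper := math.Pow(base, float64(offset)+float64(i)+1)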
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- uint32 flags = 10;
- // (Optional) List of exemplars collected from
- // measurements that were used to form the data point
- repeated Exemplar exemplars = 11;
- // min is the minimum value over (start_time, end_time].
- optional double min = 12;
- // max is the maximum value over (start_time, end_time].
- optional double max = 13;
- // ZeroThreshold may be optionally set to convey the width of the zero
- // region, where the zero region is defined as the closed interval
- // [-ZeroThreshold, ZeroThreshold].
- // When ZeroThreshold is 0, the zero count bucket stores values that cannot be
- // expressed using the standard exponential formula as well as values that
- // have been rounded to zero.
- double zero_threshold = 14;
- }
- // SummaryDataPoint is a single data point in a timeseries that describes the
- // time-varying values of a Summary metric.
- message SummaryDataPoint {
- reserved 1;
- // The set of key/value pairs that uniquely identify the timeseries to
- // which this point belongs. The list may be empty (may contain 0 elements).
- // Attribute keys MUST be unique (it is not allowed to have more than one
- // attribute with the same key).
- repeated opentelemetry.proto.common.v1.KeyValue attributes = 7;
- // StartTimeUnixNano is optional but strongly encouraged, see the
- // detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 start_time_unix_nano = 2;
- // TimeUnixNano is required, see the detailed comments above Metric.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 time_unix_nano = 3;
- // count is the number of values in the population. Must be non-negative.
- fixed64 count = 4;
- // sum of the values in the population. If count is zero then this field
- // must be zero.
- //
- // Note: Sum should only be filled out when measuring non-negative discrete
- // events, and is assumed to be monotonic over the values of these events.
- // Negative events *can* be recorded, but sum should not be filled out when
- // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
- // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
- double sum = 5;
- // Represents the value at a given quantile of a distribution.
- //
- // To record Min and Max values, the following conventions are used:
- // - The 1.0 quantile is equivalent to the maximum value observed.
- // - The 0.0 quantile is equivalent to the minimum value observed.
- //
- // See the following issue for more context:
- // https://github.com/open-telemetry/opentelemetry-proto/issues/125
- message ValueAtQuantile {
- // The quantile of a distribution. Must be in the interval
- // [0.0, 1.0].
- double quantile = 1;
- // The value at the given quantile of a distribution.
- //
- // Quantile values must NOT be negative.
- double value = 2;
- }
- // (Optional) list of values at different quantiles of the distribution calculated
- // from the current snapshot. The quantiles must be strictly increasing.
- repeated ValueAtQuantile quantile_values = 6;
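- //
- // For example, a reader can recover the observed maximum by looking up the
- // 1.0 quantile. A Go sketch using the generated bindings (`quantile` is an
- // illustrative helper):
- //
- //   func quantile(dp *metricspb.SummaryDataPoint, q float64) (float64, bool) {
- //       for _, v := range dp.GetQuantileValues() {
- //           if v.GetQuantile() == q {
- //               return v.GetValue(), true
- //           }
- //       }
- //       return 0, false
- //   }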
- // Flags that apply to this specific data point. See DataPointFlags
- // for the available flags and their meaning.
- uint32 flags = 8;
- }
- // A representation of an exemplar, which is a sample input measurement.
- // Exemplars also hold information about the environment when the measurement
- // was recorded, for example the span and trace ID of the active span when the
- // exemplar was recorded.
- message Exemplar {
- reserved 1;
- // The set of key/value pairs that were filtered out by the aggregator, but
- // recorded alongside the original measurement. Only key/value pairs that were
- // filtered out by the aggregator should be included.
- repeated opentelemetry.proto.common.v1.KeyValue filtered_attributes = 7;
- // time_unix_nano is the exact time when this exemplar was recorded.
- //
- // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
- // 1970.
- fixed64 time_unix_nano = 2;
- // The value of the measurement that was recorded. An exemplar is
- // considered invalid when one of the recognized value fields is not present
- // inside this oneof.
- oneof value {
- double as_double = 3;
- sfixed64 as_int = 6;
- }
- // (Optional) Span ID of the exemplar trace.
- // span_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- bytes span_id = 4;
- // (Optional) Trace ID of the exemplar trace.
- // trace_id may be missing if the measurement is not recorded inside a trace
- // or if the trace is not sampled.
- bytes trace_id = 5;
- }