
YQ Connector: metrics (one more time)

A custom httppuller has been added.

Revert "Revert "metrics have been added""
This reverts commit e2a874f25a443edf946bab9a7f077239ba569ab0, reversing
changes made to 2dbbc3a1a033dd09ad29f0c168d8ea7fef97309e.
hcpp · 1 year ago · commit b7716e9978

+ 147 - 0
library/go/core/metrics/buckets.go

@@ -0,0 +1,147 @@
+package metrics
+
+import (
+	"sort"
+	"time"
+)
+
+var (
+	_ DurationBuckets = (*durationBuckets)(nil)
+	_ Buckets         = (*buckets)(nil)
+)
+
+const (
+	errBucketsCountNeedsGreaterThanZero = "n needs to be > 0"
+	errBucketsStartNeedsGreaterThanZero = "start needs to be > 0"
+	errBucketsFactorNeedsGreaterThanOne = "factor needs to be > 1"
+)
+
+type durationBuckets struct {
+	buckets []time.Duration
+}
+
+// NewDurationBuckets returns a new DurationBuckets implementation.
+func NewDurationBuckets(bk ...time.Duration) DurationBuckets {
+	sort.Slice(bk, func(i, j int) bool {
+		return bk[i] < bk[j]
+	})
+	return durationBuckets{buckets: bk}
+}
+
+func (d durationBuckets) Size() int {
+	return len(d.buckets)
+}
+
+func (d durationBuckets) MapDuration(dv time.Duration) (idx int) {
+	for _, bound := range d.buckets {
+		if dv < bound {
+			break
+		}
+		idx++
+	}
+	return
+}
+
+func (d durationBuckets) UpperBound(idx int) time.Duration {
+	if idx > d.Size()-1 {
+		panic("idx is out of bounds")
+	}
+	return d.buckets[idx]
+}
+
+type buckets struct {
+	buckets []float64
+}
+
+// NewBuckets returns a new Buckets implementation.
+func NewBuckets(bk ...float64) Buckets {
+	sort.Slice(bk, func(i, j int) bool {
+		return bk[i] < bk[j]
+	})
+	return buckets{buckets: bk}
+}
+
+func (d buckets) Size() int {
+	return len(d.buckets)
+}
+
+func (d buckets) MapValue(v float64) (idx int) {
+	for _, bound := range d.buckets {
+		if v < bound {
+			break
+		}
+		idx++
+	}
+	return
+}
+
+func (d buckets) UpperBound(idx int) float64 {
+	if idx > d.Size()-1 {
+		panic("idx is out of bounds")
+	}
+	return d.buckets[idx]
+}
+
+// MakeLinearBuckets creates a set of linear value buckets.
+func MakeLinearBuckets(start, width float64, n int) Buckets {
+	if n <= 0 {
+		panic(errBucketsCountNeedsGreaterThanZero)
+	}
+	bounds := make([]float64, n)
+	for i := range bounds {
+		bounds[i] = start + (float64(i) * width)
+	}
+	return NewBuckets(bounds...)
+}
+
+// MakeLinearDurationBuckets creates a set of linear duration buckets.
+func MakeLinearDurationBuckets(start, width time.Duration, n int) DurationBuckets {
+	if n <= 0 {
+		panic(errBucketsCountNeedsGreaterThanZero)
+	}
+	buckets := make([]time.Duration, n)
+	for i := range buckets {
+		buckets[i] = start + (time.Duration(i) * width)
+	}
+	return NewDurationBuckets(buckets...)
+}
+
+// MakeExponentialBuckets creates a set of exponential value buckets.
+func MakeExponentialBuckets(start, factor float64, n int) Buckets {
+	if n <= 0 {
+		panic(errBucketsCountNeedsGreaterThanZero)
+	}
+	if start <= 0 {
+		panic(errBucketsStartNeedsGreaterThanZero)
+	}
+	if factor <= 1 {
+		panic(errBucketsFactorNeedsGreaterThanOne)
+	}
+	buckets := make([]float64, n)
+	curr := start
+	for i := range buckets {
+		buckets[i] = curr
+		curr *= factor
+	}
+	return NewBuckets(buckets...)
+}
+
+// MakeExponentialDurationBuckets creates a set of exponential duration buckets.
+func MakeExponentialDurationBuckets(start time.Duration, factor float64, n int) DurationBuckets {
+	if n <= 0 {
+		panic(errBucketsCountNeedsGreaterThanZero)
+	}
+	if start <= 0 {
+		panic(errBucketsStartNeedsGreaterThanZero)
+	}
+	if factor <= 1 {
+		panic(errBucketsFactorNeedsGreaterThanOne)
+	}
+	buckets := make([]time.Duration, n)
+	curr := start
+	for i := range buckets {
+		buckets[i] = curr
+		curr = time.Duration(float64(curr) * factor)
+	}
+	return NewDurationBuckets(buckets...)
+}
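
For orientation only, here is a small usage sketch that is not part of the commit. It assumes the Buckets and DurationBuckets interfaces expose the Size, MapValue/MapDuration and UpperBound methods implemented above, and that the package is importable at the path used elsewhere in this diff.

package main

import (
	"fmt"
	"time"

	"github.com/ydb-platform/ydb/library/go/core/metrics"
)

func main() {
	// Explicit bounds; NewBuckets sorts them, so argument order does not matter.
	vb := metrics.NewBuckets(5, 1, 3)
	fmt.Println(vb.Size())      // 3
	fmt.Println(vb.MapValue(2)) // 1: index of the bucket whose upper bound is 3

	// Linear duration buckets: 0s, 1s, 2s, 3s, 4s.
	db := metrics.MakeLinearDurationBuckets(0, time.Second, 5)
	fmt.Println(db.MapDuration(2500 * time.Millisecond)) // 3

	// Exponential value buckets: 1, 2, 4, 8.
	eb := metrics.MakeExponentialBuckets(1, 2, 4)
	fmt.Println(eb.UpperBound(3)) // 8
}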

+ 183 - 0
library/go/core/metrics/buckets_test.go

@@ -0,0 +1,183 @@
+package metrics
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewDurationBuckets(t *testing.T) {
+	buckets := []time.Duration{
+		1 * time.Second,
+		3 * time.Second,
+		5 * time.Second,
+	}
+	bk := NewDurationBuckets(buckets...)
+
+	expect := durationBuckets{
+		buckets: []time.Duration{
+			1 * time.Second,
+			3 * time.Second,
+			5 * time.Second,
+		},
+	}
+	assert.Equal(t, expect, bk)
+}
+
+func Test_durationBuckets_MapDuration(t *testing.T) {
+	bk := NewDurationBuckets([]time.Duration{
+		1 * time.Second,
+		3 * time.Second,
+		5 * time.Second,
+	}...)
+
+	for i := 0; i <= bk.Size(); i++ {
+		assert.Equal(t, i, bk.MapDuration(time.Duration(i*2)*time.Second))
+	}
+}
+
+func Test_durationBuckets_Size(t *testing.T) {
+	var buckets []time.Duration
+	for i := 1; i < 3; i++ {
+		buckets = append(buckets, time.Duration(i)*time.Second)
+		bk := NewDurationBuckets(buckets...)
+		assert.Equal(t, i, bk.Size())
+	}
+}
+
+func Test_durationBuckets_UpperBound(t *testing.T) {
+	bk := NewDurationBuckets([]time.Duration{
+		1 * time.Second,
+		2 * time.Second,
+		3 * time.Second,
+	}...)
+
+	assert.Panics(t, func() { bk.UpperBound(999) })
+
+	for i := 0; i < bk.Size(); i++ {
+		assert.Equal(t, time.Duration(i+1)*time.Second, bk.UpperBound(i))
+	}
+}
+
+func TestNewBuckets(t *testing.T) {
+	bk := NewBuckets(1, 3, 5)
+
+	expect := buckets{
+		buckets: []float64{1, 3, 5},
+	}
+	assert.Equal(t, expect, bk)
+}
+
+func Test_buckets_MapValue(t *testing.T) {
+	bk := NewBuckets(1, 3, 5)
+
+	for i := 0; i <= bk.Size(); i++ {
+		assert.Equal(t, i, bk.MapValue(float64(i*2)))
+	}
+}
+
+func Test_buckets_Size(t *testing.T) {
+	var buckets []float64
+	for i := 1; i < 3; i++ {
+		buckets = append(buckets, float64(i))
+		bk := NewBuckets(buckets...)
+		assert.Equal(t, i, bk.Size())
+	}
+}
+
+func Test_buckets_UpperBound(t *testing.T) {
+	bk := NewBuckets(1, 2, 3)
+
+	assert.Panics(t, func() { bk.UpperBound(999) })
+
+	for i := 0; i < bk.Size(); i++ {
+		assert.Equal(t, float64(i+1), bk.UpperBound(i))
+	}
+}
+
+func TestMakeLinearBuckets_CorrectParameters_NotPanics(t *testing.T) {
+	assert.NotPanics(t, func() {
+		assert.Equal(t,
+			NewBuckets(0.0, 1.0, 2.0),
+			MakeLinearBuckets(0, 1, 3),
+		)
+	})
+}
+
+func TestMakeLinearBucketsPanicsOnBadCount(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeLinearBuckets(0, 1, 0)
+	})
+}
+
+func TestMakeLinearDurationBuckets(t *testing.T) {
+	assert.NotPanics(t, func() {
+		assert.Equal(t,
+			NewDurationBuckets(0, time.Second, 2*time.Second),
+			MakeLinearDurationBuckets(0*time.Second, 1*time.Second, 3),
+		)
+	})
+}
+
+func TestMakeLinearDurationBucketsPanicsOnBadCount(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeLinearDurationBuckets(0*time.Second, 1*time.Second, 0)
+	})
+}
+
+func TestMakeExponentialBuckets(t *testing.T) {
+	assert.NotPanics(t, func() {
+		assert.Equal(
+			t,
+			NewBuckets(2, 4, 8),
+			MakeExponentialBuckets(2, 2, 3),
+		)
+	})
+}
+
+func TestMakeExponentialBucketsPanicsOnBadCount(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialBuckets(2, 2, 0)
+	})
+}
+
+func TestMakeExponentialBucketsPanicsOnBadStart(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialBuckets(0, 2, 2)
+	})
+}
+
+func TestMakeExponentialBucketsPanicsOnBadFactor(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialBuckets(2, 1, 2)
+	})
+}
+
+func TestMakeExponentialDurationBuckets(t *testing.T) {
+	assert.NotPanics(t, func() {
+		assert.Equal(
+			t,
+			NewDurationBuckets(2*time.Second, 4*time.Second, 8*time.Second),
+			MakeExponentialDurationBuckets(2*time.Second, 2, 3),
+		)
+	})
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadCount(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialDurationBuckets(2*time.Second, 2, 0)
+	})
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadStart(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialDurationBuckets(0, 2, 2)
+	})
+}
+
+func TestMakeExponentialDurationBucketsPanicsOnBadFactor(t *testing.T) {
+	assert.Panics(t, func() {
+		MakeExponentialDurationBuckets(2*time.Second, 1, 2)
+	})
+}

+ 9 - 0
library/go/core/metrics/collect/collect.go

@@ -0,0 +1,9 @@
+package collect
+
+import (
+	"context"
+
+	"github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+type Func func(ctx context.Context, r metrics.Registry, c metrics.CollectPolicy)

+ 78 - 0
library/go/core/metrics/collect/policy/inflight/inflight.go

@@ -0,0 +1,78 @@
+package inflight
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/ydb-platform/ydb/library/go/core/metrics"
+	"github.com/ydb-platform/ydb/library/go/x/xsync"
+)
+
+var _ metrics.CollectPolicy = (*inflightPolicy)(nil)
+
+type inflightPolicy struct {
+	addCollectLock sync.Mutex
+	collect        atomic.Value // func(ctx context.Context)
+
+	minUpdateInterval time.Duration
+	lastUpdate        time.Time
+
+	inflight xsync.SingleInflight
+}
+
+func NewCollectorPolicy(opts ...Option) metrics.CollectPolicy {
+	c := &inflightPolicy{
+		minUpdateInterval: time.Second,
+		inflight:          xsync.NewSingleInflight(),
+	}
+	c.collect.Store(func(context.Context) {})
+
+	for _, opt := range opts {
+		opt(c)
+	}
+
+	return c
+}
+
+func (i *inflightPolicy) RegisteredCounter(counterFunc func() int64) func() int64 {
+	return func() int64 {
+		i.tryInflightUpdate()
+		return counterFunc()
+	}
+}
+
+func (i *inflightPolicy) RegisteredGauge(gaugeFunc func() float64) func() float64 {
+	return func() float64 {
+		i.tryInflightUpdate()
+		return gaugeFunc()
+	}
+}
+
+func (i *inflightPolicy) AddCollect(collect func(context.Context)) {
+	oldCollect := i.getCollect()
+	i.setCollect(func(ctx context.Context) {
+		oldCollect(ctx)
+		collect(ctx)
+	})
+}
+
+func (i *inflightPolicy) tryInflightUpdate() {
+	i.inflight.Do(func() {
+		if time.Since(i.lastUpdate) < i.minUpdateInterval {
+			return
+		}
+
+		i.getCollect()(context.Background())
+		i.lastUpdate = time.Now()
+	})
+}
+
+func (i *inflightPolicy) getCollect() func(context.Context) {
+	return i.collect.Load().(func(context.Context))
+}
+
+func (i *inflightPolicy) setCollect(collect func(context.Context)) {
+	i.collect.Store(collect)
+}

+ 11 - 0
library/go/core/metrics/collect/policy/inflight/inflight_opts.go

@@ -0,0 +1,11 @@
+package inflight
+
+import "time"
+
+type Option func(*inflightPolicy)
+
+func WithMinCollectInterval(interval time.Duration) Option {
+	return func(c *inflightPolicy) {
+		c.minUpdateInterval = interval
+	}
+}
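
A minimal wiring sketch, an assumption rather than part of the commit: the policy is created once, optionally tuned with WithMinCollectInterval from just above, the expensive probe is attached via AddCollect, and the cheap read is wrapped with RegisteredGauge. Reading the gauge then triggers at most one refresh per interval, deduplicated across concurrent readers by SingleInflight. The registry value, metric name, and probe are hypothetical.

package app // hypothetical package

import (
	"context"
	"time"

	"github.com/ydb-platform/ydb/library/go/core/metrics"
	"github.com/ydb-platform/ydb/library/go/core/metrics/collect/policy/inflight"
)

func registerLoadGauge(r metrics.Registry) {
	// Refresh at most once every 10 seconds, no matter how often the gauge is read.
	policy := inflight.NewCollectorPolicy(inflight.WithMinCollectInterval(10 * time.Second))

	var load float64
	policy.AddCollect(func(ctx context.Context) {
		load = probeLoad(ctx) // hypothetical expensive probe
	})

	r.FuncGauge("load", policy.RegisteredGauge(func() float64 {
		return load
	}))
}

func probeLoad(context.Context) float64 { return 0 } // placeholder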

+ 8 - 0
library/go/core/metrics/collect/policy/inflight/ya.make

@@ -0,0 +1,8 @@
+GO_LIBRARY()
+
+SRCS(
+    inflight.go
+    inflight_opts.go
+)
+
+END()

+ 1 - 0
library/go/core/metrics/collect/policy/ya.make

@@ -0,0 +1 @@
+RECURSE(inflight)

+ 229 - 0
library/go/core/metrics/collect/system.go

@@ -0,0 +1,229 @@
+// A dashboard generator for these metrics can be found at: github.com/ydb-platform/ydb/arcadia/library/go/yandex/monitoring-dashboards
+package collect
+
+import (
+	"context"
+	"os"
+	"runtime"
+	"runtime/debug"
+	"time"
+
+	"github.com/prometheus/procfs"
+	"github.com/ydb-platform/ydb/library/go/core/buildinfo"
+	"github.com/ydb-platform/ydb/library/go/core/metrics"
+)
+
+var _ Func = GoMetrics
+
+func GoMetrics(_ context.Context, r metrics.Registry, c metrics.CollectPolicy) {
+	if r == nil {
+		return
+	}
+	r = r.WithPrefix("go")
+
+	var stats debug.GCStats
+	stats.PauseQuantiles = make([]time.Duration, 5) // Minimum, 25%, 50%, 75%, and maximum pause times.
+	var numGoroutine, numThread int
+	var ms runtime.MemStats
+
+	c.AddCollect(func(context.Context) {
+		debug.ReadGCStats(&stats)
+		runtime.ReadMemStats(&ms)
+
+		numThread, _ = runtime.ThreadCreateProfile(nil)
+		numGoroutine = runtime.NumGoroutine()
+	})
+
+	gcRegistry := r.WithPrefix("gc")
+	gcRegistry.FuncCounter("num", c.RegisteredCounter(func() int64 {
+		return stats.NumGC
+	}))
+	gcRegistry.FuncCounter(r.ComposeName("pause", "total", "ns"), c.RegisteredCounter(func() int64 {
+		return stats.PauseTotal.Nanoseconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "min"), c.RegisteredGauge(func() float64 {
+		return stats.PauseQuantiles[0].Seconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "25"), c.RegisteredGauge(func() float64 {
+		return stats.PauseQuantiles[1].Seconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "50"), c.RegisteredGauge(func() float64 {
+		return stats.PauseQuantiles[2].Seconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "75"), c.RegisteredGauge(func() float64 {
+		return stats.PauseQuantiles[3].Seconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("pause", "quantile", "max"), c.RegisteredGauge(func() float64 {
+		return stats.PauseQuantiles[4].Seconds()
+	}))
+	gcRegistry.FuncGauge(r.ComposeName("last", "ts"), c.RegisteredGauge(func() float64 {
+		return float64(ms.LastGC)
+	}))
+	gcRegistry.FuncCounter(r.ComposeName("forced", "num"), c.RegisteredCounter(func() int64 {
+		return int64(ms.NumForcedGC)
+	}))
+
+	r.FuncGauge(r.ComposeName("goroutine", "num"), c.RegisteredGauge(func() float64 {
+		return float64(numGoroutine)
+	}))
+	r.FuncGauge(r.ComposeName("thread", "num"), c.RegisteredGauge(func() float64 {
+		return float64(numThread)
+	}))
+
+	memRegistry := r.WithPrefix("mem")
+	memRegistry.FuncCounter(r.ComposeName("alloc", "total"), c.RegisteredCounter(func() int64 {
+		return int64(ms.TotalAlloc)
+	}))
+	memRegistry.FuncGauge("sys", c.RegisteredGauge(func() float64 {
+		return float64(ms.Sys)
+	}))
+	memRegistry.FuncCounter("lookups", c.RegisteredCounter(func() int64 {
+		return int64(ms.Lookups)
+	}))
+	memRegistry.FuncCounter("mallocs", c.RegisteredCounter(func() int64 {
+		return int64(ms.Mallocs)
+	}))
+	memRegistry.FuncCounter("frees", c.RegisteredCounter(func() int64 {
+		return int64(ms.Frees)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "alloc"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapAlloc)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapSys)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "idle"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapIdle)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "inuse"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapInuse)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "released"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapReleased)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("heap", "objects"), c.RegisteredGauge(func() float64 {
+		return float64(ms.HeapObjects)
+	}))
+
+	memRegistry.FuncGauge(r.ComposeName("stack", "inuse"), c.RegisteredGauge(func() float64 {
+		return float64(ms.StackInuse)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("stack", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.StackSys)
+	}))
+
+	memRegistry.FuncGauge(r.ComposeName("span", "inuse"), c.RegisteredGauge(func() float64 {
+		return float64(ms.MSpanInuse)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("span", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.MSpanSys)
+	}))
+
+	memRegistry.FuncGauge(r.ComposeName("cache", "inuse"), c.RegisteredGauge(func() float64 {
+		return float64(ms.MCacheInuse)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("cache", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.MCacheSys)
+	}))
+
+	memRegistry.FuncGauge(r.ComposeName("buck", "hash", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.BuckHashSys)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("gc", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.GCSys)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("other", "sys"), c.RegisteredGauge(func() float64 {
+		return float64(ms.OtherSys)
+	}))
+	memRegistry.FuncGauge(r.ComposeName("gc", "next"), c.RegisteredGauge(func() float64 {
+		return float64(ms.NextGC)
+	}))
+
+	memRegistry.FuncGauge(r.ComposeName("gc", "cpu", "fraction"), c.RegisteredGauge(func() float64 {
+		return ms.GCCPUFraction
+	}))
+}
+
+var _ Func = ProcessMetrics
+
+func ProcessMetrics(_ context.Context, r metrics.Registry, c metrics.CollectPolicy) {
+	if r == nil {
+		return
+	}
+	buildVersion := buildinfo.Info.ArcadiaSourceRevision
+	r.WithTags(map[string]string{"revision": buildVersion}).Gauge("build").Set(1.0)
+
+	pid := os.Getpid()
+	proc, err := procfs.NewProc(pid)
+	if err != nil {
+		return
+	}
+
+	procRegistry := r.WithPrefix("proc")
+
+	var ioStat procfs.ProcIO
+	var procStat procfs.ProcStat
+	var fd int
+	var cpuWait uint64
+
+	const clocksPerSec = 100
+
+	c.AddCollect(func(ctx context.Context) {
+		if gatheredFD, err := proc.FileDescriptorsLen(); err == nil {
+			fd = gatheredFD
+		}
+
+		if gatheredIOStat, err := proc.IO(); err == nil {
+			ioStat.SyscW = gatheredIOStat.SyscW
+			ioStat.WriteBytes = gatheredIOStat.WriteBytes
+			ioStat.SyscR = gatheredIOStat.SyscR
+			ioStat.ReadBytes = gatheredIOStat.ReadBytes
+		}
+
+		if gatheredStat, err := proc.Stat(); err == nil {
+			procStat.UTime = gatheredStat.UTime
+			procStat.STime = gatheredStat.STime
+			procStat.RSS = gatheredStat.RSS
+		}
+
+		if gatheredSched, err := proc.Schedstat(); err == nil {
+			cpuWait = gatheredSched.WaitingNanoseconds
+		}
+	})
+
+	procRegistry.FuncGauge("fd", c.RegisteredGauge(func() float64 {
+		return float64(fd)
+	}))
+
+	ioRegistry := procRegistry.WithPrefix("io")
+	ioRegistry.FuncCounter(r.ComposeName("read", "count"), c.RegisteredCounter(func() int64 {
+		return int64(ioStat.SyscR)
+	}))
+	ioRegistry.FuncCounter(r.ComposeName("read", "bytes"), c.RegisteredCounter(func() int64 {
+		return int64(ioStat.ReadBytes)
+	}))
+	ioRegistry.FuncCounter(r.ComposeName("write", "count"), c.RegisteredCounter(func() int64 {
+		return int64(ioStat.SyscW)
+	}))
+	ioRegistry.FuncCounter(r.ComposeName("write", "bytes"), c.RegisteredCounter(func() int64 {
+		return int64(ioStat.WriteBytes)
+	}))
+
+	cpuRegistry := procRegistry.WithPrefix("cpu")
+	cpuRegistry.FuncCounter(r.ComposeName("total", "ns"), c.RegisteredCounter(func() int64 {
+		return int64(procStat.UTime+procStat.STime) * (1_000_000_000 / clocksPerSec)
+	}))
+	cpuRegistry.FuncCounter(r.ComposeName("user", "ns"), c.RegisteredCounter(func() int64 {
+		return int64(procStat.UTime) * (1_000_000_000 / clocksPerSec)
+	}))
+	cpuRegistry.FuncCounter(r.ComposeName("system", "ns"), c.RegisteredCounter(func() int64 {
+		return int64(procStat.STime) * (1_000_000_000 / clocksPerSec)
+	}))
+	cpuRegistry.FuncCounter(r.ComposeName("wait", "ns"), c.RegisteredCounter(func() int64 {
+		return int64(cpuWait)
+	}))
+
+	procRegistry.FuncGauge(r.ComposeName("mem", "rss"), c.RegisteredGauge(func() float64 {
+		return float64(procStat.RSS)
+	}))
+}
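
To close the loop, a hedged example of how GoMetrics and ProcessMetrics above would typically be registered in an application. The Registry value is assumed to come from one of this library's metrics backends, which are not shown in this diff.

package app // hypothetical package

import (
	"context"

	"github.com/ydb-platform/ydb/library/go/core/metrics"
	"github.com/ydb-platform/ydb/library/go/core/metrics/collect"
	"github.com/ydb-platform/ydb/library/go/core/metrics/collect/policy/inflight"
)

// registerSystemMetrics attaches the runtime and process collectors to r;
// reads of the resulting gauges are throttled by the inflight policy.
func registerSystemMetrics(ctx context.Context, r metrics.Registry) {
	policy := inflight.NewCollectorPolicy()
	for _, fn := range []collect.Func{collect.GoMetrics, collect.ProcessMetrics} {
		fn(ctx, r, policy)
	}
}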

+ 10 - 0
library/go/core/metrics/collect/ya.make

@@ -0,0 +1,10 @@
+GO_LIBRARY()
+
+SRCS(
+    collect.go
+    system.go
+)
+
+END()
+
+RECURSE(policy)

+ 3 - 0
library/go/core/metrics/gotest/ya.make

@@ -0,0 +1,3 @@
+GO_TEST_FOR(library/go/core/metrics)
+
+END()

Some files were not shown because too many files changed in this diff