Update module prometheus/client_golang to v0.9.4 (#245)
Reviewed-on: https://kolaente.dev/vikunja/api/pulls/245
29 vendor/github.com/prometheus/client_golang/prometheus/build_info.go (generated, vendored, new file)
@@ -0,0 +1,29 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.12
+
+package prometheus
+
+import "runtime/debug"
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
+func readBuildInfo() (path, version, sum string) {
+	path, version, sum = "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	return
+}
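Aside: the new file above only wraps debug.ReadBuildInfo from the standard library (available since Go 1.12, hence the build tag). A minimal standalone sketch of the same pattern — the fallback values mirror the vendored code, the program itself is illustrative:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// debug.ReadBuildInfo returns (*debug.BuildInfo, bool); ok is false when
	// the binary was built without Go module support.
	path, version, sum := "unknown", "unknown", "unknown"
	if bi, ok := debug.ReadBuildInfo(); ok {
		path, version, sum = bi.Main.Path, bi.Main.Version, bi.Main.Sum
	}
	fmt.Println(path, version, sum)
}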
22 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go (generated, vendored, new file)
@@ -0,0 +1,22 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.12
+
+package prometheus
+
+// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
+// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
+func readBuildInfo() (path, version, sum string) {
+	return "unknown", "unknown", "unknown"
+}
121 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (generated, vendored)
@@ -14,9 +14,9 @@
 package prometheus
 
 import (
-	"fmt"
 	"runtime"
 	"runtime/debug"
+	"sync"
 	"time"
 )
 
@@ -26,16 +26,41 @@ type goCollector struct {
 	gcDesc         *Desc
 	goInfoDesc     *Desc
 
-	// metrics to describe and collect
-	metrics memStatsMetrics
+	// ms... are memstats related.
+	msLast          *runtime.MemStats // Previously collected memstats.
+	msLastTimestamp time.Time
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
+	msMetrics       memStatsMetrics
+	msRead          func(*runtime.MemStats) // For mocking in tests.
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
 }
 
-// NewGoCollector returns a collector which exports metrics about the current Go
+// NewGoCollector returns a collector that exports metrics about the current Go
 // process. This includes memory stats. To collect those, runtime.ReadMemStats
-// is called. This causes a stop-the-world, which is very short with Go1.9+
-// (~25µs). However, with older Go versions, the stop-the-world duration depends
-// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// is called. This requires to “stop the world”, which usually only happens for
+// garbage collection (GC). Take the following implications into account when
+// deciding whether to use the Go collector:
+//
+// 1. The performance impact of stopping the world is the more relevant the more
+// frequently metrics are collected. However, with Go1.9 or later the
+// stop-the-world time per metrics collection is very short (~25µs) so that the
+// performance impact will only matter in rare cases. However, with older Go
+// versions, the stop-the-world duration depends on the heap size and can be
+// quite significant (~1.7 ms/GiB as per
 // https://go-review.googlesource.com/c/go/+/34937).
+//
+// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+// metrics collection happens to coincide with GC, it will only complete after
+// GC has finished. Usually, GC is fast enough to not cause problems. However,
+// with a very large heap, GC might take multiple seconds, which is enough to
+// cause scrape timeouts in common setups. To avoid this problem, the Go
+// collector will use the memstats from a previous collection if
+// runtime.ReadMemStats takes more than 1s. However, if there are no previously
+// collected memstats, or their collection is more than 5m ago, the collection
+// will block until runtime.ReadMemStats succeeds. (The problem might be solved
+// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
+// issue.)
 func NewGoCollector() Collector {
 	return &goCollector{
 		goroutinesDesc: NewDesc(
@@ -54,7 +79,11 @@ func NewGoCollector() Collector {
 			"go_info",
 			"Information about the Go environment.",
 			nil, Labels{"version": runtime.Version()}),
-		metrics: memStatsMetrics{
+		msLast:    &runtime.MemStats{},
+		msRead:    runtime.ReadMemStats,
+		msMaxWait: time.Second,
+		msMaxAge:  5 * time.Minute,
+		msMetrics: memStatsMetrics{
 			{
 				desc: NewDesc(
 					memstatNamespace("alloc_bytes"),
@@ -253,7 +282,7 @@ func NewGoCollector() Collector {
 }
 
 func memstatNamespace(s string) string {
-	return fmt.Sprintf("go_memstats_%s", s)
+	return "go_memstats_" + s
 }
 
 // Describe returns all descriptions of the collector.
@@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
 	ch <- c.threadsDesc
 	ch <- c.gcDesc
 	ch <- c.goInfoDesc
-	for _, i := range c.metrics {
+	for _, i := range c.msMetrics {
 		ch <- i.desc
 	}
 }
 
 // Collect returns the current state of all metrics of the collector.
 func (c *goCollector) Collect(ch chan<- Metric) {
+	var (
+		ms   = &runtime.MemStats{}
+		done = make(chan struct{})
+	)
+	// Start reading memstats first as it might take a while.
+	go func() {
+		c.msRead(ms)
+		c.msMtx.Lock()
+		c.msLast = ms
+		c.msLastTimestamp = time.Now()
+		c.msMtx.Unlock()
+		close(done)
+	}()
+
 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
 	n, _ := runtime.ThreadCreateProfile(nil)
 	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 
 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
 
-	ms := &runtime.MemStats{}
-	runtime.ReadMemStats(ms)
-	for _, i := range c.metrics {
+	timer := time.NewTimer(c.msMaxWait)
+	select {
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
+		c.msCollect(ch, ms)
+		return
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
+	}
+	c.msMtx.Lock()
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
+		// Last memstats are recent enough. Collect from them under the lock.
+		c.msCollect(ch, c.msLast)
+		c.msMtx.Unlock()
+		return
+	}
+	// If we are here, the last memstats are too old or don't exist. We have
+	// to wait until our own ReadMemStats finally completes. For that to
+	// happen, we have to release the lock.
+	c.msMtx.Unlock()
+	<-done
+	c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+	for _, i := range c.msMetrics {
 		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
 	}
 }
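Aside: the rewritten Collect above is a "fresh read with stale fallback" pattern: the expensive runtime.ReadMemStats runs in a goroutine, and a timer decides whether to wait for it or to serve the previous snapshot. A self-contained sketch of just that pattern — function and variable names here are illustrative, not the library's:

package main

import (
	"fmt"
	"runtime"
	"time"
)

// readWithFallback starts the expensive read in a goroutine and falls back to
// a previously cached value if the read does not finish within maxWait. This
// mirrors the select-on-done-or-timer shape used by goCollector.Collect; the
// background read still completes eventually and could refresh the cache.
func readWithFallback(last *runtime.MemStats, maxWait time.Duration) *runtime.MemStats {
	fresh := &runtime.MemStats{}
	done := make(chan struct{})
	go func() {
		runtime.ReadMemStats(fresh) // May block behind an ongoing GC.
		close(done)
	}()
	timer := time.NewTimer(maxWait)
	select {
	case <-done:
		timer.Stop() // Avoid piling up timers at high call frequencies.
		return fresh
	case <-timer.C:
		return last // Stale but immediately available.
	}
}

func main() {
	cached := &runtime.MemStats{}
	runtime.ReadMemStats(cached)
	fmt.Println(readWithFallback(cached, time.Second).Alloc)
}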
@@ -299,3 +364,33 @@ type memStatsMetrics []struct {
 	eval    func(*runtime.MemStats) float64
 	valType ValueType
 }
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := readBuildInfo()
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
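Aside: NewBuildInfoCollector becomes part of the public client_golang API with this version. Wiring it up looks like the sketch below; the listen address is illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Exposes go_build_info{path="...",version="...",checksum="..."} 1.
	prometheus.MustRegister(prometheus.NewBuildInfoCollector())
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil) // Address is illustrative.
}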
112 vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored)
@@ -224,18 +224,21 @@ type histogramCounts struct {
 }
 
 type histogram struct {
-	// countAndHotIdx is a complicated one. For lock-free yet atomic
-	// observations, we need to save the total count of observations again,
-	// combined with the index of the currently-hot counts struct, so that
-	// we can perform the operation on both values atomically. The least
-	// significant bit defines the hot counts struct. The remaining 63 bits
-	// represent the total count of observations. This happens under the
-	// assumption that the 63bit count will never overflow. Rationale: An
-	// observations takes about 30ns. Let's assume it could happen in
-	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
-	// which is about 3000 years.
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// histogramCounts, as a marker for completion.
 	//
-	// This has to be first in the struct for 64bit alignment. See
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the histogram) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+	//
+	// Fields with atomic access first! See alignment constraint:
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	countAndHotIdx uint64
 
@@ -243,16 +246,14 @@ type histogram struct {
 	desc     *Desc
 	writeMtx sync.Mutex // Only used in the Write method.
 
-	upperBounds []float64
-
 	// Two counts, one is "hot" for lock-free observations, the other is
 	// "cold" for writing out a dto.Metric. It has to be an array of
 	// pointers to guarantee 64bit alignment of the histogramCounts, see
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*histogramCounts
-	hotIdx int // Index of currently-hot counts. Only used within Write.
 
-	labelPairs []*dto.LabelPair
+	upperBounds []float64
+	labelPairs  []*dto.LabelPair
 }
 
 func (h *histogram) Desc() *Desc {
@@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) {
 	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
 	i := sort.SearchFloat64s(h.upperBounds, v)
 
-	// We increment h.countAndHotIdx by 2 so that the counter in the upper
-	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&h.countAndHotIdx, 2)
-	hotCounts := h.counts[n%2]
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	hotCounts := h.counts[n>>63]
 
 	if i < len(h.upperBounds) {
 		atomic.AddUint64(&hotCounts.buckets[i], 1)
@@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) {
 }
 
 func (h *histogram) Write(out *dto.Metric) error {
-	var (
-		his                   = &dto.Histogram{}
-		buckets               = make([]*dto.Bucket, len(h.upperBounds))
-		hotCounts, coldCounts *histogramCounts
-		count                 uint64
-	)
-
-	// For simplicity, we mutex the rest of this method. It is not in the
-	// hot path, i.e. Observe is called much more often than Write. The
-	// complication of making Write lock-free isn't worth it.
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
 	h.writeMtx.Lock()
 	defer h.writeMtx.Unlock()
 
-	// This is a bit arcane, which is why the following spells out this if
-	// clause in English:
-	//
-	// If the currently-hot counts struct is #0, we atomically increment
-	// h.countAndHotIdx by 1 so that from now on Observe will use the counts
-	// struct #1. Furthermore, the atomic increment gives us the new value,
-	// which, in its most significant 63 bits, tells us the count of
-	// observations done so far up to and including currently ongoing
-	// observations still using the counts struct just changed from hot to
-	// cold. To have a normal uint64 for the count, we bitshift by 1 and
-	// save the result in count. We also set h.hotIdx to 1 for the next
-	// Write call, and we will refer to counts #1 as hotCounts and to counts
-	// #0 as coldCounts.
-	//
-	// If the currently-hot counts struct is #1, we do the corresponding
-	// things the other way round. We have to _decrement_ h.countAndHotIdx
-	// (which is a bit arcane in itself, as we have to express -1 with an
-	// unsigned int...).
-	if h.hotIdx == 0 {
-		count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
-		h.hotIdx = 1
-		hotCounts = h.counts[1]
-		coldCounts = h.counts[0]
-	} else {
-		count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
-		h.hotIdx = 0
-		hotCounts = h.counts[0]
-		coldCounts = h.counts[1]
-	}
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := h.counts[n>>63]
+	coldCounts := h.counts[(^n)>>63]
 
-	// Now we have to wait for the now-declared-cold counts to actually cool
-	// down, i.e. wait for all observations still using it to finish. That's
-	// the case once the count in the cold counts struct is the same as the
-	// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
-	for {
-		if count == atomic.LoadUint64(&coldCounts.count) {
-			break
-		}
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
 		runtime.Gosched() // Let observations get work done.
 	}
 
-	his.SampleCount = proto.Uint64(count)
-	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	his := &dto.Histogram{
+		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
 	var cumCount uint64
 	for i, upperBound := range h.upperBounds {
 		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
-		buckets[i] = &dto.Bucket{
+		his.Bucket[i] = &dto.Bucket{
 			CumulativeCount: proto.Uint64(cumCount),
 			UpperBound:      proto.Float64(upperBound),
 		}
 	}
 
-	his.Bucket = buckets
 	out.Histogram = his
 	out.Label = h.labelPairs
 
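Aside: the new single-uint64 encoding above packs the hot-slot index into the most significant bit and the observation count into the lower 63 bits, so both can be read and updated in one atomic operation. A tiny standalone sketch exercising just that encoding (not library code):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Observe path: +1 bumps the low 63 bits; the MSB selects the hot slot.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot slot:", n>>63, "observations:", n&((1<<63)-1))

	// Write path: +1<<63 flips the hot slot without touching the count.
	n = atomic.AddUint64(&countAndHotIdx, 1<<63)
	fmt.Println("new hot slot:", n>>63, "cold slot:", (^n)>>63,
		"count at swap:", n&((1<<63)-1))
}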
2 vendor/github.com/prometheus/client_golang/prometheus/http.go (generated, vendored)
@@ -330,6 +330,8 @@ type fancyResponseWriterDelegator struct {
 }
 
 func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 
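Aside: //lint:ignore SA1019 is a staticcheck directive that exempts only the node directly following it, which is why the comment sits inside the method body right before the type assertion. A minimal sketch of the same shape (helper name is illustrative):

package main

import "net/http"

func usesCloseNotify(w http.ResponseWriter) bool {
	//lint:ignore SA1019 http.CloseNotifier is deprecated, but the check is
	//intentional; the directive exempts only the statement that follows.
	_, ok := w.(http.CloseNotifier)
	return ok
}

func main() {}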
6 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
@@ -126,7 +126,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	}
 
 	// Set up process metric collection if supported by the runtime.
-	if _, err := procfs.NewStat(); err == nil {
+	if _, err := procfs.NewDefaultFS(); err == nil {
 		c.collectFn = c.processCollect
 	} else {
 		c.collectFn = func(ch chan<- Metric) {
@@ -166,7 +166,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 		return
 	}
 
-	if stat, err := p.NewStat(); err == nil {
+	if stat, err := p.Stat(); err == nil {
 		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
 		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
 		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
@@ -185,7 +185,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 		c.reportError(ch, c.openFDs, err)
 	}
 
-	if limits, err := p.NewLimits(); err == nil {
+	if limits, err := p.Limits(); err == nil {
 		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
 		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
 	} else {
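Aside: the renamed procfs calls (NewDefaultFS, Stat, Limits) stay internal to the collector; user code is unaffected and still just registers it. A minimal sketch, using a fresh registry since the default one already carries a process collector (the listen address is illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// On Linux this reads /proc; on platforms without procfs the collector
	// degrades to reporting an error instead of process stats.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}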
159 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go (generated, vendored)
@@ -74,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator }
 type flusherDelegator struct{ *responseWriterDelegator }
 type hijackerDelegator struct{ *responseWriterDelegator }
 type readerFromDelegator struct{ *responseWriterDelegator }
+type pusherDelegator struct{ *responseWriterDelegator }
 
 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 func (d flusherDelegator) Flush() {
@@ -92,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	d.written += n
 	return n, err
 }
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
 
 var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
 
@@ -195,4 +201,157 @@ func init() {
 			http.CloseNotifier
 		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
 	}
+	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+		return pusherDelegator{d}
+	}
+	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+		}{d, pusherDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+		}{d, pusherDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+		}{d, pusherDelegator{d}, readerFromDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+	}
+	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+		return struct {
+			*responseWriterDelegator
+			http.Pusher
+			io.ReaderFrom
+			http.Hijacker
+			http.Flusher
+			http.CloseNotifier
+		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+	}
 }
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+	d := &responseWriterDelegator{
+		ResponseWriter:     w,
+		observeWriteHeader: observeWriteHeaderFunc,
+	}
+
+	id := 0
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
+	if _, ok := w.(http.CloseNotifier); ok {
+		id += closeNotifier
+	}
+	if _, ok := w.(http.Flusher); ok {
+		id += flusher
+	}
+	if _, ok := w.(http.Hijacker); ok {
+		id += hijacker
+	}
+	if _, ok := w.(io.ReaderFrom); ok {
+		id += readerFrom
+	}
+	if _, ok := w.(http.Pusher); ok {
+		id += pusher
+	}
+
+	return pickDelegator[id](d)
+}
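Aside: the 32-entry pickDelegator table is indexed by a bitmask of the optional interfaces the wrapped ResponseWriter implements. The flag values follow from the numbered comments in the diff itself (pusher alone is entry 16, pusher+closeNotifier is 17, and so on): closeNotifier=1, flusher=2, hijacker=4, readerFrom=8, pusher=16. A standalone sketch of the indexing scheme:

package main

import "fmt"

// Mirror of the promhttp bit flags: each optional http.ResponseWriter
// capability contributes one bit, giving 32 possible combinations.
const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

func main() {
	// A writer that is a Flusher and a Pusher selects delegate #18,
	// matching the "// 18" comment in the init table above.
	id := flusher + pusher
	fmt.Println(id) // 18
}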
181 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go (generated, vendored, deleted)
@@ -1,181 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package promhttp
-
-import (
-	"io"
-	"net/http"
-)
-
-type pusherDelegator struct{ *responseWriterDelegator }
-
-func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
-	return d.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-func init() {
-	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
-		return pusherDelegator{d}
-	}
-	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Flusher
-		}{d, pusherDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-		}{d, pusherDelegator{d}, hijackerDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.Flusher
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			http.Hijacker
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-		}{d, pusherDelegator{d}, readerFromDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Flusher
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.Flusher
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
-	}
-	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
-		return struct {
-			*responseWriterDelegator
-			http.Pusher
-			io.ReaderFrom
-			http.Hijacker
-			http.Flusher
-			http.CloseNotifier
-		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
-	}
-}
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
-	d := &responseWriterDelegator{
-		ResponseWriter:     w,
-		observeWriteHeader: observeWriteHeaderFunc,
-	}
-
-	id := 0
-	if _, ok := w.(http.CloseNotifier); ok {
-		id += closeNotifier
-	}
-	if _, ok := w.(http.Flusher); ok {
-		id += flusher
-	}
-	if _, ok := w.(http.Hijacker); ok {
-		id += hijacker
-	}
-	if _, ok := w.(io.ReaderFrom); ok {
-		id += readerFrom
-	}
-	if _, ok := w.(http.Pusher); ok {
-		id += pusher
-	}
-
-	return pickDelegator[id](d)
-}
44 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go (generated, vendored, deleted)
@@ -1,44 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.8
-
-package promhttp
-
-import (
-	"io"
-	"net/http"
-)
-
-func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
-	d := &responseWriterDelegator{
-		ResponseWriter:     w,
-		observeWriteHeader: observeWriteHeaderFunc,
-	}
-
-	id := 0
-	if _, ok := w.(http.CloseNotifier); ok {
-		id += closeNotifier
-	}
-	if _, ok := w.(http.Flusher); ok {
-		id += flusher
-	}
-	if _, ok := w.(http.Hijacker); ok {
-		id += hijacker
-	}
-	if _, ok := w.(io.ReaderFrom); ok {
-		id += readerFrom
-	}
-
-	return pickDelegator[id](d)
-}
47 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored)
@@ -84,10 +84,32 @@ func Handler() http.Handler {
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-	var inFlightSem chan struct{}
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
 	if opts.MaxRequestsInFlight > 0 {
 		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
 	}
+	if opts.Registry != nil {
+		// Initialize all possibilites that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}
 
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if inFlightSem != nil {
@@ -106,6 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
 			}
+			errCnt.WithLabelValues("gathering").Inc()
 			switch opts.ErrorHandling {
 			case PanicOnError:
 				panic(err)
@@ -146,6 +169,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				if opts.ErrorLog != nil {
 					opts.ErrorLog.Println("error encoding and sending metric family:", err)
 				}
+				errCnt.WithLabelValues("encoding").Inc()
 				switch opts.ErrorHandling {
 				case PanicOnError:
 					panic(err)
@@ -236,9 +260,12 @@ const (
 	// Ignore errors and try to serve as many metrics as possible. However,
 	// if no metrics can be served, serve an HTTP status code 500 and the
 	// last error message in the body. Only use this in deliberate "best
-	// effort" metrics collection scenarios. It is recommended to at least
-	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-	// errors completely.
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
 	ContinueOnError
 	// Panic upon the first error encountered (useful for "crash only" apps).
 	PanicOnError
@@ -261,6 +288,18 @@ type HandlerOpts struct {
 	// logged regardless of the configured ErrorHandling provided ErrorLog
 	// is not nil.
 	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
 	// If DisableCompression is true, the handler will never compress the
 	// response, even if requested by the client.
 	DisableCompression bool
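Aside: the new HandlerOpts.Registry field is what makes the internal error counter visible. A minimal sketch of HandlerFor with the new option (the logger prefix and listen address are illustrative):

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
		// With Registry set, gathering/encoding failures also surface as
		// promhttp_metric_handler_errors_total{cause="..."}.
		Registry: reg,
	})
	http.Handle("/metrics", handler)
	http.ListenAndServe(":8080", nil) // Address is illustrative.
}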
122 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go (generated, vendored)
@@ -14,7 +14,9 @@
 package promhttp
 
 import (
+	"crypto/tls"
 	"net/http"
+	"net/http/httptrace"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
 		return resp, err
 	})
 }
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately buckets Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
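Aside: note the behavioral fix folded into the move above — the trace is now attached to r.Context() instead of context.Background(), so the request's own context (deadlines, cancelation) is preserved. A usage sketch; the metric name is illustrative, the promhttp calls are the real API:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "dns_duration_seconds", // Metric name is illustrative.
		Help:    "Time from request start until DNS resolution finished.",
		Buckets: prometheus.DefBuckets,
	})
	prometheus.MustRegister(dnsLatency)

	trace := &promhttp.InstrumentTrace{
		// Hooks receive seconds since the request started; unset hooks are
		// simply skipped by the middleware.
		DNSDone: func(t float64) { dnsLatency.Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	resp, err := client.Get("https://example.com") // URL is illustrative.
	if err == nil {
		resp.Body.Close()
	}
}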
144 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go (generated, vendored, deleted)
@@ -1,144 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package promhttp
-
-import (
-	"context"
-	"crypto/tls"
-	"net/http"
-	"net/http/httptrace"
-	"time"
-)
-
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the http request. A user
-// may choose to use separately buckets Histograms, or implement custom
-// instance labels on a per function basis.
-type InstrumentTrace struct {
-	GotConn              func(float64)
-	PutIdleConn          func(float64)
-	GotFirstResponseByte func(float64)
-	Got100Continue       func(float64)
-	DNSStart             func(float64)
-	DNSDone              func(float64)
-	ConnectStart         func(float64)
-	ConnectDone          func(float64)
-	TLSHandshakeStart    func(float64)
-	TLSHandshakeDone     func(float64)
-	WroteHeaders         func(float64)
-	Wait100Continue      func(float64)
-	WroteRequest         func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions are not using a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
-		start := time.Now()
-
-		trace := &httptrace.ClientTrace{
-			GotConn: func(_ httptrace.GotConnInfo) {
-				if it.GotConn != nil {
-					it.GotConn(time.Since(start).Seconds())
-				}
-			},
-			PutIdleConn: func(err error) {
-				if err != nil {
-					return
-				}
-				if it.PutIdleConn != nil {
-					it.PutIdleConn(time.Since(start).Seconds())
-				}
-			},
-			DNSStart: func(_ httptrace.DNSStartInfo) {
-				if it.DNSStart != nil {
-					it.DNSStart(time.Since(start).Seconds())
-				}
-			},
-			DNSDone: func(_ httptrace.DNSDoneInfo) {
-				if it.DNSDone != nil {
-					it.DNSDone(time.Since(start).Seconds())
-				}
-			},
-			ConnectStart: func(_, _ string) {
-				if it.ConnectStart != nil {
-					it.ConnectStart(time.Since(start).Seconds())
-				}
-			},
-			ConnectDone: func(_, _ string, err error) {
-				if err != nil {
-					return
-				}
-				if it.ConnectDone != nil {
-					it.ConnectDone(time.Since(start).Seconds())
-				}
-			},
-			GotFirstResponseByte: func() {
-				if it.GotFirstResponseByte != nil {
-					it.GotFirstResponseByte(time.Since(start).Seconds())
-				}
-			},
-			Got100Continue: func() {
-				if it.Got100Continue != nil {
-					it.Got100Continue(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeStart: func() {
-				if it.TLSHandshakeStart != nil {
-					it.TLSHandshakeStart(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
-				if err != nil {
-					return
-				}
-				if it.TLSHandshakeDone != nil {
-					it.TLSHandshakeDone(time.Since(start).Seconds())
-				}
-			},
-			WroteHeaders: func() {
-				if it.WroteHeaders != nil {
-					it.WroteHeaders(time.Since(start).Seconds())
-				}
-			},
-			Wait100Continue: func() {
-				if it.Wait100Continue != nil {
-					it.Wait100Continue(time.Since(start).Seconds())
-				}
-			},
-			WroteRequest: func(_ httptrace.WroteRequestInfo) {
-				if it.WroteRequest != nil {
-					it.WroteRequest(time.Since(start).Seconds())
-				}
-			},
-		}
-		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
-
-		return next.RoundTrip(r)
-	})
-}
115 vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored)
@@ -39,7 +39,7 @@ const quantileLabel = "quantile"
 // A typical use-case is the observation of request latencies. By default, a
 // Summary provides the median, the 90th and the 99th percentile of the latency
 // as rank estimations. However, the default behavior will change in the
-// upcoming v0.10 of the library. There will be no rank estimations at all by
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
 // default. For a sane transition, it is recommended to set the desired rank
 // estimations explicitly.
 //
@@ -61,7 +61,7 @@ type Summary interface {
 // DefObjectives are the default Summary quantile values.
 //
 // Deprecated: DefObjectives will not be used as the default objectives in
-// v0.10 of the library. The default Summary will have no quantiles then.
+// v1.0.0 of the library. The default Summary will have no quantiles then.
 var (
 	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
 
@@ -86,7 +86,7 @@ const (
 // mandatory to set Name to a non-empty string. While all other fields are
 // optional and can safely be left at their zero value, it is recommended to set
 // a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v0.10 of the library.
+// as the default value will change in the upcoming v1.0.0 of the library.
 type SummaryOpts struct {
 	// Namespace, Subsystem, and Name are components of the fully-qualified
 	// name of the Summary (created by joining these components with
@@ -127,9 +127,10 @@ type SummaryOpts struct {
 	// its zero value (i.e. nil). To create a Summary without Objectives,
 	// set it to an empty map (i.e. map[float64]float64{}).
 	//
-	// Deprecated: Note that the current value of DefObjectives is
-	// deprecated. It will be replaced by an empty map in v0.10 of the
-	// library. Please explicitly set Objectives to the desired value.
+	// Note that the current value of DefObjectives is deprecated. It will
+	// be replaced by an empty map in v1.0.0 of the library. Please
+	// explicitly set Objectives to the desired value to avoid problems
+	// during the transition.
 	Objectives map[float64]float64
 
 	// MaxAge defines the duration for which an observation stays relevant
@@ -405,18 +406,21 @@ type summaryCounts struct {
 }
 
 type noObjectivesSummary struct {
-	// countAndHotIdx is a complicated one. For lock-free yet atomic
-	// observations, we need to save the total count of observations again,
-	// combined with the index of the currently-hot counts struct, so that
-	// we can perform the operation on both values atomically. The least
-	// significant bit defines the hot counts struct. The remaining 63 bits
-	// represent the total count of observations. This happens under the
-	// assumption that the 63bit count will never overflow. Rationale: An
-	// observations takes about 30ns. Let's assume it could happen in
-	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
-	// which is about 3000 years.
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
 	//
-	// This has to be first in the struct for 64bit alignment. See
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	countAndHotIdx uint64
 
@@ -429,7 +433,6 @@ type noObjectivesSummary struct {
 	// pointers to guarantee 64bit alignment of the histogramCounts, see
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*summaryCounts
-	hotIdx int // Index of currently-hot counts. Only used within Write.
 
 	labelPairs []*dto.LabelPair
 }
@@ -439,11 +442,11 @@ func (s *noObjectivesSummary) Desc() *Desc {
 }
 
 func (s *noObjectivesSummary) Observe(v float64) {
-	// We increment s.countAndHotIdx by 2 so that the counter in the upper
-	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&s.countAndHotIdx, 2)
-	hotCounts := s.counts[n%2]
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
 
 	for {
 		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
@@ -458,61 +461,33 @@ func (s *noObjectivesSummary) Observe(v float64) {
 }
 
 func (s *noObjectivesSummary) Write(out *dto.Metric) error {
-	var (
-		sum                   = &dto.Summary{}
-		hotCounts, coldCounts *summaryCounts
-		count                 uint64
-	)
-
-	// For simplicity, we mutex the rest of this method. It is not in the
-	// hot path, i.e. Observe is called much more often than Write. The
-	// complication of making Write lock-free isn't worth it.
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
 	s.writeMtx.Lock()
 	defer s.writeMtx.Unlock()
 
-	// This is a bit arcane, which is why the following spells out this if
-	// clause in English:
-	//
-	// If the currently-hot counts struct is #0, we atomically increment
-	// s.countAndHotIdx by 1 so that from now on Observe will use the counts
-	// struct #1. Furthermore, the atomic increment gives us the new value,
-	// which, in its most significant 63 bits, tells us the count of
-	// observations done so far up to and including currently ongoing
-	// observations still using the counts struct just changed from hot to
-	// cold. To have a normal uint64 for the count, we bitshift by 1 and
-	// save the result in count. We also set s.hotIdx to 1 for the next
-	// Write call, and we will refer to counts #1 as hotCounts and to counts
-	// #0 as coldCounts.
-	//
-	// If the currently-hot counts struct is #1, we do the corresponding
-	// things the other way round. We have to _decrement_ s.countAndHotIdx
-	// (which is a bit arcane in itself, as we have to express -1 with an
-	// unsigned int...).
-	if s.hotIdx == 0 {
-		count = atomic.AddUint64(&s.countAndHotIdx, 1) >> 1
-		s.hotIdx = 1
-		hotCounts = s.counts[1]
-		coldCounts = s.counts[0]
-	} else {
-		count = atomic.AddUint64(&s.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
-		s.hotIdx = 0
-		hotCounts = s.counts[0]
-		coldCounts = s.counts[1]
-	}
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
 
-	// Now we have to wait for the now-declared-cold counts to actually cool
-	// down, i.e. wait for all observations still using it to finish. That's
-	// the case once the count in the cold counts struct is the same as the
-	// one atomically retrieved from the upper 63bits of s.countAndHotIdx.
-	for {
-		if count == atomic.LoadUint64(&coldCounts.count) {
-			break
-		}
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
 		runtime.Gosched() // Let observations get work done.
 	}
 
-	sum.SampleCount = proto.Uint64(count)
-	sum.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
 
 	out.Summary = sum
 	out.Label = s.labelPairs
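Aside: given the deprecation notes above (the default Objectives will become empty in a future release), callers should pin the quantiles they rely on. A minimal sketch — the metric name is illustrative, the Objectives map copies the current DefObjectives:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Setting Objectives explicitly keeps behavior stable across the
	// announced default change; use an empty map for no quantiles at all.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_duration_seconds", // Name is illustrative.
		Help:       "Request latency.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}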