Merge pull request #6889 from a-robinson/prom

Update the prometheus library and its dependencies to the most recent versions
This commit is contained in:
Wojciech Tyczynski 2015-04-16 10:12:37 +02:00
commit e453a2c4f3
26 changed files with 674 additions and 374 deletions

18
Godeps/Godeps.json generated
View File

@@ -317,8 +317,8 @@
"Rev": "05017fcccf23c823bfdea560dcc958a136e54fb7" "Rev": "05017fcccf23c823bfdea560dcc958a136e54fb7"
}, },
{ {
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/ext", "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "ba7d65ac66e9da93a714ca18f6d1bc7a0c09100c" "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
}, },
{ {
"ImportPath": "github.com/miekg/dns", "ImportPath": "github.com/miekg/dns",
@@ -348,18 +348,18 @@
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/model", "ImportPath": "github.com/prometheus/client_golang/model",
"Comment": "0.2.0-5-gde5f7a2", "Comment": "0.4.0-1-g692492e",
"Rev": "de5f7a2db9d883392ce3ad667087280fe1ff9cea" "Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/prometheus", "ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.2.0-5-gde5f7a2", "Comment": "0.4.0-1-g692492e",
"Rev": "de5f7a2db9d883392ce3ad667087280fe1ff9cea" "Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
}, },
{ {
"ImportPath": "github.com/prometheus/client_golang/text", "ImportPath": "github.com/prometheus/client_golang/text",
"Comment": "0.2.0-5-gde5f7a2", "Comment": "0.4.0-1-g692492e",
"Rev": "de5f7a2db9d883392ce3ad667087280fe1ff9cea" "Rev": "692492e54b553a81013254cc1fba4b6dd76fad30"
}, },
{ {
"ImportPath": "github.com/prometheus/client_model/go", "ImportPath": "github.com/prometheus/client_model/go",
@@ -368,7 +368,7 @@
}, },
{ {
"ImportPath": "github.com/prometheus/procfs", "ImportPath": "github.com/prometheus/procfs",
"Rev": "6c34ef819e19b4e16f410100ace4aa006f0e3bf8" "Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
}, },
{ {
"ImportPath": "github.com/rackspace/gophercloud", "ImportPath": "github.com/rackspace/gophercloud",

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package ext package pbutil
import ( import (
"bytes" "bytes"
@@ -21,6 +21,8 @@ import (
"testing" "testing"
"testing/quick" "testing/quick"
"github.com/matttproud/golang_protobuf_extensions/pbtest"
. "github.com/golang/protobuf/proto" . "github.com/golang/protobuf/proto"
. "github.com/golang/protobuf/proto/testdata" . "github.com/golang/protobuf/proto/testdata"
) )
@@ -138,10 +140,10 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
func TestEndToEndValid(t *testing.T) { func TestEndToEndValid(t *testing.T) {
for _, test := range [][]Message{ for _, test := range [][]Message{
[]Message{&Empty{}}, {&Empty{}},
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
[]Message{&GoEnum{Foo: FOO_FOO1.Enum()}}, {&GoEnum{Foo: FOO_FOO1.Enum()}},
[]Message{&Strings{ {&Strings{
StringField: String(`This is my gigantic, unhappy string. It exceeds StringField: String(`This is my gigantic, unhappy string. It exceeds
the encoding size of a single byte varint. We are using it to fuzz test the the encoding size of a single byte varint. We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic. correctness of the header decoding mechanisms, which may prove problematic.
@@ -176,45 +178,6 @@ I expect it may. Let's hope you enjoy testing as much as we do.`),
} }
} }
// visitMessage empties the private state fields of the quick.Value()-generated
// Protocol Buffer messages, for they cause an inordinate amount of problems.
// This is because we are using an automated fuzz generator on a type with
// private fields.
func visitMessage(m Message) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Ptr {
return
}
derefed := t.Elem()
if derefed.Kind() != reflect.Struct {
return
}
v := reflect.ValueOf(m)
elem := v.Elem()
for i := 0; i < elem.NumField(); i++ {
field := elem.FieldByIndex([]int{i})
fieldType := field.Type()
if fieldType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
visitMessage(field.Interface().(Message))
}
if field.Kind() == reflect.Slice {
for i := 0; i < field.Len(); i++ {
elem := field.Index(i)
elemType := elem.Type()
if elemType.Implements(reflect.TypeOf((*Message)(nil)).Elem()) {
visitMessage(elem.Interface().(Message))
}
}
}
}
if field := elem.FieldByName("XXX_unrecognized"); field.IsValid() {
field.Set(reflect.ValueOf([]byte{}))
}
if field := elem.FieldByName("XXX_extensions"); field.IsValid() {
field.Set(reflect.ValueOf(nil))
}
}
// rndMessage generates a random valid Protocol Buffer message. // rndMessage generates a random valid Protocol Buffer message.
func rndMessage(r *rand.Rand) Message { func rndMessage(r *rand.Rand) Message {
var t reflect.Type var t reflect.Type
@@ -307,7 +270,9 @@ func rndMessage(r *rand.Rand) Message {
if !ok { if !ok {
panic("attempt to generate illegal item; consult item 11") panic("attempt to generate illegal item; consult item 11")
} }
visitMessage(v.Interface().(Message)) if err := pbtest.SanitizeGenerated(v.Interface().(Message)); err != nil {
panic(err)
}
return v.Interface().(Message) return v.Interface().(Message)
} }

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package ext package pbutil
import ( import (
"encoding/binary" "encoding/binary"

View File

@@ -12,5 +12,5 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Package ext enables record length-delimited Protocol Buffer streaming. // Package pbutil provides record length-delimited Protocol Buffer streaming.
package ext package pbutil

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package ext package pbutil
import ( import (
"encoding/binary" "encoding/binary"

View File

@@ -27,7 +27,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ext package pbutil
import ( import (
. "github.com/golang/protobuf/proto" . "github.com/golang/protobuf/proto"

View File

@@ -14,10 +14,8 @@
package model package model
import ( import (
"encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
"hash/fnv"
"sort" "sort"
"strings" "strings"
) )
@@ -66,37 +64,7 @@ func (m Metric) String() string {
// Fingerprint returns a Metric's Fingerprint. // Fingerprint returns a Metric's Fingerprint.
func (m Metric) Fingerprint() Fingerprint { func (m Metric) Fingerprint() Fingerprint {
labelNames := make([]string, 0, len(m)) return metricToFingerprint(m)
maxLength := 0
for labelName, labelValue := range m {
labelNames = append(labelNames, string(labelName))
if len(labelName) > maxLength {
maxLength = len(labelName)
}
if len(labelValue) > maxLength {
maxLength = len(labelValue)
}
}
sort.Strings(labelNames)
summer := fnv.New64a()
buf := make([]byte, maxLength)
for _, labelName := range labelNames {
labelValue := m[LabelName(labelName)]
copy(buf, labelName)
summer.Write(buf[:len(labelName)])
summer.Write(separator)
copy(buf, labelValue)
summer.Write(buf[:len(labelValue)])
}
return Fingerprint(binary.LittleEndian.Uint64(summer.Sum(nil)))
} }
// Clone returns a copy of the Metric. // Clone returns a copy of the Metric.
@@ -133,7 +101,7 @@ type COWMetric struct {
// Set sets a label name in the wrapped Metric to a given value and copies the // Set sets a label name in the wrapped Metric to a given value and copies the
// Metric initially, if it is not already a copy. // Metric initially, if it is not already a copy.
func (m COWMetric) Set(ln LabelName, lv LabelValue) { func (m *COWMetric) Set(ln LabelName, lv LabelValue) {
m.doCOW() m.doCOW()
m.Metric[ln] = lv m.Metric[ln] = lv
} }

View File

@@ -22,7 +22,7 @@ func testMetric(t testing.TB) {
}{ }{
{ {
input: Metric{}, input: Metric{},
fingerprint: 2676020557754725067, fingerprint: 14695981039346656037,
}, },
{ {
input: Metric{ input: Metric{
@@ -30,31 +30,27 @@ func testMetric(t testing.TB) {
"occupation": "robot", "occupation": "robot",
"manufacturer": "westinghouse", "manufacturer": "westinghouse",
}, },
fingerprint: 13260944541294022935, fingerprint: 11310079640881077873,
}, },
{ {
input: Metric{ input: Metric{
"x": "y", "x": "y",
}, },
fingerprint: 1470933794305433534, fingerprint: 13948396922932177635,
}, },
// The following two demonstrate a bug in fingerprinting. They
// should not have the same fingerprint with a sane
// fingerprinting function. See
// https://github.com/prometheus/client_golang/issues/74 .
{ {
input: Metric{ input: Metric{
"a": "bb", "a": "bb",
"b": "c", "b": "c",
}, },
fingerprint: 3734646176939799877, fingerprint: 3198632812309449502,
}, },
{ {
input: Metric{ input: Metric{
"a": "b", "a": "b",
"bb": "c", "bb": "c",
}, },
fingerprint: 3734646176939799877, fingerprint: 5774953389407657638,
}, },
} }
@@ -74,3 +70,52 @@ func BenchmarkMetric(b *testing.B) {
testMetric(b) testMetric(b)
} }
} }
func TestCOWMetric(t *testing.T) {
testMetric := Metric{
"to_delete": "test1",
"to_change": "test2",
}
scenarios := []struct {
fn func(*COWMetric)
out Metric
}{
{
fn: func(cm *COWMetric) {
cm.Delete("to_delete")
},
out: Metric{
"to_change": "test2",
},
},
{
fn: func(cm *COWMetric) {
cm.Set("to_change", "changed")
},
out: Metric{
"to_delete": "test1",
"to_change": "changed",
},
},
}
for i, s := range scenarios {
orig := testMetric.Clone()
cm := &COWMetric{
Metric: orig,
}
s.fn(cm)
// Test that the original metric was not modified.
if !orig.Equal(testMetric) {
t.Fatalf("%d. original metric changed; expected %v, got %v", i, testMetric, orig)
}
// Test that the new metric has the right changes.
if !cm.Metric.Equal(s.out) {
t.Fatalf("%d. copied metric doesn't contain expected changes; expected %v, got %v", i, s.out, cm.Metric)
}
}
}

View File

@@ -17,6 +17,7 @@ import (
"bytes" "bytes"
"hash" "hash"
"hash/fnv" "hash/fnv"
"sync"
) )
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is // SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
@@ -28,7 +29,7 @@ var (
// cache the signature of an empty label set. // cache the signature of an empty label set.
emptyLabelSignature = fnv.New64a().Sum64() emptyLabelSignature = fnv.New64a().Sum64()
hashAndBufPool = make(chan *hashAndBuf, 1024) hashAndBufPool sync.Pool
) )
type hashAndBuf struct { type hashAndBuf struct {
@@ -37,19 +38,15 @@ type hashAndBuf struct { type hashAndBuf struct {
} }
func getHashAndBuf() *hashAndBuf { func getHashAndBuf() *hashAndBuf {
select { hb := hashAndBufPool.Get()
case hb := <-hashAndBufPool: if hb == nil {
return hb
default:
return &hashAndBuf{h: fnv.New64a()} return &hashAndBuf{h: fnv.New64a()}
} }
return hb.(*hashAndBuf)
} }
func putHashAndBuf(hb *hashAndBuf) { func putHashAndBuf(hb *hashAndBuf) {
select { hashAndBufPool.Put(hb)
case hashAndBufPool <- hb:
default:
}
} }
// LabelsToSignature returns a unique signature (i.e., fingerprint) for a given // LabelsToSignature returns a unique signature (i.e., fingerprint) for a given
@@ -63,10 +60,10 @@ func LabelsToSignature(labels map[string]string) uint64 {
hb := getHashAndBuf() hb := getHashAndBuf()
defer putHashAndBuf(hb) defer putHashAndBuf(hb)
for k, v := range labels { for labelName, labelValue := range labels {
hb.b.WriteString(k) hb.b.WriteString(labelName)
hb.b.WriteByte(SeparatorByte) hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(v) hb.b.WriteString(labelValue)
hb.h.Write(hb.b.Bytes()) hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64() result ^= hb.h.Sum64()
hb.h.Reset() hb.h.Reset()
@@ -75,10 +72,34 @@ func LabelsToSignature(labels map[string]string) uint64 {
return result return result
} }
// LabelValuesToSignature returns a unique signature (i.e., fingerprint) for the // metricToFingerprint works exactly as LabelsToSignature but takes a Metric as
// values of a given label set. // parameter (rather than a label map) and returns a Fingerprint.
func LabelValuesToSignature(labels map[string]string) uint64 { func metricToFingerprint(m Metric) Fingerprint {
if len(labels) == 0 { if len(m) == 0 {
return Fingerprint(emptyLabelSignature)
}
var result uint64
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for labelName, labelValue := range m {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(labelValue))
hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64()
hb.h.Reset()
hb.b.Reset()
}
return Fingerprint(result)
}
// SignatureForLabels works like LabelsToSignature but takes a Metric as
// parameter (rather than a label map) and only includes the labels with the
// specified LabelNames into the signature calculation.
func SignatureForLabels(m Metric, labels LabelNames) uint64 {
if len(m) == 0 || len(labels) == 0 {
return emptyLabelSignature return emptyLabelSignature
} }
@@ -86,8 +107,10 @@ func LabelValuesToSignature(labels map[string]string) uint64 {
hb := getHashAndBuf() hb := getHashAndBuf()
defer putHashAndBuf(hb) defer putHashAndBuf(hb)
for _, v := range labels { for _, label := range labels {
hb.b.WriteString(v) hb.b.WriteString(string(label))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[label]))
hb.h.Write(hb.b.Bytes()) hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64() result ^= hb.h.Sum64()
hb.h.Reset() hb.h.Reset()
@@ -95,3 +118,33 @@ func LabelValuesToSignature(labels map[string]string) uint64 {
} }
return result return result
} }
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
// parameter (rather than a label map) and excludes the labels with any of the
// specified LabelNames from the signature calculation.
func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
if len(m) == 0 {
return emptyLabelSignature
}
var result uint64
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for labelName, labelValue := range m {
if _, exclude := labels[labelName]; exclude {
continue
}
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(labelValue))
hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64()
hb.h.Reset()
hb.b.Reset()
}
if result == 0 {
return emptyLabelSignature
}
return result
}

View File

@@ -15,10 +15,11 @@ package model
import ( import (
"runtime" "runtime"
"sync"
"testing" "testing"
) )
func testLabelsToSignature(t testing.TB) { func TestLabelsToSignature(t *testing.T) {
var scenarios = []struct { var scenarios = []struct {
in map[string]string in map[string]string
out uint64 out uint64
@@ -42,57 +43,112 @@ func testLabelsToSignature(t testing.TB) {
} }
} }
func TestLabelToSignature(t *testing.T) { func TestMetricToFingerprint(t *testing.T) {
testLabelsToSignature(t) var scenarios = []struct {
} in Metric
out Fingerprint
func TestEmptyLabelSignature(t *testing.T) { }{
input := []map[string]string{nil, {}} {
in: Metric{},
var ms runtime.MemStats out: 14695981039346656037,
runtime.ReadMemStats(&ms) },
{
alloc := ms.Alloc in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
out: 12952432476264840823,
for _, labels := range input { },
LabelsToSignature(labels)
} }
runtime.ReadMemStats(&ms) for i, scenario := range scenarios {
actual := metricToFingerprint(scenario.in)
if got := ms.Alloc; alloc != got { if actual != scenario.out {
t.Fatal("expected LabelsToSignature with empty labels not to perform allocations") t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
func BenchmarkLabelToSignature(b *testing.B) {
for i := 0; i < b.N; i++ {
testLabelsToSignature(b)
}
}
func benchmarkLabelValuesToSignature(b *testing.B, l map[string]string, e uint64) {
for i := 0; i < b.N; i++ {
if a := LabelValuesToSignature(l); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
} }
} }
} }
func BenchmarkLabelValuesToSignatureScalar(b *testing.B) { func TestSignatureForLabels(t *testing.T) {
benchmarkLabelValuesToSignature(b, nil, 14695981039346656037) var scenarios = []struct {
in Metric
labels LabelNames
out uint64
}{
{
in: Metric{},
labels: nil,
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: LabelNames{"fear", "name"},
out: 12952432476264840823,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
labels: LabelNames{"fear", "name"},
out: 12952432476264840823,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: LabelNames{},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: nil,
out: 14695981039346656037,
},
}
for i, scenario := range scenarios {
actual := SignatureForLabels(scenario.in, scenario.labels)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
} }
func BenchmarkLabelValuesToSignatureSingle(b *testing.B) { func TestSignatureWithoutLabels(t *testing.T) {
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value"}, 2653746141194979650) var scenarios = []struct {
} in Metric
labels map[LabelName]struct{}
out uint64
}{
{
in: Metric{},
labels: nil,
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
labels: map[LabelName]struct{}{"foo": struct{}{}},
out: 12952432476264840823,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: map[LabelName]struct{}{},
out: 12952432476264840823,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: nil,
out: 12952432476264840823,
},
}
func BenchmarkLabelValuesToSignatureDouble(b *testing.B) { for i, scenario := range scenarios {
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 8893559499616767364) actual := SignatureWithoutLabels(scenario.in, scenario.labels)
}
func BenchmarkLabelValuesToSignatureTriple(b *testing.B) { if actual != scenario.out {
benchmarkLabelValuesToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 1685970066862087833) t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
} }
func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
@@ -118,3 +174,83 @@ func BenchmarkLabelToSignatureDouble(b *testing.B) {
func BenchmarkLabelToSignatureTriple(b *testing.B) { func BenchmarkLabelToSignatureTriple(b *testing.B) {
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
} }
func benchmarkMetricToFingerprint(b *testing.B, m Metric, e Fingerprint) {
for i := 0; i < b.N; i++ {
if a := metricToFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
}
func BenchmarkMetricToFingerprintScalar(b *testing.B) {
benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
}
func BenchmarkMetricToFingerprintSingle(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value"}, 5147259542624943964)
}
func BenchmarkMetricToFingerprintDouble(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
}
func BenchmarkMetricToFingerprintTriple(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
}
func TestEmptyLabelSignature(t *testing.T) {
input := []map[string]string{nil, {}}
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
alloc := ms.Alloc
for _, labels := range input {
LabelsToSignature(labels)
}
runtime.ReadMemStats(&ms)
if got := ms.Alloc; alloc != got {
t.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
}
}
func benchmarkMetricToFingerprintConc(b *testing.B, m Metric, e Fingerprint, concLevel int) {
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
for i := 0; i < concLevel; i++ {
go func() {
start.Wait()
for j := b.N / concLevel; j >= 0; j-- {
if a := metricToFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
end.Done()
}()
}
b.ResetTimer()
start.Done()
end.Wait()
}
func BenchmarkMetricToFingerprintTripleConc1(b *testing.B) {
benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
}
func BenchmarkMetricToFingerprintTripleConc2(b *testing.B) {
benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
}
func BenchmarkMetricToFingerprintTripleConc4(b *testing.B) {
benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
}
func BenchmarkMetricToFingerprintTripleConc8(b *testing.B) {
benchmarkMetricToFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
}

View File

@@ -88,6 +88,7 @@ func (t Timestamp) String() string {
return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
} }
// MarshalJSON implements the json.Marshaler interface.
func (t Timestamp) MarshalJSON() ([]byte, error) { func (t Timestamp) MarshalJSON() ([]byte, error) {
return []byte(t.String()), nil return []byte(t.String()), nil
} }

View File

@@ -18,8 +18,10 @@ import (
"fmt" "fmt"
"math" "math"
"net/http" "net/http"
"os"
"runtime" "runtime"
"sort" "sort"
"time"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
@@ -498,3 +500,19 @@ func ExampleHistogram() {
// > // >
// > // >
} }
func ExamplePushCollectors() {
hostname, _ := os.Hostname()
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_time",
Help: "The timestamp of the last succesful completion of a DB backup.",
})
completionTime.Set(float64(time.Now().Unix()))
if err := prometheus.PushCollectors(
"db_backup", hostname,
"http://pushgateway:9091",
completionTime,
); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}

View File

@@ -13,6 +13,8 @@
package prometheus package prometheus
import "github.com/prometheus/procfs"
type processCollector struct { type processCollector struct {
pid int pid int
collectFn func(chan<- Metric) collectFn func(chan<- Metric)
@@ -79,7 +81,7 @@ func NewProcessCollectorPIDFn(
} }
// Set up process metric collection if supported by the runtime. // Set up process metric collection if supported by the runtime.
if processCollectSupported() { if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect c.collectFn = c.processCollect
} }
@@ -100,3 +102,41 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
func (c *processCollector) Collect(ch chan<- Metric) { func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch) c.collectFn(ch)
} }
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
return
}
p, err := procfs.NewProc(pid)
if err != nil {
return
}
if stat, err := p.NewStat(); err == nil {
c.cpuTotal.Set(stat.CPUTime())
ch <- c.cpuTotal
c.vsize.Set(float64(stat.VirtualMemory()))
ch <- c.vsize
c.rss.Set(float64(stat.ResidentMemory()))
ch <- c.rss
if startTime, err := stat.StartTime(); err == nil {
c.startTime.Set(startTime)
ch <- c.startTime
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
c.openFDs.Set(float64(fds))
ch <- c.openFDs
}
if limits, err := p.NewLimits(); err == nil {
c.maxFDs.Set(float64(limits.OpenFiles))
ch <- c.maxFDs
}
}

View File

@@ -1,63 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,cgo plan9,cgo solaris,cgo
package prometheus
import "github.com/prometheus/procfs"
func processCollectSupported() bool {
if _, err := procfs.NewStat(); err == nil {
return true
}
return false
}
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
return
}
p, err := procfs.NewProc(pid)
if err != nil {
return
}
if stat, err := p.NewStat(); err == nil {
c.cpuTotal.Set(stat.CPUTime())
ch <- c.cpuTotal
c.vsize.Set(float64(stat.VirtualMemory()))
ch <- c.vsize
c.rss.Set(float64(stat.ResidentMemory()))
ch <- c.rss
if startTime, err := stat.StartTime(); err == nil {
c.startTime.Set(startTime)
ch <- c.startTime
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
c.openFDs.Set(float64(fds))
ch <- c.openFDs
}
if limits, err := p.NewLimits(); err == nil {
c.maxFDs.Set(float64(limits.OpenFiles))
ch <- c.maxFDs
}
}

View File

@@ -1,24 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux,!plan9,!solaris !cgo
package prometheus
func processCollectSupported() bool {
return false
}
func (c *processCollector) processCollect(ch chan<- Metric) {
panic("unreachable")
}

View File

@@ -0,0 +1,65 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}

View File

@@ -158,14 +158,19 @@ func Unregister(c Collector) bool {
// SetMetricFamilyInjectionHook sets a function that is called whenever metrics // SetMetricFamilyInjectionHook sets a function that is called whenever metrics
// are collected. The hook function must be set before metrics collection begins // are collected. The hook function must be set before metrics collection begins
// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The // (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
// MetricFamily protobufs returned by the hook function are added to the // MetricFamily protobufs returned by the hook function are merged with the
// delivered metrics. Each returned MetricFamily must have a unique name (also // metrics collected in the usual way.
// taking into account the MetricFamilies created in the regular way).
// //
// This is a way to directly inject MetricFamily protobufs managed and owned by // This is a way to directly inject MetricFamily protobufs managed and owned by
// the caller. The caller has full responsibility. No sanity checks are // the caller. The caller has full responsibility. As no registration of the
// performed on the returned protobufs (besides the name checks described // injected metrics has happened, there is no descriptor to check against, and
// above). The function must be callable at any time and concurrently. // there are no registration-time checks. If collect-time checks are disabled
// (see function EnableCollectChecks), no sanity checks are performed on the
// returned protobufs at all. If collect-checks are enabled, type and uniqueness
// checks are performed, but no further consistency checks (which would require
// knowledge of a metric descriptor).
//
// The function must be callable at any time and concurrently.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
defRegistry.metricFamilyInjectionHook = hook defRegistry.metricFamilyInjectionHook = hook
} }
@ -187,30 +192,10 @@ func EnableCollectChecks(b bool) {
defRegistry.collectChecksEnabled = b defRegistry.collectChecksEnabled = b
} }
// Push triggers a metric collection and pushes all collected metrics to the
// Pushgateway specified by addr. See the Pushgateway documentation for detailed
// implications of the job and instance parameter. instance can be left
// empty. The Pushgateway will then use the client's IP number instead. Use just
// host:port or ip:port as addr. (Don't add 'http://' or any path.) // host:port or ip:port as addr. (Don't add 'http://' or any path.)
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, addr string) error {
return defRegistry.Push(job, instance, addr, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, addr string) error {
return defRegistry.Push(job, instance, addr, "POST")
}
// encoder is a function that writes a dto.MetricFamily to an io.Writer in a // encoder is a function that writes a dto.MetricFamily to an io.Writer in a
// certain encoding. It returns the number of bytes written and any error // certain encoding. It returns the number of bytes written and any error
// encountered. Note that ext.WriteDelimited and text.MetricFamilyToText are // encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
// encoders. // are encoders.
type encoder func(io.Writer, *dto.MetricFamily) (int, error) type encoder func(io.Writer, *dto.MetricFamily) (int, error)
type registry struct { type registry struct {
@ -346,10 +331,13 @@ func (r *registry) Unregister(c Collector) bool {
return true return true
} }
func (r *registry) Push(job, instance, addr, method string) error { func (r *registry) Push(job, instance, pushURL, method string) error {
u := fmt.Sprintf("http://%s/metrics/jobs/%s", addr, url.QueryEscape(job)) if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
if instance != "" { if instance != "" {
u += "/instances/" + url.QueryEscape(instance) pushURL += "/instances/" + url.QueryEscape(instance)
} }
buf := r.getBuf() buf := r.getBuf()
defer r.giveBuf(buf) defer r.giveBuf(buf)
@ -359,7 +347,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
} }
return err return err
} }
req, err := http.NewRequest(method, u, buf) req, err := http.NewRequest(method, pushURL, buf)
if err != nil { if err != nil {
return err return err
} }
@ -370,7 +358,7 @@ func (r *registry) Push(job, instance, addr, method string) error {
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != 202 { if resp.StatusCode != 202 {
return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, u) return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
} }
return nil return nil
} }
@ -479,10 +467,26 @@ func (r *registry) writePB(w io.Writer, writeEncoded encoder) (int, error) {
if r.metricFamilyInjectionHook != nil { if r.metricFamilyInjectionHook != nil {
for _, mf := range r.metricFamilyInjectionHook() { for _, mf := range r.metricFamilyInjectionHook() {
if _, exists := metricFamiliesByName[mf.GetName()]; exists { existingMF, exists := metricFamiliesByName[mf.GetName()]
return 0, fmt.Errorf("metric family with duplicate name injected: %s", mf) if !exists {
metricFamiliesByName[mf.GetName()] = mf
if r.collectChecksEnabled {
for _, m := range mf.Metric {
if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
return 0, err
}
}
}
continue
}
for _, m := range mf.Metric {
if r.collectChecksEnabled {
if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
return 0, err
}
}
existingMF.Metric = append(existingMF.Metric, m)
} }
metricFamiliesByName[mf.GetName()] = mf
} }
} }
@ -523,11 +527,42 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
) )
} }
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := fnv.New64a()
var buf bytes.Buffer
buf.WriteString(metricFamily.GetName())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
for _, lp := range dtoMetric.Label {
buf.Reset()
buf.WriteString(lp.GetValue())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
}
metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists {
return fmt.Errorf(
"collected metric %q was collected before with the same name and label values",
dtoMetric,
)
}
metricHashes[metricHash] = struct{}{}
if desc == nil {
return nil // Nothing left to check if we have no desc.
}
// Desc consistency with metric family. // Desc consistency with metric family.
if metricFamily.GetName() != desc.fqName {
return fmt.Errorf(
"collected metric %q has name %q but should have %q",
dtoMetric, metricFamily.GetName(), desc.fqName,
)
}
if metricFamily.GetHelp() != desc.help { if metricFamily.GetHelp() != desc.help {
return fmt.Errorf( return fmt.Errorf(
"collected metric %q has help %q but should have %q", "collected metric %q has help %q but should have %q",
dtoMetric, desc.help, metricFamily.GetHelp(), dtoMetric, metricFamily.GetHelp(), desc.help,
) )
} }
@ -557,27 +592,6 @@ func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *d
} }
} }
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := fnv.New64a()
var buf bytes.Buffer
buf.WriteString(desc.fqName)
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
for _, lp := range dtoMetric.Label {
buf.Reset()
buf.WriteString(lp.GetValue())
buf.WriteByte(model.SeparatorByte)
h.Write(buf.Bytes())
}
metricHash := h.Sum64()
if _, exists := metricHashes[metricHash]; exists {
return fmt.Errorf(
"collected metric %q was collected before with the same name and label values",
dtoMetric,
)
}
metricHashes[metricHash] = struct{}{}
r.mtx.RLock() // Remaining checks need the read lock. r.mtx.RLock() // Remaining checks need the read lock.
defer r.mtx.RUnlock() defer r.mtx.RUnlock()
@ -712,6 +726,15 @@ func (s metricSorter) Swap(i, j int) {
} }
func (s metricSorter) Less(i, j int) bool { func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label { for n, lp := range s[i].Label {
vi := lp.GetValue() vi := lp.GetValue()
vj := s[j].Label[n].GetValue() vj := s[j].Label[n].GetValue()

View File

@ -61,31 +61,29 @@ func testHandler(t testing.TB) {
varintBuf := make([]byte, binary.MaxVarintLen32) varintBuf := make([]byte, binary.MaxVarintLen32)
externalMetricFamily := []*dto.MetricFamily{ externalMetricFamily := &dto.MetricFamily{
{ Name: proto.String("externalname"),
Name: proto.String("externalname"), Help: proto.String("externaldocstring"),
Help: proto.String("externaldocstring"), Type: dto.MetricType_COUNTER.Enum(),
Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{
Metric: []*dto.Metric{ {
{ Label: []*dto.LabelPair{
Label: []*dto.LabelPair{ {
{ Name: proto.String("externallabelname"),
Name: proto.String("externallabelname"), Value: proto.String("externalval1"),
Value: proto.String("externalval1"),
},
{
Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"),
},
}, },
Counter: &dto.Counter{ {
Value: proto.Float64(1), Name: proto.String("externalconstname"),
Value: proto.String("externalconstvalue"),
}, },
}, },
Counter: &dto.Counter{
Value: proto.Float64(1),
},
}, },
}, },
} }
marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily[0]) marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -216,16 +214,42 @@ metric: <
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`) `)
externalMetricFamilyWithSameName := &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("inconsistent help string does not matter here"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Label: []*dto.LabelPair{
{
Name: proto.String("constname"),
Value: proto.String("constvalue"),
},
{
Name: proto.String("labelname"),
Value: proto.String("different_val"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(42),
},
},
},
}
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
`)
type output struct { type output struct {
headers map[string]string headers map[string]string
body []byte body []byte
} }
var scenarios = []struct { var scenarios = []struct {
headers map[string]string headers map[string]string
out output out output
withCounter bool collector Collector
withExternalMF bool externalMF []*dto.MetricFamily
}{ }{
{ // 0 { // 0
headers: map[string]string{ headers: map[string]string{
@ -281,7 +305,7 @@ metric: <
}, },
body: expectedMetricFamilyAsText, body: expectedMetricFamilyAsText,
}, },
withCounter: true, collector: metricVec,
}, },
{ // 5 { // 5
headers: map[string]string{ headers: map[string]string{
@ -293,7 +317,7 @@ metric: <
}, },
body: expectedMetricFamilyAsBytes, body: expectedMetricFamilyAsBytes,
}, },
withCounter: true, collector: metricVec,
}, },
{ // 6 { // 6
headers: map[string]string{ headers: map[string]string{
@ -305,7 +329,7 @@ metric: <
}, },
body: externalMetricFamilyAsText, body: externalMetricFamilyAsText,
}, },
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 7 { // 7
headers: map[string]string{ headers: map[string]string{
@ -317,7 +341,7 @@ metric: <
}, },
body: externalMetricFamilyAsBytes, body: externalMetricFamilyAsBytes,
}, },
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 8 { // 8
headers: map[string]string{ headers: map[string]string{
@ -335,8 +359,8 @@ metric: <
[]byte{}, []byte{},
), ),
}, },
withCounter: true, collector: metricVec,
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 9 { // 9
headers: map[string]string{ headers: map[string]string{
@ -359,7 +383,7 @@ metric: <
}, },
body: expectedMetricFamilyAsText, body: expectedMetricFamilyAsText,
}, },
withCounter: true, collector: metricVec,
}, },
{ // 11 { // 11
headers: map[string]string{ headers: map[string]string{
@ -377,8 +401,8 @@ metric: <
[]byte{}, []byte{},
), ),
}, },
withCounter: true, collector: metricVec,
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 12 { // 12
headers: map[string]string{ headers: map[string]string{
@ -396,8 +420,8 @@ metric: <
[]byte{}, []byte{},
), ),
}, },
withCounter: true, collector: metricVec,
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 13 { // 13
headers: map[string]string{ headers: map[string]string{
@ -415,8 +439,8 @@ metric: <
[]byte{}, []byte{},
), ),
}, },
withCounter: true, collector: metricVec,
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
}, },
{ // 14 { // 14
headers: map[string]string{ headers: map[string]string{
@ -434,20 +458,42 @@ metric: <
[]byte{}, []byte{},
), ),
}, },
withCounter: true, collector: metricVec,
withExternalMF: true, externalMF: []*dto.MetricFamily{externalMetricFamily},
},
{ // 15
headers: map[string]string{
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
},
out: output{
headers: map[string]string{
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
},
body: bytes.Join(
[][]byte{
externalMetricFamilyAsProtoCompactText,
expectedMetricFamilyMergedWithExternalAsProtoCompactText,
},
[]byte{},
),
},
collector: metricVec,
externalMF: []*dto.MetricFamily{
externalMetricFamily,
externalMetricFamilyWithSameName,
},
}, },
} }
for i, scenario := range scenarios { for i, scenario := range scenarios {
registry := newRegistry() registry := newRegistry()
registry.collectChecksEnabled = true registry.collectChecksEnabled = true
if scenario.withCounter { if scenario.collector != nil {
registry.Register(metricVec) registry.Register(scenario.collector)
} }
if scenario.withExternalMF { if scenario.externalMF != nil {
registry.metricFamilyInjectionHook = func() []*dto.MetricFamily { registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
return externalMetricFamily return scenario.externalMF
} }
} }
writer := &fakeResponseWriter{ writer := &fakeResponseWriter{

View File

@ -16,6 +16,7 @@ package prometheus
import ( import (
"fmt" "fmt"
"hash/fnv" "hash/fnv"
"math"
"sort" "sort"
"sync" "sync"
"time" "time"
@ -277,10 +278,8 @@ func (s *summary) Write(out *dto.Metric) error {
s.bufMtx.Lock() s.bufMtx.Lock()
s.mtx.Lock() s.mtx.Lock()
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
if len(s.hotBuf) != 0 { s.swapBufs(time.Now())
s.swapBufs(time.Now())
}
s.bufMtx.Unlock() s.bufMtx.Unlock()
s.flushColdBuf() s.flushColdBuf()
@ -288,9 +287,15 @@ func (s *summary) Write(out *dto.Metric) error {
sum.SampleSum = proto.Float64(s.sum) sum.SampleSum = proto.Float64(s.sum)
for _, rank := range s.sortedObjectives { for _, rank := range s.sortedObjectives {
var q float64
if s.headStream.Count() == 0 {
q = math.NaN()
} else {
q = s.headStream.Query(rank)
}
qs = append(qs, &dto.Quantile{ qs = append(qs, &dto.Quantile{
Quantile: proto.Float64(rank), Quantile: proto.Float64(rank),
Value: proto.Float64(s.headStream.Query(rank)), Value: proto.Float64(q),
}) })
} }

View File

@ -289,6 +289,11 @@ func TestSummaryVecConcurrency(t *testing.T) {
} }
func TestSummaryDecay(t *testing.T) { func TestSummaryDecay(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
// More because it depends on timing than because it is particularly long...
}
sum := NewSummary(SummaryOpts{ sum := NewSummary(SummaryOpts{
Name: "test_summary", Name: "test_summary",
Help: "helpless", Help: "helpless",
@ -315,6 +320,12 @@ func TestSummaryDecay(t *testing.T) {
} }
} }
tick.Stop() tick.Stop()
// Wait for MaxAge without observations and make sure quantiles are NaN.
time.Sleep(100 * time.Millisecond)
sum.Write(m)
if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
t.Errorf("got %f, want NaN after expiration", got)
}
} }
func getBounds(vars []float64, q, ε float64) (min, max float64) { func getBounds(vars []float64, q, ε float64) (min, max float64) {

View File

@ -21,7 +21,7 @@ import (
"testing" "testing"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/matttproud/golang_protobuf_extensions/ext" "github.com/matttproud/golang_protobuf_extensions/pbutil"
) )
// Benchmarks to show how much penalty text format parsing actually inflicts. // Benchmarks to show how much penalty text format parsing actually inflicts.
@ -101,7 +101,7 @@ func BenchmarkParseProto(b *testing.B) {
in := bytes.NewReader(data) in := bytes.NewReader(data)
for { for {
family.Reset() family.Reset()
if _, err := ext.ReadDelimited(in, family); err != nil { if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF { if err == io.EOF {
break break
} }
@ -129,7 +129,7 @@ func BenchmarkParseProtoGzip(b *testing.B) {
} }
for { for {
family.Reset() family.Reset()
if _, err := ext.ReadDelimited(in, family); err != nil { if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF { if err == io.EOF {
break break
} }
@ -156,7 +156,7 @@ func BenchmarkParseProtoMap(b *testing.B) {
in := bytes.NewReader(data) in := bytes.NewReader(data)
for { for {
family := &dto.MetricFamily{} family := &dto.MetricFamily{}
if _, err := ext.ReadDelimited(in, family); err != nil { if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF { if err == io.EOF {
break break
} }

View File

@ -385,7 +385,7 @@ request_duration_microseconds_count 2693
}, },
}, },
}, },
}, },
} }
for i, scenario := range scenarios { for i, scenario := range scenarios {

View File

@ -18,7 +18,7 @@ import (
"io" "io"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/matttproud/golang_protobuf_extensions/ext" "github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
) )
@ -27,7 +27,7 @@ import (
// protobuf format and returns the number of bytes written and any error // protobuf format and returns the number of bytes written and any error
// encountered. // encountered.
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) { func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
return ext.WriteDelimited(w, p) return pbutil.WriteDelimited(w, p)
} }
// WriteProtoText writes the MetricFamily to the writer in text format and // WriteProtoText writes the MetricFamily to the writer in text format and

View File

@ -0,0 +1,5 @@
language: go
go:
- 1.3
- 1.4
- tip

View File

@ -4,8 +4,4 @@ This procfs package provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc. metrics from the pseudo-filesystem proc.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Circle CI](https://circleci.com/gh/prometheus/procfs.svg?style=svg)](https://circleci.com/gh/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
# Testing
$ go test

View File

@ -7,8 +7,22 @@ import (
"os" "os"
) )
// #include <unistd.h> // Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
import "C" // required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic.
// After much research it was determined that USER_HZ is actually hardcoded to
// 100 on all Go-supported platforms as of the time of this writing. This is
// why we decided to hardcode it here as well. It is not impossible that there
// could be systems with exceptions, but they should be very exotic edge cases,
// and in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
// - https://github.com/prometheus/node_exporter/issues/52
// - https://github.com/prometheus/procfs/pull/2
// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
const userHZ = 100
// ProcStat provides status information about the process, // ProcStat provides status information about the process,
// read from /proc/[pid]/stat. // read from /proc/[pid]/stat.
@ -152,14 +166,10 @@ func (s ProcStat) StartTime() (float64, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
return float64(stat.BootTime) + (float64(s.Starttime) / ticks()), nil return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
} }
// CPUTime returns the total CPU user and system time in seconds. // CPUTime returns the total CPU user and system time in seconds.
func (s ProcStat) CPUTime() float64 { func (s ProcStat) CPUTime() float64 {
return float64(s.UTime+s.STime) / ticks() return float64(s.UTime+s.STime) / userHZ
}
func ticks() float64 {
return float64(C.sysconf(C._SC_CLK_TCK)) // most likely 100
} }