Skip to content

Commit

Permalink
Mempool RPC histograms #16
Browse files Browse the repository at this point in the history
  • Loading branch information
e-asphyx committed Jun 24, 2019
1 parent f7681bd commit 7883434
Show file tree
Hide file tree
Showing 2 changed files with 58 additions and 16 deletions.
70 changes: 54 additions & 16 deletions collector/mempool.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,20 @@ package collector

import (
"context"
"net/http"
"sync"
"time"

"github.com/ecadlabs/go-tezos"
"github.com/prometheus/client_golang/prometheus"
)

// MempoolOperationsCollector collects mempool operations count
type MempoolOperationsCollector struct {
prometheus.Collector // refers to counter
counter *prometheus.CounterVec
rpcTotalHist prometheus.Histogram
rpcConnectHist prometheus.Histogram

counter *prometheus.CounterVec
service *tezos.Service
chainID string
wg sync.WaitGroup
Expand Down Expand Up @@ -40,22 +44,41 @@ func (m *MempoolOperationsCollector) listener(ctx context.Context, pool string)
}
}

// NewMempoolOperationsCollectorCollector returns new mempool collector for given pools like "applied", "refused" etc.
func NewMempoolOperationsCollectorCollector(service *tezos.Service, chainID string, pools []string) *MempoolOperationsCollector {
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "tezos_node_mempool_operations_total",
Help: "The total number of mempool operations.",
},
[]string{"pool", "proto", "kind"},
)

c := MempoolOperationsCollector{
Collector: counter,
counter: counter,
service: service,
chainID: chainID,
c := &MempoolOperationsCollector{
counter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "tezos_node_mempool_operations_total",
Help: "The total number of mempool operations.",
},
[]string{"pool", "proto", "kind"},
),
rpcTotalHist: prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "tezos_rpc_mempool_monitor_connection_total_duration_milliseconds",
Help: "The total life time of the mempool monitir RPC connection.",
Buckets: prometheus.ExponentialBuckets(250, 2, 10),
}),
rpcConnectHist: prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "tezos_rpc_mempool_monitor_connection_connect_duration_milliseconds",
Help: "Mempool monitor (re)connection duration (time until HTTP header arrives).",
Buckets: prometheus.ExponentialBuckets(250, 2, 10),
}),
chainID: chainID,
}

client := *service.Client
client.RPCStatusCallback = func(req *http.Request, status int, duration time.Duration, err error) {
c.rpcTotalHist.Observe(float64(duration / time.Millisecond))
}
client.RPCHeaderCallback = func(req *http.Request, resp *http.Response, duration time.Duration) {
c.rpcConnectHist.Observe(float64(duration / time.Millisecond))
}

srv := *service
srv.Client = &client
c.service = &srv

ctx, cancel := context.WithCancel(context.Background())
c.cancel = cancel

Expand All @@ -64,9 +87,24 @@ func NewMempoolOperationsCollectorCollector(service *tezos.Service, chainID stri
go c.listener(ctx, p)
}

return &c
return c
}

// Describe implements prometheus.Collector
func (m *MempoolOperationsCollector) Describe(ch chan<- *prometheus.Desc) {
m.counter.Describe(ch)
m.rpcTotalHist.Describe(ch)
m.rpcConnectHist.Describe(ch)
}

// Collect implements prometheus.Collector
func (m *MempoolOperationsCollector) Collect(ch chan<- prometheus.Metric) {
m.counter.Collect(ch)
m.rpcTotalHist.Collect(ch)
m.rpcConnectHist.Collect(ch)
}

// Shutdown stops all listeners
func (m *MempoolOperationsCollector) Shutdown(ctx context.Context) error {
m.cancel()

Expand Down
4 changes: 4 additions & 0 deletions collector/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,10 @@ func NewNetworkCollector(service *tezos.Service, timeout time.Duration, chainID
// Describe implements prometheus.Collector by emitting the static
// descriptors for all network metrics exposed by this collector.
func (c *NetworkCollector) Describe(ch chan<- *prometheus.Desc) {
	descs := []*prometheus.Desc{
		sentBytesDesc,
		recvBytesDesc,
		connsDesc,
		peersDesc,
		pointsDesc,
		bootstrappedDesc,
	}
	for _, d := range descs {
		ch <- d
	}
}

func getConnStats(ctx context.Context, service *tezos.Service) (map[string]map[string]int, error) {
Expand Down

0 comments on commit 7883434

Please sign in to comment.