added prometheus metrics

indexv1
Jakub Matys 2018-03-13 11:34:49 +01:00
parent ab2d2c2f0d
commit d7229f8db0
11 changed files with 243 additions and 43 deletions

View File

@ -4,6 +4,7 @@ import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"blockbook/bchain/coins/zec"
"blockbook/common"
"fmt"
"reflect"
"time"
@ -11,7 +12,7 @@ import (
"github.com/juju/errors"
)
type blockChainFactory func(url string, user string, password string, timeout time.Duration, parse bool) (bchain.BlockChain, error)
type blockChainFactory func(url string, user string, password string, timeout time.Duration, parse bool, metrics *common.Metrics) (bchain.BlockChain, error)
var blockChainFactories = make(map[string]blockChainFactory)
@ -21,10 +22,10 @@ func init() {
}
// NewBlockChain creates a bchain.BlockChain of the type defined by the coin parameter
func NewBlockChain(coin string, url string, user string, password string, timeout time.Duration, parse bool) (bchain.BlockChain, error) {
func NewBlockChain(coin string, url string, user string, password string, timeout time.Duration, parse bool, metrics *common.Metrics) (bchain.BlockChain, error) {
bcf, ok := blockChainFactories[coin]
if !ok {
return nil, errors.New(fmt.Sprint("Unsupported coin ", coin, ". Must be one of ", reflect.ValueOf(blockChainFactories).MapKeys()))
}
return bcf(url, user, password, timeout, parse)
return bcf(url, user, password, timeout, parse, metrics)
}
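
The body of init() is elided in this hunk. As a hedged sketch only (the map keys and the exact registration site are assumptions, not shown in this diff), coin constructors matching the updated blockChainFactory signature would be registered roughly like this:

```go
// Hypothetical sketch of factory registration inside the coins package;
// the real init() body is not shown in this hunk.
func init() {
	blockChainFactories["btc"] = btc.NewBitcoinRPC
	blockChainFactories["zec"] = zec.NewZCashRPC
}
```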

View File

@ -2,6 +2,7 @@ package btc
import (
"blockbook/bchain"
"blockbook/common"
"bytes"
"encoding/hex"
"encoding/json"
@ -28,10 +29,11 @@ type BitcoinRPC struct {
Network string
Mempool *bchain.Mempool
ParseBlocks bool
metrics *common.Metrics
}
// NewBitcoinRPC returns a new BitcoinRPC instance.
func NewBitcoinRPC(url string, user string, password string, timeout time.Duration, parse bool) (bchain.BlockChain, error) {
func NewBitcoinRPC(url string, user string, password string, timeout time.Duration, parse bool, metrics *common.Metrics) (bchain.BlockChain, error) {
transport := &http.Transport{
Dial: (&net.Dialer{KeepAlive: 600 * time.Second}).Dial,
MaxIdleConns: 100,
@ -43,6 +45,7 @@ func NewBitcoinRPC(url string, user string, password string, timeout time.Durati
user: user,
password: password,
ParseBlocks: parse,
metrics: metrics,
}
chainName, err := s.GetBlockChainInfo()
if err != nil {
@ -63,7 +66,7 @@ func NewBitcoinRPC(url string, user string, password string, timeout time.Durati
s.Network = "testnet"
}
s.Mempool = bchain.NewMempool(s)
s.Mempool = bchain.NewMempool(s, metrics)
glog.Info("rpc: block chain ", s.Parser.Params.Name)
return s, nil
@ -254,7 +257,7 @@ func (b *BitcoinRPC) GetBestBlockHash() (string, error) {
res := resGetBestBlockHash{}
req := cmdGetBestBlockHash{Method: "getbestblockhash"}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return "", err
@ -271,7 +274,7 @@ func (b *BitcoinRPC) GetBestBlockHeight() (uint32, error) {
res := resGetBlockCount{}
req := cmdGetBlockCount{Method: "getblockcount"}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return 0, err
@ -288,7 +291,7 @@ func (b *BitcoinRPC) GetBlockChainInfo() (string, error) {
res := resGetBlockChainInfo{}
req := cmdGetBlockChainInfo{Method: "getblockchaininfo"}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return "", err
@ -306,7 +309,7 @@ func (b *BitcoinRPC) GetBlockHash(height uint32) (string, error) {
res := resGetBlockHash{}
req := cmdGetBlockHash{Method: "getblockhash"}
req.Params.Height = height
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return "", errors.Annotatef(err, "height %v", height)
@ -325,7 +328,7 @@ func (b *BitcoinRPC) GetBlockHeader(hash string) (*bchain.BlockHeader, error) {
req := cmdGetBlockHeader{Method: "getblockheader"}
req.Params.BlockHash = hash
req.Params.Verbose = true
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, errors.Annotatef(err, "hash %v", hash)
@ -388,7 +391,7 @@ func (b *BitcoinRPC) GetBlockRaw(hash string) ([]byte, error) {
req := cmdGetBlock{Method: "getblock"}
req.Params.BlockHash = hash
req.Params.Verbosity = 0
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, errors.Annotatef(err, "hash %v", hash)
@ -408,7 +411,7 @@ func (b *BitcoinRPC) GetBlockList(hash string) (*bchain.Block, error) {
req := cmdGetBlock{Method: "getblock"}
req.Params.BlockHash = hash
req.Params.Verbosity = 1
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, errors.Annotatef(err, "hash %v", hash)
@ -440,7 +443,7 @@ func (b *BitcoinRPC) GetBlockFull(hash string) (*bchain.Block, error) {
req := cmdGetBlock{Method: "getblock"}
req.Params.BlockHash = hash
req.Params.Verbosity = 2
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, errors.Annotatef(err, "hash %v", hash)
@ -457,7 +460,7 @@ func (b *BitcoinRPC) GetMempool() ([]string, error) {
res := resGetMempool{}
req := cmdGetMempool{Method: "getrawmempool"}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, err
@ -476,7 +479,7 @@ func (b *BitcoinRPC) GetTransaction(txid string) (*bchain.Tx, error) {
req := cmdGetRawTransaction{Method: "getrawtransaction"}
req.Params.Txid = txid
req.Params.Verbose = true
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, errors.Annotatef(err, "txid %v", txid)
@ -515,7 +518,7 @@ func (b *BitcoinRPC) EstimateSmartFee(blocks int, conservative bool) (float64, e
} else {
req.Params.EstimateMode = "ECONOMICAL"
}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return 0, err
@ -533,7 +536,7 @@ func (b *BitcoinRPC) SendRawTransaction(tx string) (string, error) {
res := resSendRawTransaction{}
req := cmdSendRawTransaction{Method: "sendrawtransaction"}
req.Params = []string{tx}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return "", err
@ -552,7 +555,7 @@ func (b *BitcoinRPC) GetMempoolEntry(txid string) (*bchain.MempoolEntry, error)
Method: "getmempoolentry",
Params: []string{txid},
}
err := b.call(&req, &res)
err := b.observeRPCLatency(req.Method, func() error { return b.call(&req, &res) })
if err != nil {
return nil, err
@ -563,6 +566,15 @@ func (b *BitcoinRPC) GetMempoolEntry(txid string) (*bchain.MempoolEntry, error)
return res.Result, nil
}
func (b *BitcoinRPC) observeRPCLatency(method string, fn func() error) error {
start := time.Now()
err := fn()
if err == nil {
b.metrics.BlockChainLatency.With(common.Labels{"coin": "bitcoin", "method": method}).Observe(float64(time.Since(start)) / 1e6) // in milliseconds
}
return err
}
func (b *BitcoinRPC) call(req interface{}, res interface{}) error {
httpData, err := json.Marshal(req)
if err != nil {

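observeRPCLatency times each daemon call and records it in the BlockChainLatency histogram, but only when the call succeeds; failed calls are not observed, and the coin label is hard-coded to "bitcoin". A self-contained sketch of the same wrap-and-observe pattern (the standalone wiring and the simulated RPC are assumptions, not part of this commit):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// latency mirrors Metrics.BlockChainLatency: a histogram partitioned by coin
// and method, observed in milliseconds.
var latency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "blockbook_blockchain_latency",
		Help:    "Latency of blockchain RPC by coin and method (in milliseconds)",
		Buckets: []float64{1, 5, 10, 25, 50, 75, 100, 250},
	},
	[]string{"coin", "method"},
)

// observeLatency runs fn and, only on success, records its wall-clock duration.
func observeLatency(method string, fn func() error) error {
	start := time.Now()
	err := fn()
	if err == nil {
		latency.With(prometheus.Labels{"coin": "bitcoin", "method": method}).Observe(float64(time.Since(start)) / 1e6) // in milliseconds
	}
	return err
}

func main() {
	prometheus.MustRegister(latency)
	_ = observeLatency("getbestblockhash", func() error {
		time.Sleep(5 * time.Millisecond) // stand-in for a real RPC round trip
		return nil
	})
}
```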
View File

@ -3,6 +3,7 @@ package zec
import (
"blockbook/bchain"
"blockbook/bchain/coins/btc"
"blockbook/common"
"time"
)
@ -10,8 +11,8 @@ type ZCashRPC struct {
*btc.BitcoinRPC
}
func NewZCashRPC(url string, user string, password string, timeout time.Duration, parse bool) (bchain.BlockChain, error) {
b, err := btc.NewBitcoinRPC(url, user, password, timeout, parse)
func NewZCashRPC(url string, user string, password string, timeout time.Duration, parse bool, metrics *common.Metrics) (bchain.BlockChain, error) {
b, err := btc.NewBitcoinRPC(url, user, password, timeout, parse, metrics)
if err != nil {
return nil, err
}

View File

@ -1,6 +1,7 @@
package bchain
import (
"blockbook/common"
"encoding/hex"
"sync"
"time"
@ -30,11 +31,12 @@ type Mempool struct {
txToInputOutput map[string]inputOutput
scriptToTx map[string][]outpoint
inputs map[outpoint]string
metrics *common.Metrics
}
// NewMempool creates a new mempool handler.
func NewMempool(chain BlockChain) *Mempool {
return &Mempool{chain: chain}
func NewMempool(chain BlockChain, metrics *common.Metrics) *Mempool {
return &Mempool{chain: chain, metrics: metrics}
}
// GetTransactions returns a slice of mempool transactions for the given output script.
@ -76,6 +78,7 @@ func (m *Mempool) Resync(onNewTxAddr func(txid string, addr string)) error {
glog.V(1).Info("Mempool: resync")
txs, err := m.chain.GetMempool()
if err != nil {
m.metrics.MempoolResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
return err
}
newTxToInputOutput := make(map[string]inputOutput, len(m.txToInputOutput)+1)
@ -86,6 +89,7 @@ func (m *Mempool) Resync(onNewTxAddr func(txid string, addr string)) error {
if !exists {
tx, err := m.chain.GetTransaction(txid)
if err != nil {
m.metrics.MempoolResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
glog.Error("cannot get transaction ", txid, ": ", err)
continue
}
@ -116,6 +120,8 @@ func (m *Mempool) Resync(onNewTxAddr func(txid string, addr string)) error {
}
}
m.updateMappings(newTxToInputOutput, newScriptToTx, newInputs)
glog.Info("Mempool: resync finished in ", time.Since(start), ", ", len(m.txToInputOutput), " transactions in mempool")
d := time.Since(start)
glog.Info("Mempool: resync finished in ", d, ", ", len(m.txToInputOutput), " transactions in mempool")
m.metrics.MempoolResyncDuration.Observe(float64(d) / 1e6) // in milliseconds
return nil
}
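
Both this resync path and the index resync added later in this commit convert a time.Duration to milliseconds as float64(d) / 1e6 before calling Observe. A hypothetical helper, not part of the commit, that names the same conversion (it assumes the existing "time" import):

```go
// millis converts a duration to floating-point milliseconds, the unit
// declared in the Help strings of the resync histograms.
func millis(d time.Duration) float64 {
	return float64(d) / float64(time.Millisecond)
}
```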

View File

@ -14,6 +14,7 @@ import (
"blockbook/bchain"
"blockbook/bchain/coins"
"blockbook/common"
"blockbook/db"
"blockbook/server"
@ -109,8 +110,12 @@ func main() {
return
}
var err error
if chain, err = coins.NewBlockChain(*coin, *rpcURL, *rpcUser, *rpcPass, time.Duration(*rpcTimeout)*time.Second, *parse); err != nil {
metrics, err := common.GetMetrics()
if err != nil {
glog.Fatal("GetMetrics: ", err)
}
if chain, err = coins.NewBlockChain(*coin, *rpcURL, *rpcUser, *rpcPass, time.Duration(*rpcTimeout)*time.Second, *parse, metrics); err != nil {
glog.Fatal("rpc: ", err)
}
@ -138,7 +143,7 @@ func main() {
return
}
syncWorker, err = db.NewSyncWorker(index, chain, *syncWorkers, *syncChunk, *blockFrom, *dryRun, chanOsSignal)
syncWorker, err = db.NewSyncWorker(index, chain, *syncWorkers, *syncChunk, *blockFrom, *dryRun, chanOsSignal, metrics)
if err != nil {
glog.Fatalf("NewSyncWorker %v", err)
}
@ -154,7 +159,7 @@ func main() {
}
}
if txCache, err = db.NewTxCache(index, chain); err != nil {
if txCache, err = db.NewTxCache(index, chain, metrics); err != nil {
glog.Error("txCache ", err)
return
}
@ -181,7 +186,8 @@ func main() {
var socketIoServer *server.SocketIoServer
if *socketIoBinding != "" {
socketIoServer, err = server.NewSocketIoServer(*socketIoBinding, *certFiles, index, chain, txCache, *explorerURL)
socketIoServer, err = server.NewSocketIoServer(
*socketIoBinding, *certFiles, index, chain, txCache, *explorerURL, metrics)
if err != nil {
glog.Error("socketio: ", err)
return

common/metrics.go 100644 (new file, 117 additions)
View File

@ -0,0 +1,117 @@
package common
import (
"reflect"
"github.com/prometheus/client_golang/prometheus"
)
type Metrics struct {
RPCRequests *prometheus.CounterVec
SubscribeRequests *prometheus.CounterVec
Clients *prometheus.GaugeVec
RequestDuration *prometheus.HistogramVec
IndexResyncDuration prometheus.Histogram
MempoolResyncDuration prometheus.Histogram
TxCacheEfficiency *prometheus.CounterVec
BlockChainLatency *prometheus.HistogramVec
IndexResyncErrors *prometheus.CounterVec
MempoolResyncErrors *prometheus.CounterVec
IndexDBSize prometheus.Gauge
}
type Labels = prometheus.Labels
func GetMetrics() (*Metrics, error) {
metrics := Metrics{}
metrics.RPCRequests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "blockbook_rpc_requests",
Help: "Total number of RPC requests by transport, method and status",
},
[]string{"transport", "method", "status"},
)
metrics.SubscribeRequests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "blockbook_subscribe_requests",
Help: "Total number of subscribe requests by transport, channel and status",
},
[]string{"transport", "channel", "status"},
)
metrics.Clients = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "blockbook_clients",
Help: "Number of currently connected clients by transport",
},
[]string{"transport"},
)
metrics.RequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "blockbook_request_duration",
Help: "Request duration by method (in microseconds)",
Buckets: []float64{1, 5, 10, 25, 50, 75, 100, 250},
},
[]string{"transport", "method"},
)
metrics.IndexResyncDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "blockbook_index_resync_duration",
Help: "Duration of index resync operation (in milliseconds)",
Buckets: []float64{100, 250, 500, 750, 1000, 10000, 30000, 60000},
},
)
metrics.MempoolResyncDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "blockbook_mempool_resync_duration",
Help: "Duration of mempool resync operation (in milliseconds)",
Buckets: []float64{1, 5, 10, 25, 50, 75, 100, 250},
},
)
metrics.TxCacheEfficiency = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "blockbook_txcache_efficiency",
Help: "Efficiency of txCache",
},
[]string{"status"},
)
metrics.BlockChainLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "blockbook_blockchain_latency",
Help: "Latency of blockchain RPC by coin and method (in milliseconds)",
Buckets: []float64{1, 5, 10, 25, 50, 75, 100, 250},
},
[]string{"coin", "method"},
)
metrics.IndexResyncErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "blockbook_index_resync_errors",
Help: "Number of errors of index resync operation",
},
[]string{"error"},
)
metrics.MempoolResyncErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "blockbook_mempool_resync_errors",
Help: "Number of errors of mempool resync operation",
},
[]string{"error"},
)
metrics.IndexDBSize = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "blockbook_index_db_size",
Help: "Size of index database (in bytes)",
},
)
v := reflect.ValueOf(metrics)
for i := 0; i < v.NumField(); i++ {
c := v.Field(i).Interface().(prometheus.Collector)
err := prometheus.Register(c)
if err != nil {
return nil, err
}
}
return &metrics, nil
}
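
GetMetrics builds every collector and then registers each struct field through reflection; note that the loop type-asserts every field to prometheus.Collector, so a future non-metric field in Metrics would panic at startup. A minimal usage sketch, assuming a standalone program and an arbitrary listen address (both are assumptions, not part of this commit), that registers the metrics, records one example observation, and exposes the /metrics endpoint:

```go
package main

import (
	"log"
	"net/http"

	"blockbook/common"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	metrics, err := common.GetMetrics()
	if err != nil {
		log.Fatal("GetMetrics: ", err)
	}

	// Record a sample value; label names must match those declared in GetMetrics.
	metrics.TxCacheEfficiency.With(common.Labels{"status": "hit"}).Inc()

	// Serve everything registered on the default registry (port is hypothetical).
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```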

View File

@ -541,8 +541,13 @@ func dirSize(path string) (int64, error) {
}
// DatabaseSizeOnDisk returns the size of the database in bytes
func (d *RocksDB) DatabaseSizeOnDisk() (int64, error) {
return dirSize(d.path)
func (d *RocksDB) DatabaseSizeOnDisk() int64 {
size, err := dirSize(d.path)
if err != nil {
glog.Error("rocksdb: DatabaseSizeOnDisk: ", err)
return 0
}
return size
}
// GetTx returns the transaction stored in the db and the height of the block containing it

View File

@ -2,6 +2,7 @@ package db
import (
"blockbook/bchain"
"blockbook/common"
"os"
"sync"
"sync/atomic"
@ -19,10 +20,11 @@ type SyncWorker struct {
dryRun bool
startHeight uint32
chanOsSignal chan os.Signal
metrics *common.Metrics
}
// NewSyncWorker creates a new SyncWorker and returns its handle
func NewSyncWorker(db *RocksDB, chain bchain.BlockChain, syncWorkers, syncChunk int, minStartHeight int, dryRun bool, chanOsSignal chan os.Signal) (*SyncWorker, error) {
func NewSyncWorker(db *RocksDB, chain bchain.BlockChain, syncWorkers, syncChunk int, minStartHeight int, dryRun bool, chanOsSignal chan os.Signal, metrics *common.Metrics) (*SyncWorker, error) {
if minStartHeight < 0 {
minStartHeight = 0
}
@ -34,12 +36,37 @@ func NewSyncWorker(db *RocksDB, chain bchain.BlockChain, syncWorkers, syncChunk
dryRun: dryRun,
startHeight: uint32(minStartHeight),
chanOsSignal: chanOsSignal,
metrics: metrics,
}, nil
}
var synced = errors.New("synced")
// ResyncIndex synchronizes the index to the top of the blockchain
// onNewBlock is called when a new block is connected, but not during the initial parallel sync
func (w *SyncWorker) ResyncIndex(onNewBlock func(hash string)) error {
start := time.Now()
err := w.resyncIndex(onNewBlock)
switch err {
case nil:
d := time.Since(start)
glog.Info("resync: finished in ", d)
w.metrics.IndexResyncDuration.Observe(float64(d) / 1e6) // in milliseconds
w.metrics.IndexDBSize.Set(float64(w.db.DatabaseSizeOnDisk()))
fallthrough
case synced:
// this is not actually an error but a flag indicating that resync wasn't necessary
return nil
}
w.metrics.IndexResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
return err
}
func (w *SyncWorker) resyncIndex(onNewBlock func(hash string)) error {
remote, err := w.chain.GetBestBlockHash()
if err != nil {
return err
@ -53,7 +80,7 @@ func (w *SyncWorker) ResyncIndex(onNewBlock func(hash string)) error {
// network, we're done.
if local == remote {
glog.Infof("resync: synced on %d %s", localBestHeight, local)
return nil
return synced
}
var header *bchain.BlockHeader
@ -94,7 +121,7 @@ func (w *SyncWorker) ResyncIndex(onNewBlock func(hash string)) error {
if err != nil {
return err
}
return w.ResyncIndex(onNewBlock)
return w.resyncIndex(onNewBlock)
}
}
@ -128,7 +155,7 @@ func (w *SyncWorker) ResyncIndex(onNewBlock func(hash string)) error {
}
// after parallel load finish the sync using standard way,
// new blocks may have been created in the meantime
return w.ResyncIndex(onNewBlock)
return w.resyncIndex(onNewBlock)
}
}
@ -188,6 +215,7 @@ func (w *SyncWorker) connectBlocksParallel(lower, higher uint32) error {
return
}
glog.Error("Worker ", i, " connect block error ", err, ". Retrying...")
w.metrics.IndexResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
time.Sleep(time.Millisecond * 500)
} else {
break
@ -219,6 +247,7 @@ ConnectLoop:
hash, err = w.chain.GetBlockHash(h)
if err != nil {
glog.Error("GetBlockHash error ", err)
w.metrics.IndexResyncErrors.With(common.Labels{"error": err.Error()}).Inc()
time.Sleep(time.Millisecond * 500)
continue
}
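
resyncIndex returns the package-level sentinel error synced when the local index already matches the chain tip; ResyncIndex turns that into a nil result via fallthrough while recording duration and database-size metrics only for runs that actually connected blocks. A minimal standalone sketch of the sentinel-plus-fallthrough pattern, using hypothetical names (errDone, doWork, run):

```go
package main

import (
	"errors"
	"fmt"
)

// errDone plays the role of `synced`: it signals "nothing to do", not a failure.
var errDone = errors.New("done")

func doWork(alreadyDone bool) error {
	if alreadyDone {
		return errDone
	}
	return nil // pretend some work succeeded
}

func run(alreadyDone bool) error {
	switch err := doWork(alreadyDone); err {
	case nil:
		fmt.Println("work finished: observe duration metrics here")
		fallthrough // success and "nothing to do" both report nil to the caller
	case errDone:
		return nil
	default:
		fmt.Println("increment error counter here")
		return err
	}
}

func main() {
	fmt.Println(run(false), run(true)) // <nil> <nil>
}
```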

View File

@ -2,21 +2,24 @@ package db
import (
"blockbook/bchain"
"blockbook/common"
"github.com/golang/glog"
)
// TxCache is a handle to TxCacheServer
type TxCache struct {
db *RocksDB
chain bchain.BlockChain
db *RocksDB
chain bchain.BlockChain
metrics *common.Metrics
}
// NewTxCache creates a new TxCache interface and returns its handle
func NewTxCache(db *RocksDB, chain bchain.BlockChain) (*TxCache, error) {
func NewTxCache(db *RocksDB, chain bchain.BlockChain, metrics *common.Metrics) (*TxCache, error) {
return &TxCache{
db: db,
chain: chain,
db: db,
chain: chain,
metrics: metrics,
}, nil
}
@ -29,12 +32,14 @@ func (c *TxCache) GetTransaction(txid string, bestheight uint32) (*bchain.Tx, er
}
if tx != nil {
tx.Confirmations = bestheight - h
c.metrics.TxCacheEfficiency.With(common.Labels{"status": "hit"}).Inc()
return tx, nil
}
tx, err = c.chain.GetTransaction(txid)
if err != nil {
return nil, err
}
c.metrics.TxCacheEfficiency.With(common.Labels{"status": "miss"}).Inc()
// do not cache mempool transactions
if tx.Confirmations > 0 {
err = c.db.PutTx(tx, bestheight-tx.Confirmations, tx.Blocktime)

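The hit/miss labels on TxCacheEfficiency make the cache ratio observable. A hedged sketch of exercising those counters in a Go test; it assumes a client_golang version that ships the prometheus/testutil package, and the test itself is not part of this commit:

```go
package common_test

import (
	"testing"

	"blockbook/common"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestTxCacheEfficiencyLabels(t *testing.T) {
	metrics, err := common.GetMetrics()
	if err != nil {
		t.Fatal(err)
	}
	metrics.TxCacheEfficiency.With(common.Labels{"status": "hit"}).Inc()
	metrics.TxCacheEfficiency.With(common.Labels{"status": "hit"}).Inc()
	metrics.TxCacheEfficiency.With(common.Labels{"status": "miss"}).Inc()

	if got := testutil.ToFloat64(metrics.TxCacheEfficiency.WithLabelValues("hit")); got != 2 {
		t.Errorf("hit counter = %v, want 2", got)
	}
}
```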
View File

@ -14,6 +14,7 @@ import (
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// HTTPServer is a handle to HttpServer
@ -47,6 +48,7 @@ func NewHTTPServer(httpServerBinding string, certFiles string, db *db.RocksDB, c
r.HandleFunc("/transactions/{address}/{lower}/{higher}", s.transactions)
r.HandleFunc("/confirmedTransactions/{address}/{lower}/{higher}", s.confirmedTransactions)
r.HandleFunc("/unconfirmedTransactions/{address}", s.unconfirmedTransactions)
r.HandleFunc("/metrics", promhttp.Handler().ServeHTTP)
var h http.Handler = r
h = handlers.LoggingHandler(os.Stderr, h)

View File

@ -2,12 +2,14 @@ package server
import (
"blockbook/bchain"
"blockbook/common"
"blockbook/db"
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/juju/errors"
@ -27,18 +29,21 @@ type SocketIoServer struct {
chain bchain.BlockChain
chainParser bchain.BlockChainParser
explorerURL string
metrics *common.Metrics
}
// NewSocketIoServer creates a new SocketIo interface to blockbook and returns its handle
func NewSocketIoServer(binding string, certFiles string, db *db.RocksDB, chain bchain.BlockChain, txCache *db.TxCache, explorerURL string) (*SocketIoServer, error) {
func NewSocketIoServer(binding string, certFiles string, db *db.RocksDB, chain bchain.BlockChain, txCache *db.TxCache, explorerURL string, metrics *common.Metrics) (*SocketIoServer, error) {
server := gosocketio.NewServer(transport.GetDefaultWebsocketTransport())
server.On(gosocketio.OnConnection, func(c *gosocketio.Channel) {
glog.Info("Client connected ", c.Id())
metrics.Clients.With(common.Labels{"transport": "socketio"}).Inc()
})
server.On(gosocketio.OnDisconnection, func(c *gosocketio.Channel) {
glog.Info("Client disconnected ", c.Id())
metrics.Clients.With(common.Labels{"transport": "socketio"}).Dec()
})
server.On(gosocketio.OnError, func(c *gosocketio.Channel) {
@ -67,6 +72,7 @@ func NewSocketIoServer(binding string, certFiles string, db *db.RocksDB, chain b
chain: chain,
chainParser: chain.GetChainParser(),
explorerURL: explorerURL,
metrics: metrics,
}
// support for tests of socket.io interface
@ -192,7 +198,9 @@ func (s *SocketIoServer) onMessage(c *gosocketio.Channel, req map[string]json.Ra
var err error
var rv interface{}
method := string(req["method"])
t := time.Now()
params := req["params"]
defer func() {
s.metrics.RequestDuration.With(common.Labels{"transport": "socketio", "method": method}).Observe(float64(time.Since(t)) / 1e3) // in microseconds, measured when onMessage returns
}()
f, ok := onMessageHandlers[method]
if ok {
rv, err = f(s, params)
@ -201,9 +209,11 @@ func (s *SocketIoServer) onMessage(c *gosocketio.Channel, req map[string]json.Ra
}
if err == nil {
glog.V(1).Info(c.Id(), " onMessage ", method, " success")
s.metrics.RPCRequests.With(common.Labels{"transport": "socketio", "method": method, "status": "success"}).Inc()
return rv
}
glog.Error(c.Id(), " onMessage ", method, ": ", errors.ErrorStack(err))
s.metrics.RPCRequests.With(common.Labels{"transport": "socketio", "method": method, "status": err.Error()}).Inc()
e := resultError{}
e.Error.Message = err.Error()
return e
@ -650,6 +660,11 @@ func (s *SocketIoServer) getMempoolEntry(txid string) (res resultGetMempoolEntry
// "bitcoind/hashblock"
// "bitcoind/addresstxid",["2MzTmvPJLZaLzD9XdN3jMtQA5NexC3rAPww","2NAZRJKr63tSdcTxTN3WaE9ZNDyXy6PgGuv"]
func (s *SocketIoServer) onSubscribe(c *gosocketio.Channel, req []byte) interface{} {
onError := func(id, sc, err string) {
glog.Error(id, " onSubscribe ", sc, ": ", err)
s.metrics.SubscribeRequests.With(common.Labels{"transport": "socketio", "channel": sc, "status": err}).Inc()
}
r := string(req)
glog.V(1).Info(c.Id(), " onSubscribe ", r)
var sc string
@ -658,12 +673,12 @@ func (s *SocketIoServer) onSubscribe(c *gosocketio.Channel, req []byte) interfac
var addrs []string
sc = r[1:i]
if sc != "bitcoind/addresstxid" {
glog.Error(c.Id(), " onSubscribe ", sc, ": invalid data")
onError(c.Id(), sc, "invalid data")
return nil
}
err := json.Unmarshal([]byte(r[i+2:]), &addrs)
if err != nil {
glog.Error(c.Id(), " onSubscribe ", sc, ": ", err)
onError(c.Id(), sc, err.Error())
return nil
}
for _, a := range addrs {
@ -672,11 +687,12 @@ func (s *SocketIoServer) onSubscribe(c *gosocketio.Channel, req []byte) interfac
} else {
sc = r[1 : len(r)-1]
if sc != "bitcoind/hashblock" {
glog.Error(c.Id(), " onSubscribe ", sc, ": invalid data")
onError(c.Id(), sc, "invalid data")
return nil
}
c.Join(sc)
}
s.metrics.SubscribeRequests.With(common.Labels{"transport": "socketio", "channel": sc, "status": "success"}).Inc()
return nil
}
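
One Go subtlety behind the request-duration measurement in onMessage: the arguments of a deferred call are evaluated at the defer statement itself, so the elapsed time has to be computed inside a deferred closure to capture the full handler duration. A minimal illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()

	// Evaluated now: captures ~0s, not the function's real duration.
	defer fmt.Println("plain defer measured:", time.Since(t))

	// Evaluated at return: captures the full elapsed time.
	defer func() { fmt.Println("closure defer measured:", time.Since(t)) }()

	time.Sleep(50 * time.Millisecond) // stand-in for request handling
}
```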