2018-01-18 08:44:31 -07:00
package db
2017-08-28 09:50:57 -06:00
import (
2018-01-31 07:23:17 -07:00
"blockbook/bchain"
2018-05-22 04:56:51 -06:00
"blockbook/common"
2017-08-28 09:50:57 -06:00
"bytes"
"encoding/binary"
"encoding/hex"
Add fiat rates functionality (#316)
* Add initial commit for fiat rates functionality
* templates.go: use bash from current user's environment
* bitcoinrpc.go: add FiatRates and FiatRatesParams to config
* blockbook.go: add initFiatRatesDownloader kickoff
* bitcoin.json: add coingecko API URL
* rockdb.go: add FindTicker and StoreTicker functions
* rocksdb_test.go: add a simple test for storing and getting FiatRate tickers
* rocksdb: add FindLastTicker and convertDate, make FindTicker return strings
* rocksdb: add ConvertDate function and CoinGeckoTicker struct, update tests
* blockbook.go, fiat: finalize the CoinGecko downloader
* coingecko.go: do not stop syncing when encountered an error
* rocksdb_test: fix the exported function name
* worker.go: make getBlockInfoFromBlockID a public function
* public.go: apiTickers kickoff
* rocksdb_test: fix the unittest comment
* coingecko.go: update comments
* blockbook.go, fiat: reword CoinGecko -> FiatRates, fix binary search upper bound, remove assignment of goroutine call result
* rename coingecko -> fiat_rates
* fiat_rates: export only the necessary methods
* blockbook.go: update log message
* bitcoinrpc.go: remove fiatRates settings
* use CurrencyRatesTicker structure everywhere, fix time format string, update tests, use UTC time
* add /api/v2/tickers tests, store rates as strings (json.Number)
* fiat_rates: add more tests, metrics and tickers-list endpoint, make the "currency" parameter mandatory
* public, worker: move FiatRates API logic to worker.go
* fiat_rates: add a future date test, fix comments, add more checks, store time as a pointer
* rocksdb_test: remove unneeded code
* fiat_rates: add a "ping" call to check server availability
* fiat_rates: do not return empty ticker, return nil instead if not found
add a test for non-existent ticker
* rocksdb_test: remove Sleep from tests
* worker.go: do not propagate all API errors to the client
* move InitTestFiatRates from rocksdb.go to public_test.go
* public.go: fix FiatRatesFindLastTicker result check
* fiat_rates: mock API server responses
* remove commented-out code
* fiat_rates: add comment explaining what periodSeconds attribute is used for
* websocket.go: implement fiatRates websocket endpoints & add tests
* fiatRates: add getFiatRatesTickersList websocket endpoint & test
* fiatRates: make websocket getFiatRatesByDate accept an array of dates, add more tests
* fiatRates: remove getFiatRatesForBlockID from websocket endpoints
* fiatRates: remove "if test", use custom startTime instead
Update tests and mock data
* fiatRates: finalize websocket functionality
add "date" parameter to TickerList
return data timestamps where needed
fix sync bugs (nil timestamp, duplicate save)
* fiatRates: add FiatRates configs for different coins
* worker.go: make GetBlockInfoFromBlockID private again
* fiatRates: wait & retry on errors, remove Ping function
* websocket.go: remove incorrect comment
* fiatRates: move coingecko-related code to a separate file, use interface
* fiatRates: if the new rates are the same as previous, try five more times, and only then store them
* coingecko: fix getting actual rates, add a timestamp parameter to get uncached responses
* vertcoin_testnet.json: remove fiat rates parameters
* fiat_rates: add timestamp to log message about skipping the repeating rates
2019-12-17 02:40:02 -07:00
"encoding/json"
2018-08-22 08:20:52 -06:00
"fmt"
2018-07-27 11:46:21 -06:00
"math/big"
2018-02-05 02:31:22 -07:00
"os"
"path/filepath"
2019-05-06 00:37:16 -06:00
"sort"
2018-11-01 11:28:48 -06:00
"strconv"
2018-06-08 05:19:57 -06:00
"time"
2019-05-06 00:37:16 -06:00
"unsafe"
2017-08-28 09:50:57 -06:00
2019-01-09 15:24:25 -07:00
vlq "github.com/bsm/go-vlq"
2018-01-30 10:22:25 -07:00
"github.com/golang/glog"
2018-04-20 05:56:55 -06:00
"github.com/juju/errors"
2017-08-28 09:50:57 -06:00
"github.com/tecbot/gorocksdb"
)
2019-04-30 08:06:56 -06:00
// dbVersion is the required version of the data stored in the DB
// (logged on open in NewRocksDB).
const dbVersion = 5

const (
	// packedHeightBytes is the fixed byte length of a packed block height
	packedHeightBytes = 4
	// maxAddrDescLen is the maximum accepted length of an address descriptor
	maxAddrDescLen = 1024
)

// iterator creates snapshot, which takes lots of resources
// when doing huge scan, it is better to close it and reopen from time to time to free the resources
const refreshIterator = 5000000
2018-02-06 01:12:50 -07:00
// FiatRatesTimeFormat is a format string for storing FiatRates timestamps in rocksdb
const FiatRatesTimeFormat = "20060102150405" // YYYYMMDDhhmmss
// CurrencyRatesTicker contains coin ticker data fetched from API
type CurrencyRatesTicker struct {
	Timestamp *time.Time         // return as unix timestamp in API
	Rates     map[string]float64 // rates of the base currency against a list of vs currencies
}
// ResultTickerAsString contains formatted CurrencyRatesTicker data
type ResultTickerAsString struct {
	Timestamp int64              `json:"ts,omitempty"`
	Rates     map[string]float64 `json:"rates"`
	Error     string             `json:"error,omitempty"`
}
// ResultTickersAsString contains a formatted CurrencyRatesTicker list
type ResultTickersAsString struct {
	Tickers []ResultTickerAsString `json:"tickers"`
}
// ResultTickerListAsString contains formatted data about available currency tickers
type ResultTickerListAsString struct {
	Timestamp int64    `json:"ts,omitempty"`
	Tickers   []string `json:"available_currencies"`
	Error     string   `json:"error,omitempty"`
}
2018-04-23 09:05:23 -06:00
// RepairRocksDB calls RocksDb db repair function
2017-09-12 08:53:40 -06:00
func RepairRocksDB ( name string ) error {
2018-01-30 10:22:25 -07:00
glog . Infof ( "rocksdb: repair" )
2017-09-12 08:53:40 -06:00
opts := gorocksdb . NewDefaultOptions ( )
return gorocksdb . RepairDb ( name , opts )
}
2018-09-24 10:23:13 -06:00
// connectBlockStats holds hit/miss counters collected while connecting a block
// (names suggest they track cache effectiveness for txAddresses and balances —
// confirm against the code that updates them).
type connectBlockStats struct {
	txAddressesHit  int // txAddresses found in cache
	txAddressesMiss int // txAddresses not found in cache
	balancesHit     int // balances found in cache
	balancesMiss    int // balances not found in cache
}
2019-05-11 18:19:51 -06:00
// AddressBalanceDetail specifies what data are returned by GetAddressBalance
type AddressBalanceDetail int

const (
	// AddressBalanceDetailNoUTXO returns address balance without utxos
	AddressBalanceDetailNoUTXO = iota
	// AddressBalanceDetailUTXO returns address balance with utxos
	AddressBalanceDetailUTXO
	// addressBalanceDetailUTXOIndexed returns address balance with utxos and index for updates, used only internally
	addressBalanceDetailUTXOIndexed
)
2018-01-24 10:02:46 -07:00
// RocksDB handle
2017-08-28 09:50:57 -06:00
type RocksDB struct {
2018-10-01 05:22:03 -06:00
path string
db * gorocksdb . DB
wo * gorocksdb . WriteOptions
ro * gorocksdb . ReadOptions
cfh [ ] * gorocksdb . ColumnFamilyHandle
chainParser bchain . BlockChainParser
is * common . InternalState
metrics * common . Metrics
cache * gorocksdb . Cache
maxOpenFiles int
cbs connectBlockStats
2017-08-28 09:50:57 -06:00
}
2018-01-24 08:57:05 -07:00
// Column family indexes into RocksDB.cfh; order must match cfBaseNames
// plus the chain-type specific names appended in NewRocksDB.
const (
	cfDefault = iota
	cfHeight
	cfAddresses
	cfBlockTxs
	cfTransactions
	cfFiatRates
	// BitcoinType
	cfAddressBalance
	cfTxAddresses
	// EthereumType
	cfAddressContracts = cfAddressBalance
)
2018-11-23 06:08:10 -07:00
// common columns; cfNames is populated in NewRocksDB from cfBaseNames
// plus the chain-type specific names below
var cfNames []string
var cfBaseNames = []string{"default", "height", "addresses", "blockTxs", "transactions", "fiatRates"}

// type specific columns
var cfNamesBitcoinType = []string{"addressBalance", "txAddresses"}
var cfNamesEthereumType = []string{"addressContracts"}
2018-01-24 08:57:05 -07:00
2018-09-18 03:49:39 -06:00
func openDB ( path string , c * gorocksdb . Cache , openFiles int ) ( * gorocksdb . DB , [ ] * gorocksdb . ColumnFamilyHandle , error ) {
2018-08-23 03:15:59 -06:00
// opts with bloom filter
2018-09-18 03:49:39 -06:00
opts := createAndSetDBOptions ( 10 , c , openFiles )
2018-08-23 03:15:59 -06:00
// opts for addresses without bloom filter
// from documentation: if most of your queries are executed using iterators, you shouldn't set bloom filter
2018-09-18 03:49:39 -06:00
optsAddresses := createAndSetDBOptions ( 0 , c , openFiles )
2018-11-23 06:08:10 -07:00
// default, height, addresses, blockTxids, transactions
Add fiat rates functionality (#316)
* Add initial commit for fiat rates functionality
* templates.go: use bash from current user's environment
* bitcoinrpc.go: add FiatRates and FiatRatesParams to config
* blockbook.go: add initFiatRatesDownloader kickoff
* bitcoin.json: add coingecko API URL
* rockdb.go: add FindTicker and StoreTicker functions
* rocksdb_test.go: add a simple test for storing and getting FiatRate tickers
* rocksdb: add FindLastTicker and convertDate, make FindTicker return strings
* rocksdb: add ConvertDate function and CoinGeckoTicker struct, update tests
* blockbook.go, fiat: finalize the CoinGecko downloader
* coingecko.go: do not stop syncing when encountered an error
* rocksdb_test: fix the exported function name
* worker.go: make getBlockInfoFromBlockID a public function
* public.go: apiTickers kickoff
* rocksdb_test: fix the unittest comment
* coingecko.go: update comments
* blockbook.go, fiat: reword CoinGecko -> FiatRates, fix binary search upper bound, remove assignment of goroutine call result
* rename coingecko -> fiat_rates
* fiat_rates: export only the necessary methods
* blockbook.go: update log message
* bitcoinrpc.go: remove fiatRates settings
* use CurrencyRatesTicker structure everywhere, fix time format string, update tests, use UTC time
* add /api/v2/tickers tests, store rates as strings (json.Number)
* fiat_rates: add more tests, metrics and tickers-list endpoint, make the "currency" parameter mandatory
* public, worker: move FiatRates API logic to worker.go
* fiat_rates: add a future date test, fix comments, add more checks, store time as a pointer
* rocksdb_test: remove unneeded code
* fiat_rates: add a "ping" call to check server availability
* fiat_rates: do not return empty ticker, return nil instead if not found
add a test for non-existent ticker
* rocksdb_test: remove Sleep from tests
* worker.go: do not propagate all API errors to the client
* move InitTestFiatRates from rocksdb.go to public_test.go
* public.go: fix FiatRatesFindLastTicker result check
* fiat_rates: mock API server responses
* remove commented-out code
* fiat_rates: add comment explaining what periodSeconds attribute is used for
* websocket.go: implement fiatRates websocket endpoints & add tests
* fiatRates: add getFiatRatesTickersList websocket endpoint & test
* fiatRates: make websocket getFiatRatesByDate accept an array of dates, add more tests
* fiatRates: remove getFiatRatesForBlockID from websocket endpoints
* fiatRates: remove "if test", use custom startTime instead
Update tests and mock data
* fiatRates: finalize websocket functionality
add "date" parameter to TickerList
return data timestamps where needed
fix sync bugs (nil timestamp, duplicate save)
* fiatRates: add FiatRates configs for different coins
* worker.go: make GetBlockInfoFromBlockID private again
* fiatRates: wait & retry on errors, remove Ping function
* websocket.go: remove incorrect comment
* fiatRates: move coingecko-related code to a separate file, use interface
* fiatRates: if the new rates are the same as previous, try five more times, and only then store them
* coingecko: fix getting actual rates, add a timestamp parameter to get uncached responses
* vertcoin_testnet.json: remove fiat rates parameters
* fiat_rates: add timestamp to log message about skipping the repeating rates
2019-12-17 02:40:02 -07:00
cfOptions := [ ] * gorocksdb . Options { opts , opts , optsAddresses , opts , opts , opts }
2018-11-23 06:08:10 -07:00
// append type specific options
count := len ( cfNames ) - len ( cfOptions )
for i := 0 ; i < count ; i ++ {
cfOptions = append ( cfOptions , opts )
}
db , cfh , err := gorocksdb . OpenDbColumnFamilies ( opts , path , cfNames , cfOptions )
2017-08-28 09:50:57 -06:00
if err != nil {
2018-02-05 02:31:22 -07:00
return nil , nil , err
2017-08-28 09:50:57 -06:00
}
2018-02-05 02:31:22 -07:00
return db , cfh , nil
}
// NewRocksDB opens an internal handle to RocksDB environment. Close
// needs to be called to release it.
2018-09-18 03:49:39 -06:00
func NewRocksDB ( path string , cacheSize , maxOpenFiles int , parser bchain . BlockChainParser , metrics * common . Metrics ) ( d * RocksDB , err error ) {
glog . Infof ( "rocksdb: opening %s, required data version %v, cache size %v, max open files %v" , path , dbVersion , cacheSize , maxOpenFiles )
2018-11-23 06:08:10 -07:00
2019-05-03 12:46:40 -06:00
cfNames = append ( [ ] string { } , cfBaseNames ... )
2018-11-23 06:08:10 -07:00
chainType := parser . GetChainType ( )
if chainType == bchain . ChainBitcoinType {
cfNames = append ( cfNames , cfNamesBitcoinType ... )
} else if chainType == bchain . ChainEthereumType {
cfNames = append ( cfNames , cfNamesEthereumType ... )
} else {
return nil , errors . New ( "Unknown chain type" )
}
2018-08-23 13:14:16 -06:00
c := gorocksdb . NewLRUCache ( cacheSize )
2018-09-18 03:49:39 -06:00
db , cfh , err := openDB ( path , c , maxOpenFiles )
2018-10-16 05:35:50 -06:00
if err != nil {
return nil , err
}
2017-08-28 09:50:57 -06:00
wo := gorocksdb . NewDefaultWriteOptions ( )
ro := gorocksdb . NewDefaultReadOptions ( )
2018-10-01 05:22:03 -06:00
return & RocksDB { path , db , wo , ro , cfh , parser , nil , metrics , c , maxOpenFiles , connectBlockStats { } } , nil
2018-02-05 02:31:22 -07:00
}
2017-08-28 09:50:57 -06:00
2018-02-05 02:31:22 -07:00
func ( d * RocksDB ) closeDB ( ) error {
for _ , h := range d . cfh {
h . Destroy ( )
}
d . db . Close ( )
2018-05-22 04:56:51 -06:00
d . db = nil
2018-02-05 02:31:22 -07:00
return nil
2017-08-28 09:50:57 -06:00
}
Add fiat rates functionality (#316)
* Add initial commit for fiat rates functionality
* templates.go: use bash from current user's environment
* bitcoinrpc.go: add FiatRates and FiatRatesParams to config
* blockbook.go: add initFiatRatesDownloader kickoff
* bitcoin.json: add coingecko API URL
* rockdb.go: add FindTicker and StoreTicker functions
* rocksdb_test.go: add a simple test for storing and getting FiatRate tickers
* rocksdb: add FindLastTicker and convertDate, make FindTicker return strings
* rocksdb: add ConvertDate function and CoinGeckoTicker struct, update tests
* blockbook.go, fiat: finalize the CoinGecko downloader
* coingecko.go: do not stop syncing when encountered an error
* rocksdb_test: fix the exported function name
* worker.go: make getBlockInfoFromBlockID a public function
* public.go: apiTickers kickoff
* rocksdb_test: fix the unittest comment
* coingecko.go: update comments
* blockbook.go, fiat: reword CoinGecko -> FiatRates, fix binary search upper bound, remove assignment of goroutine call result
* rename coingecko -> fiat_rates
* fiat_rates: export only the necessary methods
* blockbook.go: update log message
* bitcoinrpc.go: remove fiatRates settings
* use CurrencyRatesTicker structure everywhere, fix time format string, update tests, use UTC time
* add /api/v2/tickers tests, store rates as strings (json.Number)
* fiat_rates: add more tests, metrics and tickers-list endpoint, make the "currency" parameter mandatory
* public, worker: move FiatRates API logic to worker.go
* fiat_rates: add a future date test, fix comments, add more checks, store time as a pointer
* rocksdb_test: remove unneeded code
* fiat_rates: add a "ping" call to check server availability
* fiat_rates: do not return empty ticker, return nil instead if not found
add a test for non-existent ticker
* rocksdb_test: remove Sleep from tests
* worker.go: do not propagate all API errors to the client
* move InitTestFiatRates from rocksdb.go to public_test.go
* public.go: fix FiatRatesFindLastTicker result check
* fiat_rates: mock API server responses
* remove commented-out code
* fiat_rates: add comment explaining what periodSeconds attribute is used for
* websocket.go: implement fiatRates websocket endpoints & add tests
* fiatRates: add getFiatRatesTickersList websocket endpoint & test
* fiatRates: make websocket getFiatRatesByDate accept an array of dates, add more tests
* fiatRates: remove getFiatRatesForBlockID from websocket endpoints
* fiatRates: remove "if test", use custom startTime instead
Update tests and mock data
* fiatRates: finalize websocket functionality
add "date" parameter to TickerList
return data timestamps where needed
fix sync bugs (nil timestamp, duplicate save)
* fiatRates: add FiatRates configs for different coins
* worker.go: make GetBlockInfoFromBlockID private again
* fiatRates: wait & retry on errors, remove Ping function
* websocket.go: remove incorrect comment
* fiatRates: move coingecko-related code to a separate file, use interface
* fiatRates: if the new rates are the same as previous, try five more times, and only then store them
* coingecko: fix getting actual rates, add a timestamp parameter to get uncached responses
* vertcoin_testnet.json: remove fiat rates parameters
* fiat_rates: add timestamp to log message about skipping the repeating rates
2019-12-17 02:40:02 -07:00
// FiatRatesConvertDate checks if the date is in correct format and returns the Time object.
// Possible formats are: YYYYMMDDhhmmss, YYYYMMDDhhmm, YYYYMMDDhh, YYYYMMDD
func FiatRatesConvertDate(date string) (*time.Time, error) {
	// try progressively shorter prefixes of the full format, dropping two
	// characters (one time component) per iteration, down to the date-only form
	for l := len(FiatRatesTimeFormat); l >= 8; l -= 2 {
		if parsed, err := time.Parse(FiatRatesTimeFormat[:l], date); err == nil {
			return &parsed, nil
		}
	}
	msg := "Date \"" + date + "\" does not match any of available formats. "
	msg += "Possible formats are: YYYYMMDDhhmmss, YYYYMMDDhhmm, YYYYMMDDhh, YYYYMMDD"
	return nil, errors.New(msg)
}
// FiatRatesStoreTicker stores ticker data at the specified time
func (d *RocksDB) FiatRatesStoreTicker(ticker *CurrencyRatesTicker) error {
	// reject incomplete tickers up front
	if len(ticker.Rates) == 0 {
		return errors.New("Error storing ticker: empty rates")
	}
	if ticker.Timestamp == nil {
		return errors.New("Error storing ticker: empty timestamp")
	}
	ratesMarshalled, err := json.Marshal(ticker.Rates)
	if err != nil {
		glog.Error("Error marshalling ticker rates: ", err)
		return err
	}
	// the formatted UTC timestamp is the row key, so tickers sort by time
	key := []byte(ticker.Timestamp.UTC().Format(FiatRatesTimeFormat))
	if err := d.db.PutCF(d.wo, d.cfh[cfFiatRates], key, ratesMarshalled); err != nil {
		glog.Error("Error storing ticker: ", err)
		return err
	}
	return nil
}
// FiatRatesFindTicker gets FiatRates data closest to the specified timestamp
func (d *RocksDB) FiatRatesFindTicker(tickerTime *time.Time) (*CurrencyRatesTicker, error) {
	ticker := &CurrencyRatesTicker{}
	key := []byte(tickerTime.UTC().Format(FiatRatesTimeFormat))
	it := d.db.NewIteratorCF(d.ro, d.cfh[cfFiatRates])
	defer it.Close()
	// keys sort by formatted time, so the first record at or after the
	// requested time is the closest following ticker
	it.Seek(key)
	if it.Valid() {
		timeObj, err := time.Parse(FiatRatesTimeFormat, string(it.Key().Data()))
		if err != nil {
			glog.Error("FiatRatesFindTicker time parse error: ", err)
			return nil, err
		}
		timeObj = timeObj.UTC()
		ticker.Timestamp = &timeObj
		if err := json.Unmarshal(it.Value().Data(), &ticker.Rates); err != nil {
			glog.Error("FiatRatesFindTicker error unpacking rates: ", err)
			return nil, err
		}
	}
	if err := it.Err(); err != nil {
		glog.Error("FiatRatesFindTicker Iterator error: ", err)
		return nil, err
	}
	if !it.Valid() {
		return nil, nil // ticker not found
	}
	return ticker, nil
}
// FiatRatesFindLastTicker gets the last (most recent) FiatRates record.
// Returns nil ticker and nil error when no ticker is stored.
func (d *RocksDB) FiatRatesFindLastTicker() (*CurrencyRatesTicker, error) {
	ticker := &CurrencyRatesTicker{}
	it := d.db.NewIteratorCF(d.ro, d.cfh[cfFiatRates])
	defer it.Close()
	// keys sort by formatted time, so the last key is the newest ticker;
	// a plain if is used instead of a loop - only one record is processed
	it.SeekToLast()
	if it.Valid() {
		// the row key is the ticker timestamp in FiatRatesTimeFormat
		timeObj, err := time.Parse(FiatRatesTimeFormat, string(it.Key().Data()))
		if err != nil {
			// fixed copy-pasted log prefix (was "FiatRatesFindTicker")
			glog.Error("FiatRatesFindLastTicker time parse error: ", err)
			return nil, err
		}
		timeObj = timeObj.UTC()
		ticker.Timestamp = &timeObj
		if err := json.Unmarshal(it.Value().Data(), &ticker.Rates); err != nil {
			glog.Error("FiatRatesFindLastTicker error unpacking rates: ", err)
			return nil, err
		}
	}
	if err := it.Err(); err != nil {
		glog.Error("FiatRatesFindLastTicker Iterator error: ", err)
		// return nil, not a partially populated ticker, on error -
		// consistent with FiatRatesFindTicker
		return nil, err
	}
	if !it.Valid() {
		return nil, nil // ticker not found
	}
	return ticker, nil
}
2017-08-28 09:50:57 -06:00
// Close releases the RocksDB environment opened in NewRocksDB.
func ( d * RocksDB ) Close ( ) error {
2018-05-22 04:56:51 -06:00
if d . db != nil {
// store the internal state of the app
2018-05-29 03:37:35 -06:00
if d . is != nil && d . is . DbState == common . DbStateOpen {
d . is . DbState = common . DbStateClosed
if err := d . StoreInternalState ( d . is ) ; err != nil {
2018-10-04 01:19:41 -06:00
glog . Info ( "internalState: " , err )
2018-05-22 04:56:51 -06:00
}
}
glog . Infof ( "rocksdb: close" )
d . closeDB ( )
d . wo . Destroy ( )
d . ro . Destroy ( )
}
2017-08-28 09:50:57 -06:00
return nil
}
2018-03-12 09:28:52 -06:00
// Reopen reopens the database
// It closes and reopens db, nobody can access the database during the operation!
func ( d * RocksDB ) Reopen ( ) error {
err := d . closeDB ( )
if err != nil {
return err
}
d . db = nil
2018-09-18 03:49:39 -06:00
db , cfh , err := openDB ( d . path , d . cache , d . maxOpenFiles )
2018-03-12 09:28:52 -06:00
if err != nil {
return err
}
d . db , d . cfh = db , cfh
return nil
}
2018-11-01 11:28:48 -06:00
// atoi converts s to an integer, returning 0 when s is not a valid number.
func atoi(s string) int {
	if n, err := strconv.Atoi(s); err == nil {
		return n
	}
	return 0
}
// GetMemoryStats returns memory usage statistics as reported by RocksDB
2018-08-22 08:20:52 -06:00
func ( d * RocksDB ) GetMemoryStats ( ) string {
2018-11-01 11:28:48 -06:00
var total , indexAndFilter , memtable int
2018-08-22 08:20:52 -06:00
type columnStats struct {
name string
indexAndFilter string
memtable string
}
cs := make ( [ ] columnStats , len ( cfNames ) )
for i := 0 ; i < len ( cfNames ) ; i ++ {
cs [ i ] . name = cfNames [ i ]
cs [ i ] . indexAndFilter = d . db . GetPropertyCF ( "rocksdb.estimate-table-readers-mem" , d . cfh [ i ] )
cs [ i ] . memtable = d . db . GetPropertyCF ( "rocksdb.cur-size-all-mem-tables" , d . cfh [ i ] )
2018-11-01 11:28:48 -06:00
indexAndFilter += atoi ( cs [ i ] . indexAndFilter )
memtable += atoi ( cs [ i ] . memtable )
2018-08-22 08:20:52 -06:00
}
m := struct {
cacheUsage int
pinnedCacheUsage int
columns [ ] columnStats
} {
cacheUsage : d . cache . GetUsage ( ) ,
pinnedCacheUsage : d . cache . GetPinnedUsage ( ) ,
columns : cs ,
}
2018-11-01 11:28:48 -06:00
total = m . cacheUsage + indexAndFilter + memtable
return fmt . Sprintf ( "Total %d, indexAndFilter %d, memtable %d, %+v" , total , indexAndFilter , memtable , m )
2018-08-22 08:20:52 -06:00
}
2018-08-30 14:39:03 -06:00
// StopIteration is returned by callback function to signal stop of iteration
type StopIteration struct{}

// Error implements the error interface; the message is intentionally empty
// because StopIteration is a control-flow sentinel, not a real failure.
func (e *StopIteration) Error() string {
	return ""
}
2019-01-03 09:19:56 -07:00
// GetTransactionsCallback is called by GetTransactions/GetAddrDescTransactions for each found tx
// indexes contain array of indexes (input negative, output positive) in tx where is given address
// Returning a *StopIteration error stops the iteration without reporting an error.
type GetTransactionsCallback func(txid string, height uint32, indexes []int32) error
2018-03-20 08:58:35 -06:00
// GetTransactions finds all input/output transactions for address
2018-01-29 15:25:40 -07:00
// Transaction are passed to callback function.
2019-01-03 09:19:56 -07:00
func ( d * RocksDB ) GetTransactions ( address string , lower uint32 , higher uint32 , fn GetTransactionsCallback ) ( err error ) {
2018-01-30 10:22:25 -07:00
if glog . V ( 1 ) {
2018-03-20 08:58:35 -06:00
glog . Infof ( "rocksdb: address get %s %d-%d " , address , lower , higher )
2018-01-30 10:22:25 -07:00
}
2018-08-28 16:25:26 -06:00
addrDesc , err := d . chainParser . GetAddrDescFromAddress ( address )
2018-03-28 05:23:43 -06:00
if err != nil {
return err
}
2018-08-30 14:39:03 -06:00
return d . GetAddrDescTransactions ( addrDesc , lower , higher , fn )
}
2017-08-28 09:50:57 -06:00
2018-08-30 14:39:03 -06:00
// GetAddrDescTransactions finds all input/output transactions for address descriptor
// Transaction are passed to callback function in the order from newest block to the oldest
func (d *RocksDB) GetAddrDescTransactions(addrDesc bchain.AddressDescriptor, lower uint32, higher uint32, fn GetTransactionsCallback) (err error) {
	txidUnpackedLen := d.chainParser.PackedTxidLen()
	addrDescLen := len(addrDesc)
	// rows are keyed addrDesc+packed height; seeking to `higher` and iterating
	// forward until the key exceeds the `lower` key walks newest-to-oldest
	// NOTE(review): this implies packAddressKey packs heights so that higher
	// heights sort first - confirm against packAddressKey
	startKey := packAddressKey(addrDesc, higher)
	stopKey := packAddressKey(addrDesc, lower)
	indexes := make([]int32, 0, 16)
	it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses])
	defer it.Close()
	for it.Seek(startKey); it.Valid(); it.Next() {
		key := it.Key().Data()
		if bytes.Compare(key, stopKey) > 0 {
			// past the requested height range
			break
		}
		// skip rows whose key length does not match this addrDesc - rows of a
		// longer addrDesc sharing the same prefix may be mixed in
		if len(key) != addrDescLen+packedHeightBytes {
			if glog.V(2) {
				glog.Warningf("rocksdb: addrDesc %s - mixed with %s", addrDesc, hex.EncodeToString(key))
			}
			continue
		}
		val := it.Value().Data()
		if glog.V(2) {
			glog.Infof("rocksdb: addresses %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
		}
		_, height, err := unpackAddressKey(key)
		if err != nil {
			return err
		}
		// the value is a sequence of records: packed txid followed by one or
		// more varint-encoded indexes; in each index the lowest bit marks the
		// last index of that tx, the remaining bits carry the signed index
		for len(val) > txidUnpackedLen {
			tx, err := d.chainParser.UnpackTxid(val[:txidUnpackedLen])
			if err != nil {
				return err
			}
			// indexes slice is reused across records to avoid reallocation
			indexes = indexes[:0]
			val = val[txidUnpackedLen:]
			for {
				index, l := unpackVarint32(val)
				indexes = append(indexes, index>>1)
				val = val[l:]
				if index&1 == 1 {
					// terminator bit set - last index of this tx
					break
				} else if len(val) == 0 {
					glog.Warningf("rocksdb: addresses contain incorrect data %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
					break
				}
			}
			if err := fn(tx, height, indexes); err != nil {
				if _, ok := err.(*StopIteration); ok {
					// callback requested to stop - not an error
					return nil
				}
				return err
			}
		}
		// leftover bytes indicate a corrupted row
		if len(val) != 0 {
			glog.Warningf("rocksdb: addresses contain incorrect data %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
		}
	}
	return nil
}
2017-10-05 06:35:07 -06:00
// write batch operation kinds used when storing/removing height records
const (
	opInsert = iota // 0 - add records for a connected block
	opDelete        // 1 - remove records for a disconnected block
)
2018-04-23 09:05:23 -06:00
// ConnectBlock indexes addresses in the block and stores them in db
// All column family updates are collected in a single write batch so the
// block is applied atomically.
func (d *RocksDB) ConnectBlock(block *bchain.Block) error {
	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()
	if glog.V(2) {
		glog.Infof("rocksdb: insert %d %s", block.Height, block.Hash)
	}
	chainType := d.chainParser.GetChainType()

	// store height->block record
	if err := d.writeHeightFromBlock(wb, block, opInsert); err != nil {
		return err
	}
	addresses := make(addressesMap)
	if chainType == bchain.ChainBitcoinType {
		// UTXO model: track per-tx addresses and per-address balances
		txAddressesMap := make(map[string]*TxAddresses)
		balances := make(map[string]*AddrBalance)
		if err := d.processAddressesBitcoinType(block, addresses, txAddressesMap, balances); err != nil {
			return err
		}
		if err := d.storeTxAddresses(wb, txAddressesMap); err != nil {
			return err
		}
		if err := d.storeBalances(wb, balances); err != nil {
			return err
		}
		if err := d.storeAndCleanupBlockTxs(wb, block); err != nil {
			return err
		}
	} else if chainType == bchain.ChainEthereumType {
		// account model: track per-address contract data
		addressContracts := make(map[string]*AddrContracts)
		blockTxs, err := d.processAddressesEthereumType(block, addresses, addressContracts)
		if err != nil {
			return err
		}
		if err := d.storeAddressContracts(wb, addressContracts); err != nil {
			return err
		}
		if err := d.storeAndCleanupBlockTxsEthereumType(wb, block, blockTxs); err != nil {
			return err
		}
	} else {
		return errors.New("Unknown chain type")
	}
	// address->txs index is common to both chain types
	if err := d.storeAddresses(wb, block.Height, addresses); err != nil {
		return err
	}
	if err := d.db.Write(d.wo, wb); err != nil {
		return err
	}
	d.is.AppendBlockTime(uint32(block.Time))
	return nil
}
2018-04-17 15:50:01 -06:00
// Addresses index
2017-08-28 09:50:57 -06:00
2019-01-03 09:19:56 -07:00
// txIndexes pairs a packed transaction ID with the indexes
// (input negative, output positive) at which an address appears in the tx
type txIndexes struct {
	btxID   []byte
	indexes []int32
}

// addressesMap is a map of addresses in a block
// each address contains a slice of transactions with indexes where the address appears
// slice is used instead of map so that order is defined and also search in case of few items
type addressesMap map[string][]txIndexes
2017-10-05 06:35:07 -06:00
// outpoint identifies a transaction output by packed txid and output index
type outpoint struct {
	btxID []byte
	index int32
}
2019-03-05 05:48:11 -07:00
// TxInput holds input data of the transaction in TxAddresses
type TxInput struct {
	// AddrDesc is the descriptor of the address the input spends from
	AddrDesc bchain.AddressDescriptor
	// ValueSat is the spent amount in satoshis
	ValueSat big.Int
}

// Addresses converts AddressDescriptor of the input to array of strings
func (ti *TxInput) Addresses(p bchain.BlockChainParser) ([]string, bool, error) {
	return p.GetAddressesFromAddrDesc(ti.AddrDesc)
}
2019-03-05 05:48:11 -07:00
// TxOutput holds output data of the transaction in TxAddresses
type TxOutput struct {
	// AddrDesc is the descriptor of the address the output pays to
	AddrDesc bchain.AddressDescriptor
	// Spent is true once a later transaction consumes this output
	Spent bool
	// ValueSat is the output amount in satoshis
	ValueSat big.Int
}

// Addresses converts AddressDescriptor of the output to array of strings
func (to *TxOutput) Addresses(p bchain.BlockChainParser) ([]string, bool, error) {
	return p.GetAddressesFromAddrDesc(to.AddrDesc)
}
2019-03-05 05:48:11 -07:00
// TxAddresses stores transaction inputs and outputs with amounts
type TxAddresses struct {
	// Height is the block height the transaction was mined in
	Height  uint32
	Inputs  []TxInput
	Outputs []TxOutput
}
2019-04-30 08:06:56 -06:00
// Utxo holds information about unspent transaction output
type Utxo struct {
	BtxID    []byte
	Vout     int32
	Height   uint32
	ValueSat big.Int
}

// AddrBalance stores number of transactions and balances of an address
type AddrBalance struct {
	Txs        uint32
	SentSat    big.Int
	BalanceSat big.Int
	Utxos      []Utxo
	utxosMap   map[string]int
}

// ReceivedSat computes received amount from total balance and sent amount
func (ab *AddrBalance) ReceivedSat() *big.Int {
	received := new(big.Int)
	return received.Add(&ab.BalanceSat, &ab.SentSat)
}
2019-05-11 18:19:51 -06:00
// addUtxo appends the utxo to the address' utxo slice and keeps the
// optional txid index in sync
func (ab *AddrBalance) addUtxo(u *Utxo) {
	ab.Utxos = append(ab.Utxos, *u)
	ab.manageUtxoMap(u)
}

// manageUtxoMap maintains utxosMap, a map from packed txid to the position of
// the first utxo of that tx in ab.Utxos; the index is built lazily only after
// the slice reaches 16 entries, so small addresses avoid the map overhead
func (ab *AddrBalance) manageUtxoMap(u *Utxo) {
	l := len(ab.Utxos)
	if l >= 16 {
		if len(ab.utxosMap) == 0 {
			// first time over the threshold - build the index from scratch
			ab.utxosMap = make(map[string]int, 32)
			for i := 0; i < l; i++ {
				s := string(ab.Utxos[i].BtxID)
				// keep only the first occurrence of each txid
				if _, e := ab.utxosMap[s]; !e {
					ab.utxosMap[s] = i
				}
			}
		} else {
			// index exists - register the just-appended utxo (at position l-1)
			s := string(u.BtxID)
			if _, e := ab.utxosMap[s]; !e {
				ab.utxosMap[s] = l - 1
			}
		}
	}
}
2020-02-09 03:46:13 -07:00
// addUtxoInDisconnect re-adds a utxo during block disconnect; the utxo must be
// inserted at the right position in ab.Utxos so that the utxosMap index
// (first occurrence of each txid) stays consistent
func (ab *AddrBalance) addUtxoInDisconnect(u *Utxo) {
	insert := -1
	if len(ab.utxosMap) > 0 {
		// index available - position of the tx's first utxo is known directly
		if i, e := ab.utxosMap[string(u.BtxID)]; e {
			insert = i
		}
	} else {
		// linear scan; the first-word compare via unsafe.Pointer is a cheap
		// pre-filter before the full bytes.Equal
		for i := range ab.Utxos {
			utxo := &ab.Utxos[i]
			if *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&u.BtxID[0])) && bytes.Equal(utxo.BtxID, u.BtxID) {
				insert = i
				break
			}
		}
	}
	if insert > -1 {
		// check if it is necessary to insert the utxo into the array
		for i := insert; i < len(ab.Utxos); i++ {
			utxo := &ab.Utxos[i]
			// either the vout is greater than the inserted vout or it is a different tx
			if utxo.Vout > u.Vout || *(*int)(unsafe.Pointer(&utxo.BtxID[0])) != *(*int)(unsafe.Pointer(&u.BtxID[0])) || !bytes.Equal(utxo.BtxID, u.BtxID) {
				// found the right place, insert the utxo
				ab.Utxos = append(ab.Utxos, *u)
				copy(ab.Utxos[i+1:], ab.Utxos[i:])
				ab.Utxos[i] = *u
				// reset utxosMap after insert, the index will have to be rebuilt if needed
				ab.utxosMap = nil
				return
			}
		}
	}
	// tx not found (or all existing utxos precede it) - append at the end
	ab.Utxos = append(ab.Utxos, *u)
	ab.manageUtxoMap(u)
}
2019-05-11 18:19:51 -06:00
// markUtxoAsSpent finds outpoint btxID:vout in utxos and marks it as spent
// for small number of utxos the linear search is done, for larger number there is a hashmap index
// it is much faster than removing the utxo from the slice as it would cause in memory reallocations
func (ab *AddrBalance) markUtxoAsSpent(btxID []byte, vout int32) {
	if len(ab.utxosMap) == 0 {
		// linear path; compare the first machine word via unsafe.Pointer as a
		// cheap pre-filter before the full bytes.Equal
		for i := range ab.Utxos {
			utxo := &ab.Utxos[i]
			if utxo.Vout == vout && *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&btxID[0])) && bytes.Equal(utxo.BtxID, btxID) {
				// mark utxo as spent by setting vout=-1
				utxo.Vout = -1
				return
			}
		}
	} else {
		// indexed path: start at the tx's first utxo and scan forward
		if i, e := ab.utxosMap[string(btxID)]; e {
			l := len(ab.Utxos)
			for ; i < l; i++ {
				utxo := &ab.Utxos[i]
				if utxo.Vout == vout {
					if bytes.Equal(utxo.BtxID, btxID) {
						// mark utxo as spent by setting vout=-1
						utxo.Vout = -1
						return
					}
					// vout matched but txid differs - fall through to error
					break
				}
			}
		}
	}
	// reaching this point means the outpoint was not found - data inconsistency
	glog.Errorf("Utxo %s:%d not found, utxosMap size %d", hex.EncodeToString(btxID), vout, len(ab.utxosMap))
}
2018-08-16 07:31:11 -06:00
// blockTxs pairs a packed txid with the outpoints consumed by its inputs
type blockTxs struct {
	btxID  []byte
	inputs []outpoint
}
2018-08-30 14:39:03 -06:00
func ( d * RocksDB ) resetValueSatToZero ( valueSat * big . Int , addrDesc bchain . AddressDescriptor , logText string ) {
2018-08-28 16:25:26 -06:00
ad , _ , err := d . chainParser . GetAddressesFromAddrDesc ( addrDesc )
2018-08-15 11:22:26 -06:00
if err != nil {
2018-09-02 13:31:33 -06:00
glog . Warningf ( "rocksdb: unparsable address hex '%v' reached negative %s %v, resetting to 0. Parser error %v" , addrDesc , logText , valueSat . String ( ) , err )
2018-08-15 11:22:26 -06:00
} else {
2018-09-02 13:31:33 -06:00
glog . Warningf ( "rocksdb: address %v hex '%v' reached negative %s %v, resetting to 0" , ad , addrDesc , logText , valueSat . String ( ) )
2018-08-15 11:22:26 -06:00
}
valueSat . SetInt64 ( 0 )
}
2018-11-01 11:28:48 -06:00
// GetAndResetConnectBlockStats gets statistics about cache usage in connect blocks and resets the counters
2018-09-24 10:23:13 -06:00
func ( d * RocksDB ) GetAndResetConnectBlockStats ( ) string {
s := fmt . Sprintf ( "%+v" , d . cbs )
d . cbs = connectBlockStats { }
return s
}
2019-01-03 09:19:56 -07:00
// processAddressesBitcoinType walks all transactions of the block and updates
// the passed accumulators: addresses (addrDesc -> txs/indexes in this block),
// txAddressesMap (packed txid -> TxAddresses) and balances (addrDesc ->
// AddrBalance). Outputs of all txs are processed before any inputs so inputs
// can spend outputs created earlier in the same block.
func (d *RocksDB) processAddressesBitcoinType(block *bchain.Block, addresses addressesMap, txAddressesMap map[string]*TxAddresses, balances map[string]*AddrBalance) error {
	blockTxIDs := make([][]byte, len(block.Txs))
	blockTxAddresses := make([]*TxAddresses, len(block.Txs))
	// first process all outputs so that inputs can refer to txs in this block
	for txi := range block.Txs {
		tx := &block.Txs[txi]
		btxID, err := d.chainParser.PackTxid(tx.Txid)
		if err != nil {
			return err
		}
		blockTxIDs[txi] = btxID
		ta := TxAddresses{Height: block.Height}
		ta.Outputs = make([]TxOutput, len(tx.Vout))
		txAddressesMap[string(btxID)] = &ta
		blockTxAddresses[txi] = &ta
		for i, output := range tx.Vout {
			tao := &ta.Outputs[i]
			tao.ValueSat = output.ValueSat
			addrDesc, err := d.chainParser.GetAddrDescFromVout(&output)
			// skip outputs with unparsable, empty or oversized address descriptors
			if err != nil || len(addrDesc) == 0 || len(addrDesc) > maxAddrDescLen {
				if err != nil {
					// do not log ErrAddressMissing, transactions can be without to address (for example eth contracts)
					if err != bchain.ErrAddressMissing {
						glog.Warningf("rocksdb: addrDesc: %v - height %d, tx %v, output %v, error %v", err, block.Height, tx.Txid, output, err)
					}
				} else {
					glog.V(1).Infof("rocksdb: height %d, tx %v, vout %v, skipping addrDesc of length %d", block.Height, tx.Txid, i, len(addrDesc))
				}
				continue
			}
			tao.AddrDesc = addrDesc
			if d.chainParser.IsAddrDescIndexable(addrDesc) {
				strAddrDesc := string(addrDesc)
				// get the balance from the in-memory cache, falling back to db
				balance, e := balances[strAddrDesc]
				if !e {
					balance, err = d.GetAddrDescBalance(addrDesc, addressBalanceDetailUTXOIndexed)
					if err != nil {
						return err
					}
					if balance == nil {
						// address seen for the first time
						balance = &AddrBalance{}
					}
					balances[strAddrDesc] = balance
					d.cbs.balancesMiss++
				} else {
					d.cbs.balancesHit++
				}
				// credit the output value and record the new utxo
				balance.BalanceSat.Add(&balance.BalanceSat, &output.ValueSat)
				balance.addUtxo(&Utxo{
					BtxID:    btxID,
					Vout:     int32(i),
					Height:   block.Height,
					ValueSat: output.ValueSat,
				})
				counted := addToAddressesMap(addresses, strAddrDesc, btxID, int32(i))
				if !counted {
					// count the tx only once per address
					balance.Txs++
				}
			}
		}
	}
	// process inputs
	for txi := range block.Txs {
		tx := &block.Txs[txi]
		spendingTxid := blockTxIDs[txi]
		ta := blockTxAddresses[txi]
		ta.Inputs = make([]TxInput, len(tx.Vin))
		logged := false
		for i, input := range tx.Vin {
			tai := &ta.Inputs[i]
			btxID, err := d.chainParser.PackTxid(input.Txid)
			if err != nil {
				// do not process inputs without input txid
				if err == bchain.ErrTxidMissing {
					continue
				}
				return err
			}
			stxID := string(btxID)
			// find the spent tx in this block's map, falling back to db
			ita, e := txAddressesMap[stxID]
			if !e {
				ita, err = d.getTxAddresses(btxID)
				if err != nil {
					return err
				}
				if ita == nil {
					// allow parser to process unknown input, some coins may implement special handling, default is to log warning
					tai.AddrDesc = d.chainParser.GetAddrDescForUnknownInput(tx, i)
					continue
				}
				txAddressesMap[stxID] = ita
				d.cbs.txAddressesMiss++
			} else {
				d.cbs.txAddressesHit++
			}
			if len(ita.Outputs) <= int(input.Vout) {
				glog.Warningf("rocksdb: height %d, tx %v, input tx %v vout %v is out of bounds of stored tx", block.Height, tx.Txid, input.Txid, input.Vout)
				continue
			}
			spentOutput := &ita.Outputs[int(input.Vout)]
			if spentOutput.Spent {
				glog.Warningf("rocksdb: height %d, tx %v, input tx %v vout %v is double spend", block.Height, tx.Txid, input.Txid, input.Vout)
			}
			tai.AddrDesc = spentOutput.AddrDesc
			tai.ValueSat = spentOutput.ValueSat
			// mark the output as spent in tx
			spentOutput.Spent = true
			if len(spentOutput.AddrDesc) == 0 {
				// log empty address at most once per tx
				if !logged {
					glog.V(1).Infof("rocksdb: height %d, tx %v, input tx %v vout %v skipping empty address", block.Height, tx.Txid, input.Txid, input.Vout)
					logged = true
				}
				continue
			}
			if d.chainParser.IsAddrDescIndexable(spentOutput.AddrDesc) {
				strAddrDesc := string(spentOutput.AddrDesc)
				balance, e := balances[strAddrDesc]
				if !e {
					balance, err = d.GetAddrDescBalance(spentOutput.AddrDesc, addressBalanceDetailUTXOIndexed)
					if err != nil {
						return err
					}
					if balance == nil {
						balance = &AddrBalance{}
					}
					balances[strAddrDesc] = balance
					d.cbs.balancesMiss++
				} else {
					d.cbs.balancesHit++
				}
				// input indexes are stored bit-complemented (^i) to mark them as inputs
				counted := addToAddressesMap(addresses, strAddrDesc, spendingTxid, ^int32(i))
				if !counted {
					balance.Txs++
				}
				// debit the spent value and mark the utxo spent
				balance.BalanceSat.Sub(&balance.BalanceSat, &spentOutput.ValueSat)
				balance.markUtxoAsSpent(btxID, int32(input.Vout))
				if balance.BalanceSat.Sign() < 0 {
					d.resetValueSatToZero(&balance.BalanceSat, spentOutput.AddrDesc, "balance")
				}
				balance.SentSat.Add(&balance.SentSat, &spentOutput.ValueSat)
			}
		}
	}
	return nil
}
2019-03-05 05:48:11 -07:00
// addToAddressesMap maintains mapping between addresses and transactions in one block
// the method assumes that outputs in the block are processed before the inputs
// the return value is true if the tx was processed before, to not to count the tx multiple times
func addToAddressesMap(addresses addressesMap, strAddrDesc string, btxID []byte, index int32) bool {
	// check that the address was already processed in this block
	// if not found, it has certainly not been counted
	at, found := addresses[strAddrDesc]
	if found {
		// if the tx is already in the slice, append the index to the array of indexes
		for i, t := range at {
			if bytes.Equal(btxID, t.btxID) {
				// write through at[i] (t is a copy); append may reallocate the indexes slice
				at[i].indexes = append(t.indexes, index)
				return true
			}
		}
	}
	// first occurrence of this tx for the address - record it with a single index
	addresses[strAddrDesc] = append(at, txIndexes{
		btxID:   btxID,
		indexes: []int32{index},
	})
	return false
}
2019-01-03 09:19:56 -07:00
func ( d * RocksDB ) storeAddresses ( wb * gorocksdb . WriteBatch , height uint32 , addresses addressesMap ) error {
for addrDesc , txi := range addresses {
2018-08-30 14:39:03 -06:00
ba := bchain . AddressDescriptor ( addrDesc )
2018-08-19 09:43:00 -06:00
key := packAddressKey ( ba , height )
2019-01-03 09:19:56 -07:00
val := d . packTxIndexes ( txi )
2018-08-02 06:30:45 -06:00
wb . PutCF ( d . cfh [ cfAddresses ] , key , val )
}
return nil
}
2018-08-20 10:35:46 -06:00
func ( d * RocksDB ) storeTxAddresses ( wb * gorocksdb . WriteBatch , am map [ string ] * TxAddresses ) error {
2018-08-02 06:30:45 -06:00
varBuf := make ( [ ] byte , maxPackedBigintBytes )
buf := make ( [ ] byte , 1024 )
for txID , ta := range am {
2018-08-02 08:10:28 -06:00
buf = packTxAddresses ( ta , buf , varBuf )
2018-08-02 06:30:45 -06:00
wb . PutCF ( d . cfh [ cfTxAddresses ] , [ ] byte ( txID ) , buf )
}
return nil
}
2018-08-20 10:35:46 -06:00
func ( d * RocksDB ) storeBalances ( wb * gorocksdb . WriteBatch , abm map [ string ] * AddrBalance ) error {
2019-05-03 12:46:40 -06:00
// allocate buffer initial buffer
buf := make ( [ ] byte , 1024 )
varBuf := make ( [ ] byte , maxPackedBigintBytes )
2018-08-28 16:25:26 -06:00
for addrDesc , ab := range abm {
2019-05-03 12:46:40 -06:00
// balance with 0 transactions is removed from db - happens on disconnect
2018-08-20 10:35:46 -06:00
if ab == nil || ab . Txs <= 0 {
2018-08-30 14:39:03 -06:00
wb . DeleteCF ( d . cfh [ cfAddressBalance ] , bchain . AddressDescriptor ( addrDesc ) )
2018-08-03 11:26:16 -06:00
} else {
2019-05-03 12:46:40 -06:00
buf = packAddrBalance ( ab , buf , varBuf )
wb . PutCF ( d . cfh [ cfAddressBalance ] , bchain . AddressDescriptor ( addrDesc ) , buf )
2018-08-03 11:26:16 -06:00
}
2018-08-02 06:30:45 -06:00
}
return nil
}
2018-11-23 06:08:10 -07:00
// cleanupBlockTxs removes cfBlockTxs rows older than the configured number of
// blocks to keep (chainParser.KeepBlockAddresses). It walks backwards from the
// oldest height that should be dropped and stops at the first missing row,
// assuming everything older was already cleaned up by a previous call.
// NOTE(review): the deletes go directly through d.db with d.wo rather than the
// passed write batch wb - confirm this is intentional.
func (d *RocksDB) cleanupBlockTxs(wb *gorocksdb.WriteBatch, block *bchain.Block) error {
	keep := d.chainParser.KeepBlockAddresses()
	// cleanup old block address
	if block.Height > uint32(keep) {
		for rh := block.Height - uint32(keep); rh > 0; rh-- {
			key := packUint(rh)
			val, err := d.db.GetCF(d.ro, d.cfh[cfBlockTxs], key)
			if err != nil {
				return err
			}
			// nil data means the key was not found in DB
			if val.Data() == nil {
				break
			}
			val.Free()
			d.db.DeleteCF(d.wo, d.cfh[cfBlockTxs], key)
		}
	}
	return nil
}
2018-08-16 07:31:11 -06:00
// storeAndCleanupBlockTxs stores the txids of the block together with the
// outpoints of their inputs in cfBlockTxs (keyed by block height), then
// removes rows for blocks no longer needed for disconnect.
// Record format per tx: packed txid, varuint input count, packed outpoints.
func (d *RocksDB) storeAndCleanupBlockTxs(wb *gorocksdb.WriteBatch, block *bchain.Block) error {
	pl := d.chainParser.PackedTxidLen()
	buf := make([]byte, 0, pl*len(block.Txs))
	varBuf := make([]byte, vlq.MaxLen64)
	// placeholder txid used for inputs that carry no input txid
	zeroTx := make([]byte, pl)
	for i := range block.Txs {
		tx := &block.Txs[i]
		o := make([]outpoint, len(tx.Vin))
		for v := range tx.Vin {
			vin := &tx.Vin[v]
			btxID, err := d.chainParser.PackTxid(vin.Txid)
			if err != nil {
				// do not process inputs without input txid
				if err == bchain.ErrTxidMissing {
					btxID = zeroTx
				} else {
					return err
				}
			}
			o[v].btxID = btxID
			o[v].index = int32(vin.Vout)
		}
		btxID, err := d.chainParser.PackTxid(tx.Txid)
		if err != nil {
			return err
		}
		buf = append(buf, btxID...)
		l := packVaruint(uint(len(o)), varBuf)
		buf = append(buf, varBuf[:l]...)
		buf = append(buf, d.packOutpoints(o)...)
	}
	key := packUint(block.Height)
	wb.PutCF(d.cfh[cfBlockTxs], key, buf)
	return d.cleanupBlockTxs(wb, block)
}
2018-08-16 07:31:11 -06:00
// getBlockTxs loads and deserializes the cfBlockTxs row for the given height.
// It returns an empty slice when the height is not present in the column.
func (d *RocksDB) getBlockTxs(height uint32) ([]blockTxs, error) {
	pl := d.chainParser.PackedTxidLen()
	val, err := d.db.GetCF(d.ro, d.cfh[cfBlockTxs], packUint(height))
	if err != nil {
		return nil, err
	}
	defer val.Free()
	buf := val.Data()
	bt := make([]blockTxs, 0, 8)
	for i := 0; i < len(buf); {
		// every record must start with a full packed txid
		if len(buf)-i < pl {
			glog.Error("rocksdb: Inconsistent data in blockTxs ", hex.EncodeToString(buf))
			return nil, errors.New("Inconsistent data in blockTxs")
		}
		// copy the txid out of the rocksdb-owned buffer, which is freed on return
		txid := append([]byte(nil), buf[i:i+pl]...)
		i += pl
		o, ol, err := d.unpackNOutpoints(buf[i:])
		if err != nil {
			glog.Error("rocksdb: Inconsistent data in blockTxs ", hex.EncodeToString(buf))
			return nil, errors.New("Inconsistent data in blockTxs")
		}
		bt = append(bt, blockTxs{
			btxID:  txid,
			inputs: o,
		})
		i += ol
	}
	return bt, nil
}
2018-11-01 11:28:48 -06:00
// GetAddrDescBalance returns AddrBalance for given addrDesc
// or nil when the descriptor is not found; detail controls whether
// and how the stored UTXOs are unpacked.
func (d *RocksDB) GetAddrDescBalance(addrDesc bchain.AddressDescriptor, detail AddressBalanceDetail) (*AddrBalance, error) {
	val, err := d.db.GetCF(d.ro, d.cfh[cfAddressBalance], addrDesc)
	if err != nil {
		return nil, err
	}
	defer val.Free()
	buf := val.Data()
	// 3 is minimum length of addrBalance - 1 byte txs, 1 byte sent, 1 byte balance
	if len(buf) < 3 {
		return nil, nil
	}
	return unpackAddrBalance(buf, d.chainParser.PackedTxidLen(), detail)
}
2018-08-20 10:35:46 -06:00
// GetAddressBalance returns address balance for an address or nil if address not found
2019-05-11 18:19:51 -06:00
func ( d * RocksDB ) GetAddressBalance ( address string , detail AddressBalanceDetail ) ( * AddrBalance , error ) {
2018-08-28 16:25:26 -06:00
addrDesc , err := d . chainParser . GetAddrDescFromAddress ( address )
2018-08-20 10:35:46 -06:00
if err != nil {
return nil , err
}
2019-05-11 18:19:51 -06:00
return d . GetAddrDescBalance ( addrDesc , detail )
2018-08-20 10:35:46 -06:00
}
// getTxAddresses loads the TxAddresses record for a packed txid,
// returning nil when the txid is not stored.
func (d *RocksDB) getTxAddresses(btxID []byte) (*TxAddresses, error) {
	val, err := d.db.GetCF(d.ro, d.cfh[cfTxAddresses], btxID)
	if err != nil {
		return nil, err
	}
	defer val.Free()
	buf := val.Data()
	// 3 is minimum length of txAddresses - 1 byte height, 1 byte inputs len, 1 byte outputs len
	if len(buf) < 3 {
		return nil, nil
	}
	return unpackTxAddresses(buf)
}
2018-08-20 10:35:46 -06:00
// GetTxAddresses returns TxAddresses for given txid or nil if not found
func (d *RocksDB) GetTxAddresses(txid string) (*TxAddresses, error) {
	packed, packErr := d.chainParser.PackTxid(txid)
	if packErr != nil {
		return nil, packErr
	}
	return d.getTxAddresses(packed)
}
2019-03-29 10:01:20 -06:00
// AddrDescForOutpoint defines function that returns address descriptor for given outpoint or nil if outpoint not found
// A negative Vout is interpreted as a bitwise-complemented input index (^Vout),
// addressing the transaction's inputs instead of its outputs.
func (d *RocksDB) AddrDescForOutpoint(outpoint bchain.Outpoint) bchain.AddressDescriptor {
	ta, err := d.GetTxAddresses(outpoint.Txid)
	if err != nil || ta == nil {
		return nil
	}
	if outpoint.Vout < 0 {
		vin := ^outpoint.Vout
		if len(ta.Inputs) <= int(vin) {
			return nil
		}
		return ta.Inputs[vin].AddrDesc
	}
	if len(ta.Outputs) <= int(outpoint.Vout) {
		return nil
	}
	return ta.Outputs[outpoint.Vout].AddrDesc
}
2018-08-20 10:35:46 -06:00
// packTxAddresses serializes ta into buf (which is reset and reused), with
// varBuf as scratch space for varint/bigint encoding.
// Layout: varuint height, varuint input count, inputs, varuint output count, outputs.
func packTxAddresses(ta *TxAddresses, buf []byte, varBuf []byte) []byte {
	buf = buf[:0]
	l := packVaruint(uint(ta.Height), varBuf)
	buf = append(buf, varBuf[:l]...)
	l = packVaruint(uint(len(ta.Inputs)), varBuf)
	buf = append(buf, varBuf[:l]...)
	for i := range ta.Inputs {
		buf = appendTxInput(&ta.Inputs[i], buf, varBuf)
	}
	l = packVaruint(uint(len(ta.Outputs)), varBuf)
	buf = append(buf, varBuf[:l]...)
	for i := range ta.Outputs {
		buf = appendTxOutput(&ta.Outputs[i], buf, varBuf)
	}
	return buf
}
2018-08-20 10:35:46 -06:00
func appendTxInput ( txi * TxInput , buf [ ] byte , varBuf [ ] byte ) [ ] byte {
2018-08-30 14:39:03 -06:00
la := len ( txi . AddrDesc )
2018-08-16 07:31:11 -06:00
l := packVaruint ( uint ( la ) , varBuf )
2018-08-15 11:22:26 -06:00
buf = append ( buf , varBuf [ : l ] ... )
2018-08-30 14:39:03 -06:00
buf = append ( buf , txi . AddrDesc ... )
2018-08-20 10:35:46 -06:00
l = packBigint ( & txi . ValueSat , varBuf )
2018-08-15 11:22:26 -06:00
buf = append ( buf , varBuf [ : l ] ... )
return buf
}
2018-08-20 10:35:46 -06:00
func appendTxOutput ( txo * TxOutput , buf [ ] byte , varBuf [ ] byte ) [ ] byte {
2018-08-30 14:39:03 -06:00
la := len ( txo . AddrDesc )
2018-08-20 10:35:46 -06:00
if txo . Spent {
2018-08-02 08:10:28 -06:00
la = ^ la
}
l := packVarint ( la , varBuf )
buf = append ( buf , varBuf [ : l ] ... )
2018-08-30 14:39:03 -06:00
buf = append ( buf , txo . AddrDesc ... )
2018-08-20 10:35:46 -06:00
l = packBigint ( & txo . ValueSat , varBuf )
2018-08-02 08:10:28 -06:00
buf = append ( buf , varBuf [ : l ] ... )
return buf
}
2019-05-11 18:19:51 -06:00
// unpackAddrBalance deserializes an AddrBalance record.
// Layout: varuint tx count, bigint sentSat, bigint balanceSat, then - when
// detail requests UTXOs - a sequence of (packed txid, varuint vout,
// varuint height, bigint value) entries.
func unpackAddrBalance(buf []byte, txidUnpackedLen int, detail AddressBalanceDetail) (*AddrBalance, error) {
	txs, l := unpackVaruint(buf)
	sentSat, sl := unpackBigint(buf[l:])
	balanceSat, bl := unpackBigint(buf[l+sl:])
	// l becomes the offset of the first utxo entry
	l = l + sl + bl
	ab := &AddrBalance{
		Txs:        uint32(txs),
		SentSat:    sentSat,
		BalanceSat: balanceSat,
	}
	if detail != AddressBalanceDetailNoUTXO {
		// estimate the size of utxos to avoid reallocation
		ab.Utxos = make([]Utxo, 0, len(buf[l:])/txidUnpackedLen+3)
		// ab.utxosMap = make(map[string]int, cap(ab.Utxos))
		for len(buf[l:]) >= txidUnpackedLen+3 {
			// copy the txid so the record does not alias the caller's buffer
			btxID := append([]byte(nil), buf[l:l+txidUnpackedLen]...)
			l += txidUnpackedLen
			vout, ll := unpackVaruint(buf[l:])
			l += ll
			height, ll := unpackVaruint(buf[l:])
			l += ll
			valueSat, ll := unpackBigint(buf[l:])
			l += ll
			u := Utxo{
				BtxID:    btxID,
				Vout:     int32(vout),
				Height:   uint32(height),
				ValueSat: valueSat,
			}
			if detail == AddressBalanceDetailUTXO {
				ab.Utxos = append(ab.Utxos, u)
			} else {
				// indexed detail - addUtxo also maintains the utxo lookup index
				ab.addUtxo(&u)
			}
		}
	}
	return ab, nil
}
// packAddrBalance serializes ab into buf (reset and reused); varBuf is scratch.
// UTXOs marked as spent (Vout < 0) are skipped, so the stored record contains
// only unspent outputs.
func packAddrBalance(ab *AddrBalance, buf, varBuf []byte) []byte {
	buf = buf[:0]
	l := packVaruint(uint(ab.Txs), varBuf)
	buf = append(buf, varBuf[:l]...)
	l = packBigint(&ab.SentSat, varBuf)
	buf = append(buf, varBuf[:l]...)
	l = packBigint(&ab.BalanceSat, varBuf)
	buf = append(buf, varBuf[:l]...)
	for _, utxo := range ab.Utxos {
		// if Vout < 0, utxo is marked as spent
		if utxo.Vout >= 0 {
			buf = append(buf, utxo.BtxID...)
			l = packVaruint(uint(utxo.Vout), varBuf)
			buf = append(buf, varBuf[:l]...)
			l = packVaruint(uint(utxo.Height), varBuf)
			buf = append(buf, varBuf[:l]...)
			l = packBigint(&utxo.ValueSat, varBuf)
			buf = append(buf, varBuf[:l]...)
		}
	}
	return buf
}
2018-08-20 10:35:46 -06:00
// unpackTxAddresses deserializes a TxAddresses record packed by packTxAddresses.
func unpackTxAddresses(buf []byte) (*TxAddresses, error) {
	ta := TxAddresses{}
	height, l := unpackVaruint(buf)
	ta.Height = uint32(height)
	inputs, ll := unpackVaruint(buf[l:])
	l += ll
	ta.Inputs = make([]TxInput, inputs)
	for i := uint(0); i < inputs; i++ {
		// each unpack helper returns the number of bytes it consumed
		l += unpackTxInput(&ta.Inputs[i], buf[l:])
	}
	outputs, ll := unpackVaruint(buf[l:])
	l += ll
	ta.Outputs = make([]TxOutput, outputs)
	for i := uint(0); i < outputs; i++ {
		l += unpackTxOutput(&ta.Outputs[i], buf[l:])
	}
	return &ta, nil
}
2018-08-20 10:35:46 -06:00
// unpackTxInput deserializes one TxInput from buf and returns the number of bytes consumed.
func unpackTxInput(ti *TxInput, buf []byte) int {
	al, l := unpackVaruint(buf)
	// copy the address descriptor so it does not alias the caller's buffer
	ti.AddrDesc = append([]byte(nil), buf[l:l+int(al)]...)
	// al is reused from here on as the offset just past the descriptor
	al += uint(l)
	ti.ValueSat, l = unpackBigint(buf[al:])
	return l + int(al)
}
2018-08-20 10:35:46 -06:00
// unpackTxOutput deserializes one TxOutput from buf and returns the number of
// bytes consumed. A negative descriptor length marks the output as spent
// (the length is stored bitwise-complemented, see appendTxOutput).
func unpackTxOutput(to *TxOutput, buf []byte) int {
	al, l := unpackVarint(buf)
	if al < 0 {
		to.Spent = true
		al = ^al
	}
	// copy the address descriptor so it does not alias the caller's buffer
	to.AddrDesc = append([]byte(nil), buf[l:l+al]...)
	// al is reused from here on as the offset just past the descriptor
	al += l
	to.ValueSat, l = unpackBigint(buf[al:])
	return l + al
}
2019-01-03 09:19:56 -07:00
// packTxIndexes serializes the per-address list of transactions of one block.
// Each tx is its packed txid followed by its indexes; every index is shifted
// left by one bit, and the lowest bit of the last index is set as a terminator.
func (d *RocksDB) packTxIndexes(txi []txIndexes) []byte {
	buf := make([]byte, 0, 32)
	bvout := make([]byte, vlq.MaxLen32)
	// store the txs in reverse order for ordering from newest to oldest
	for j := len(txi) - 1; j >= 0; j-- {
		t := &txi[j]
		buf = append(buf, []byte(t.btxID)...)
		for i, index := range t.indexes {
			index <<= 1
			if i == len(t.indexes)-1 {
				// mark the last index of this tx
				index |= 1
			}
			l := packVarint32(index, bvout)
			buf = append(buf, bvout[:l]...)
		}
	}
	return buf
}
2018-08-02 06:30:45 -06:00
func ( d * RocksDB ) packOutpoints ( outpoints [ ] outpoint ) [ ] byte {
2018-11-14 15:02:42 -07:00
buf := make ( [ ] byte , 0 , 32 )
2018-08-02 06:30:45 -06:00
bvout := make ( [ ] byte , vlq . MaxLen32 )
for _ , o := range outpoints {
l := packVarint32 ( o . index , bvout )
buf = append ( buf , [ ] byte ( o . btxID ) ... )
buf = append ( buf , bvout [ : l ] ... )
}
return buf
}
2018-08-16 07:31:11 -06:00
// unpackNOutpoints deserializes a varuint-counted list of outpoints from buf,
// returning the outpoints and the number of bytes consumed.
func (d *RocksDB) unpackNOutpoints(buf []byte) ([]outpoint, int, error) {
	txidUnpackedLen := d.chainParser.PackedTxidLen()
	n, p := unpackVaruint(buf)
	outpoints := make([]outpoint, n)
	for i := uint(0); i < n; i++ {
		// need the full txid plus at least one byte for the vout varint
		if p+txidUnpackedLen >= len(buf) {
			return nil, 0, errors.New("Inconsistent data in unpackNOutpoints")
		}
		// copy the txid so it does not alias the caller's buffer
		btxID := append([]byte(nil), buf[p:p+txidUnpackedLen]...)
		p += txidUnpackedLen
		vout, voutLen := unpackVarint32(buf[p:])
		p += voutLen
		outpoints[i] = outpoint{
			btxID: btxID,
			index: vout,
		}
	}
	return outpoints, p, nil
}
2017-09-11 04:20:21 -06:00
// Block index
2017-08-28 09:50:57 -06:00
2018-08-23 15:20:07 -06:00
// BlockInfo holds information about blocks kept in column height
type BlockInfo struct {
	Hash   string // block hash
	Time   int64  // block timestamp (packed as a uint32, see packBlockInfo)
	Txs    uint32 // number of transactions in the block
	Size   uint32 // block size in bytes
	Height uint32 // Height is not packed!
}
2018-08-23 15:20:07 -06:00
func ( d * RocksDB ) packBlockInfo ( block * BlockInfo ) ( [ ] byte , error ) {
2018-08-21 08:36:14 -06:00
packed := make ( [ ] byte , 0 , 64 )
varBuf := make ( [ ] byte , vlq . MaxLen64 )
b , err := d . chainParser . PackBlockHash ( block . Hash )
if err != nil {
return nil , err
}
2020-01-29 12:45:22 -07:00
pl := d . chainParser . PackedTxidLen ( )
if len ( b ) != pl {
glog . Warning ( "Non standard block hash for height " , block . Height , ", hash [" , block . Hash , "]" )
if len ( b ) > pl {
b = b [ : pl ]
} else {
b = append ( b , make ( [ ] byte , len ( b ) - pl ) ... )
}
}
2018-08-21 08:36:14 -06:00
packed = append ( packed , b ... )
packed = append ( packed , packUint ( uint32 ( block . Time ) ) ... )
2018-08-23 15:20:07 -06:00
l := packVaruint ( uint ( block . Txs ) , varBuf )
2018-08-21 08:36:14 -06:00
packed = append ( packed , varBuf [ : l ] ... )
l = packVaruint ( uint ( block . Size ) , varBuf )
packed = append ( packed , varBuf [ : l ] ... )
return packed , nil
}
// unpackBlockInfo deserializes a cfHeight row; it returns nil (without error)
// when the buffer is too short, e.g. when the key does not exist.
// Height is not stored in the value and is left at its zero value.
func (d *RocksDB) unpackBlockInfo(buf []byte) (*BlockInfo, error) {
	pl := d.chainParser.PackedTxidLen()
	// minimum length is PackedTxidLen + 4 bytes time + 1 byte txs + 1 byte size
	if len(buf) < pl+4+2 {
		return nil, nil
	}
	txid, err := d.chainParser.UnpackBlockHash(buf[:pl])
	if err != nil {
		return nil, err
	}
	t := unpackUint(buf[pl:])
	txs, l := unpackVaruint(buf[pl+4:])
	size, _ := unpackVaruint(buf[pl+4+l:])
	return &BlockInfo{
		Hash: txid,
		Time: int64(t),
		Txs:  uint32(txs),
		Size: uint32(size),
	}, nil
}
2018-01-24 10:02:46 -07:00
// GetBestBlock returns the block hash of the block with highest height in the db
func (d *RocksDB) GetBestBlock() (uint32, string, error) {
	it := d.db.NewIteratorCF(d.ro, d.cfh[cfHeight])
	defer it.Close()
	// heights are the column keys, so the last key is the highest block
	if it.SeekToLast(); it.Valid() {
		bestHeight := unpackUint(it.Key().Data())
		info, err := d.unpackBlockInfo(it.Value().Data())
		if info != nil {
			if glog.V(1) {
				glog.Infof("rocksdb: bestblock %d %+v", bestHeight, info)
			}
			return bestHeight, info.Hash, err
		}
	}
	// empty column - no blocks indexed yet
	return 0, "", nil
}
2017-09-11 04:20:21 -06:00
2018-01-24 10:02:46 -07:00
// GetBlockHash returns block hash at given height or empty string if not found
func (d *RocksDB) GetBlockHash(height uint32) (string, error) {
	key := packUint(height)
	val, err := d.db.GetCF(d.ro, d.cfh[cfHeight], key)
	if err != nil {
		return "", err
	}
	defer val.Free()
	// unpackBlockInfo returns nil info for a missing/short row
	info, err := d.unpackBlockInfo(val.Data())
	if info == nil {
		return "", err
	}
	return info.Hash, nil
}
// GetBlockInfo returns block info stored in db
// or nil when the height is not found
func (d *RocksDB) GetBlockInfo(height uint32) (*BlockInfo, error) {
	key := packUint(height)
	val, err := d.db.GetCF(d.ro, d.cfh[cfHeight], key)
	if err != nil {
		return nil, err
	}
	defer val.Free()
	bi, err := d.unpackBlockInfo(val.Data())
	if err != nil || bi == nil {
		return nil, err
	}
	// Height is not part of the packed value, fill it from the requested key
	bi.Height = height
	return bi, err
}
2018-08-23 15:20:07 -06:00
// writeHeightFromBlock stores the block's metadata in the cfHeight column,
// converting the bchain.Block header fields into a BlockInfo record.
func (d *RocksDB) writeHeightFromBlock(wb *gorocksdb.WriteBatch, block *bchain.Block, op int) error {
	return d.writeHeight(wb, block.Height, &BlockInfo{
		Hash:   block.Hash,
		Time:   block.Time,
		Txs:    uint32(len(block.Txs)),
		Size:   uint32(block.Size),
		Height: block.Height,
	}, op)
}
2017-09-11 04:20:21 -06:00
2018-08-23 15:20:07 -06:00
// writeHeight inserts or deletes the cfHeight row for the given height
// according to op, and keeps the internal-state best height in sync.
func (d *RocksDB) writeHeight(wb *gorocksdb.WriteBatch, height uint32, bi *BlockInfo, op int) error {
	key := packUint(height)
	switch op {
	case opInsert:
		val, err := d.packBlockInfo(bi)
		if err != nil {
			return err
		}
		wb.PutCF(d.cfh[cfHeight], key, val)
		d.is.UpdateBestHeight(height)
	case opDelete:
		wb.DeleteCF(d.cfh[cfHeight], key)
		// on delete (disconnect) the previous block becomes the best one
		d.is.UpdateBestHeight(height - 1)
	}
	return nil
}
2018-08-03 11:26:16 -06:00
// Disconnect blocks
2018-04-23 09:05:23 -06:00
2020-01-31 01:29:54 -07:00
// disconnectTxAddressesInputs reverses the effect of the inputs of one
// transaction: it marks the spent outputs of the previous transactions as
// unspent again, restores the spending addresses' balances (SentSat,
// BalanceSat and the UTXO set) and decrements their tx counts - once per
// address per tx, deduplicated via addressFoundInTx. Previous TxAddresses
// records that were modified are collected in txAddressesToUpdate for storage.
func (d *RocksDB) disconnectTxAddressesInputs(wb *gorocksdb.WriteBatch, btxID []byte, inputs []outpoint, txa *TxAddresses, txAddressesToUpdate map[string]*TxAddresses,
	getAddressBalance func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error),
	addressFoundInTx func(addrDesc bchain.AddressDescriptor, btxID []byte) bool) error {
	var err error
	var balance *AddrBalance
	for i, t := range txa.Inputs {
		if len(t.AddrDesc) > 0 {
			input := &inputs[i]
			exist := addressFoundInTx(t.AddrDesc, btxID)
			s := string(input.btxID)
			// look up the spent tx, using the per-block cache first
			sa, found := txAddressesToUpdate[s]
			if !found {
				sa, err = d.getTxAddresses(input.btxID)
				if err != nil {
					return err
				}
				if sa != nil {
					txAddressesToUpdate[s] = sa
				}
			}
			var inputHeight uint32
			if sa != nil {
				// un-spend the output of the previous transaction
				sa.Outputs[input.index].Spent = false
				inputHeight = sa.Height
			}
			if d.chainParser.IsAddrDescIndexable(t.AddrDesc) {
				balance, err = getAddressBalance(t.AddrDesc)
				if err != nil {
					return err
				}
				if balance != nil {
					// subtract number of txs only once
					if !exist {
						balance.Txs--
					}
					balance.SentSat.Sub(&balance.SentSat, &t.ValueSat)
					if balance.SentSat.Sign() < 0 {
						d.resetValueSatToZero(&balance.SentSat, t.AddrDesc, "sent amount")
					}
					balance.BalanceSat.Add(&balance.BalanceSat, &t.ValueSat)
					// return the previously spent utxo to the address's utxo set
					balance.addUtxoInDisconnect(&Utxo{
						BtxID:    input.btxID,
						Vout:     input.index,
						Height:   inputHeight,
						ValueSat: t.ValueSat,
					})
				} else {
					ad, _, _ := d.chainParser.GetAddressesFromAddrDesc(t.AddrDesc)
					glog.Warningf("Balance for address %s (%s) not found", ad, t.AddrDesc)
				}
			}
		}
	}
	return nil
}
// disconnectTxAddressesOutputs reverses the effect of the outputs of one
// transaction: it subtracts output values from the receiving addresses'
// balances, marks the corresponding UTXOs as spent (removing them on store)
// and decrements tx counts - once per address per tx via addressFoundInTx.
func (d *RocksDB) disconnectTxAddressesOutputs(wb *gorocksdb.WriteBatch, btxID []byte, txa *TxAddresses,
	getAddressBalance func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error),
	addressFoundInTx func(addrDesc bchain.AddressDescriptor, btxID []byte) bool) error {
	for i, t := range txa.Outputs {
		if len(t.AddrDesc) > 0 {
			exist := addressFoundInTx(t.AddrDesc, btxID)
			if d.chainParser.IsAddrDescIndexable(t.AddrDesc) {
				balance, err := getAddressBalance(t.AddrDesc)
				if err != nil {
					return err
				}
				if balance != nil {
					// subtract number of txs only once
					if !exist {
						balance.Txs--
					}
					balance.BalanceSat.Sub(&balance.BalanceSat, &t.ValueSat)
					if balance.BalanceSat.Sign() < 0 {
						d.resetValueSatToZero(&balance.BalanceSat, t.AddrDesc, "balance")
					}
					balance.markUtxoAsSpent(btxID, int32(i))
				} else {
					ad, _, _ := d.chainParser.GetAddressesFromAddrDesc(t.AddrDesc)
					glog.Warningf("Balance for address %s (%s) not found", ad, t.AddrDesc)
				}
			}
		}
	}
	return nil
}
// disconnectBlock reverses all index changes made by one block: it restores
// balances and the TxAddresses of transactions spent by the block, removes the
// block's address index rows, its cfBlockTxs and cfHeight entries, and deletes
// the block's transactions. Everything is accumulated in one write batch and
// committed atomically at the end.
func (d *RocksDB) disconnectBlock(height uint32, blockTxs []blockTxs) error {
	wb := gorocksdb.NewWriteBatch()
	defer wb.Destroy()
	txAddressesToUpdate := make(map[string]*TxAddresses)
	txAddresses := make([]*TxAddresses, len(blockTxs))
	txsToDelete := make(map[string]struct{})
	balances := make(map[string]*AddrBalance)
	// getAddressBalance caches balances so each address is loaded from db only once
	getAddressBalance := func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error) {
		var err error
		s := string(addrDesc)
		b, fb := balances[s]
		if !fb {
			b, err = d.GetAddrDescBalance(addrDesc, addressBalanceDetailUTXOIndexed)
			if err != nil {
				return nil, err
			}
			balances[s] = b
		}
		return b, nil
	}
	// all addresses in the block are stored in blockAddressesTxs, together with a map of transactions where they appear
	blockAddressesTxs := make(map[string]map[string]struct{})
	// addressFoundInTx handles updates of the blockAddressesTxs map and returns true if the address+tx was already encountered
	addressFoundInTx := func(addrDesc bchain.AddressDescriptor, btxID []byte) bool {
		sAddrDesc := string(addrDesc)
		sBtxID := string(btxID)
		a, exist := blockAddressesTxs[sAddrDesc]
		if !exist {
			blockAddressesTxs[sAddrDesc] = map[string]struct{}{sBtxID: {}}
		} else {
			_, exist = a[sBtxID]
			if !exist {
				a[sBtxID] = struct{}{}
			}
		}
		return exist
	}
	glog.Info("Disconnecting block ", height, " containing ", len(blockTxs), " transactions")
	// when connecting block, outputs are processed first
	// when disconnecting, inputs must be reversed first
	for i := range blockTxs {
		btxID := blockTxs[i].btxID
		s := string(btxID)
		txsToDelete[s] = struct{}{}
		txa, err := d.getTxAddresses(btxID)
		if err != nil {
			return err
		}
		if txa == nil {
			ut, _ := d.chainParser.UnpackTxid(btxID)
			glog.Warning("TxAddress for txid ", ut, " not found")
			continue
		}
		// keep the record for the outputs pass below
		txAddresses[i] = txa
		if err := d.disconnectTxAddressesInputs(wb, btxID, blockTxs[i].inputs, txa, txAddressesToUpdate, getAddressBalance, addressFoundInTx); err != nil {
			return err
		}
	}
	for i := range blockTxs {
		btxID := blockTxs[i].btxID
		txa := txAddresses[i]
		if txa == nil {
			continue
		}
		if err := d.disconnectTxAddressesOutputs(wb, btxID, txa, getAddressBalance, addressFoundInTx); err != nil {
			return err
		}
	}
	// drop the per-block address index rows for every touched address
	for a := range blockAddressesTxs {
		key := packAddressKey([]byte(a), height)
		wb.DeleteCF(d.cfh[cfAddresses], key)
	}
	key := packUint(height)
	wb.DeleteCF(d.cfh[cfBlockTxs], key)
	wb.DeleteCF(d.cfh[cfHeight], key)
	d.storeTxAddresses(wb, txAddressesToUpdate)
	d.storeBalancesDisconnect(wb, balances)
	for s := range txsToDelete {
		b := []byte(s)
		wb.DeleteCF(d.cfh[cfTransactions], b)
		wb.DeleteCF(d.cfh[cfTxAddresses], b)
	}
	return d.db.Write(d.wo, wb)
}
2018-11-06 10:41:13 -07:00
// DisconnectBlockRangeBitcoinType removes all data belonging to blocks in range lower-higher
// it is able to disconnect only blocks for which there are data in the blockTxs column
// NOTE(review): with lower == 0 the countdown loop below could not terminate
// normally (uint32 height >= 0 is always true) - confirm callers never pass 0.
func (d *RocksDB) DisconnectBlockRangeBitcoinType(lower uint32, higher uint32) error {
	blocks := make([][]blockTxs, higher-lower+1)
	// load all blockTxs first so the disconnect fails early if data is missing
	for height := lower; height <= higher; height++ {
		blockTxs, err := d.getBlockTxs(height)
		if err != nil {
			return err
		}
		if len(blockTxs) == 0 {
			return errors.Errorf("Cannot disconnect blocks with height %v and lower. It is necessary to rebuild index.", height)
		}
		blocks[height-lower] = blockTxs
	}
	// disconnect from the highest block down to lower
	for height := higher; height >= lower; height-- {
		err := d.disconnectBlock(height, blocks[height-lower])
		if err != nil {
			return err
		}
	}
	d.is.RemoveLastBlockTimes(int(higher-lower) + 1)
	glog.Infof("rocksdb: blocks %d-%d disconnected", lower, higher)
	return nil
}
2019-05-06 00:37:16 -06:00
// storeBalancesDisconnect drops utxos that were marked as spent during the
// disconnect (Vout < 0), restores the height ordering of the remaining utxos
// and stores the resulting balances.
func (d *RocksDB) storeBalancesDisconnect(wb *gorocksdb.WriteBatch, balances map[string]*AddrBalance) {
	for _, b := range balances {
		if b != nil {
			// remove spent utxos
			us := make([]Utxo, 0, len(b.Utxos))
			for _, u := range b.Utxos {
				// remove utxos marked as spent
				if u.Vout >= 0 {
					us = append(us, u)
				}
			}
			b.Utxos = us
			// sort utxos by height
			sort.SliceStable(b.Utxos, func(i, j int) bool {
				return b.Utxos[i].Height < b.Utxos[j].Height
			})
		}
	}
	d.storeBalances(wb, balances)
}
2018-02-05 02:31:22 -07:00
func dirSize ( path string ) ( int64 , error ) {
var size int64
err := filepath . Walk ( path , func ( _ string , info os . FileInfo , err error ) error {
2018-09-20 04:06:34 -06:00
if err == nil {
2018-09-20 04:15:46 -06:00
if ! info . IsDir ( ) {
size += info . Size ( )
}
2018-09-20 04:06:34 -06:00
}
2018-02-05 02:31:22 -07:00
return err
} )
return size , err
}
2018-02-06 01:43:54 -07:00
// DatabaseSizeOnDisk returns size of the database in bytes
2018-03-13 04:34:49 -06:00
func ( d * RocksDB ) DatabaseSizeOnDisk ( ) int64 {
size , err := dirSize ( d . path )
if err != nil {
2019-02-13 02:42:30 -07:00
glog . Warning ( "rocksdb: DatabaseSizeOnDisk: " , err )
2018-03-13 04:34:49 -06:00
return 0
}
return size
2018-02-06 01:43:54 -07:00
}
2018-03-05 10:14:41 -07:00
// GetTx returns transaction stored in db and height of the block containing it
func (d *RocksDB) GetTx(txid string) (*bchain.Tx, uint32, error) {
	key, err := d.chainParser.PackTxid(txid)
	if err != nil {
		return nil, 0, err
	}
	val, err := d.db.GetCF(d.ro, d.cfh[cfTransactions], key)
	if err != nil {
		return nil, 0, err
	}
	defer val.Free()
	data := val.Data()
	// rows of 4 bytes or less are treated as not found
	if len(data) > 4 {
		return d.chainParser.UnpackTx(data)
	}
	return nil, 0, nil
}
2018-03-06 04:36:24 -07:00
// PutTx stores transactions in db
func (d *RocksDB) PutTx(tx *bchain.Tx, height uint32, blockTime int64) error {
	key, err := d.chainParser.PackTxid(tx.Txid)
	if err != nil {
		// NOTE(review): the PackTxid error is swallowed and nil is returned,
		// same as in DeleteTx - presumably txs with unpackable txids are
		// deliberately skipped; confirm this is intentional
		return nil
	}
	buf, err := d.chainParser.PackTx(tx, height, blockTime)
	if err != nil {
		return err
	}
	err = d.db.PutCF(d.wo, d.cfh[cfTransactions], key, buf)
	if err == nil {
		// keep the column statistics in the internal state in sync
		d.is.AddDBColumnStats(cfTransactions, 1, int64(len(key)), int64(len(buf)))
	}
	return err
}
2018-03-06 04:36:24 -07:00
// DeleteTx removes transactions from db
2018-03-05 10:14:41 -07:00
func ( d * RocksDB ) DeleteTx ( txid string ) error {
2018-04-08 03:24:29 -06:00
key , err := d . chainParser . PackTxid ( txid )
2018-03-05 10:14:41 -07:00
if err != nil {
return nil
}
2018-06-04 09:11:10 -06:00
// use write batch so that this delete matches other deletes
wb := gorocksdb . NewWriteBatch ( )
defer wb . Destroy ( )
d . internalDeleteTx ( wb , key )
return d . db . Write ( d . wo , wb )
}
// internalDeleteTx checks if tx is cached and updates internal state accordingly
func ( d * RocksDB ) internalDeleteTx ( wb * gorocksdb . WriteBatch , key [ ] byte ) {
val , err := d . db . GetCF ( d . ro , d . cfh [ cfTransactions ] , key )
// ignore error, it is only for statistics
if err == nil {
l := len ( val . Data ( ) )
if l > 0 {
d . is . AddDBColumnStats ( cfTransactions , - 1 , int64 ( - len ( key ) ) , int64 ( - l ) )
}
defer val . Free ( )
}
wb . DeleteCF ( d . cfh [ cfTransactions ] , key )
2018-03-05 10:14:41 -07:00
}
2018-05-22 04:56:51 -06:00
// internalStateKey is the fixed key in the default column family under which
// the packed internal state of the db is stored
const internalStateKey = "internalState"
2019-11-19 00:46:47 -07:00
func ( d * RocksDB ) loadBlockTimes ( ) ( [ ] uint32 , error ) {
var times [ ] uint32
it := d . db . NewIteratorCF ( d . ro , d . cfh [ cfHeight ] )
defer it . Close ( )
counter := uint32 ( 0 )
time := uint32 ( 0 )
for it . SeekToFirst ( ) ; it . Valid ( ) ; it . Next ( ) {
height := unpackUint ( it . Key ( ) . Data ( ) )
if height > counter {
glog . Warning ( "gap in cfHeight: expecting " , counter , ", got " , height )
for ; counter < height ; counter ++ {
times = append ( times , time )
}
}
counter ++
info , err := d . unpackBlockInfo ( it . Value ( ) . Data ( ) )
if err != nil {
return nil , err
}
2020-01-29 12:45:22 -07:00
if info != nil {
time = uint32 ( info . Time )
}
2019-11-19 00:46:47 -07:00
times = append ( times , time )
}
glog . Info ( "loaded " , len ( times ) , " block times" )
return times , nil
}
2018-05-22 04:56:51 -06:00
// LoadInternalState loads from db internal state or initializes a new one if not yet stored.
// rpcCoin is the coin the backend RPC is configured for; it must match the
// coin recorded in the db. On success the returned state has its column list
// reconciled with cfNames, block times preloaded and sync flags reset.
func (d *RocksDB) LoadInternalState(rpcCoin string) (*common.InternalState, error) {
	val, err := d.db.GetCF(d.ro, d.cfh[cfDefault], []byte(internalStateKey))
	if err != nil {
		return nil, err
	}
	defer val.Free()
	data := val.Data()
	var is *common.InternalState
	if len(data) == 0 {
		// no stored state - fresh db; start with UtxoChecked set
		is = &common.InternalState{Coin: rpcCoin, UtxoChecked: true}
	} else {
		is, err = common.UnpackInternalState(data)
		if err != nil {
			return nil, err
		}
		// verify that the rpc coin matches DB coin
		// running it mismatched would corrupt the database
		if is.Coin == "" {
			is.Coin = rpcCoin
		} else if is.Coin != rpcCoin {
			return nil, errors.Errorf("Coins do not match. DB coin %v, RPC coin %v", is.Coin, rpcCoin)
		}
	}
	// make sure that column stats match the columns:
	// rebuild the column list from the current cfNames and carry over the
	// stored stats (sc) for columns that still exist
	sc := is.DbColumns
	nc := make([]common.InternalStateColumn, len(cfNames))
	for i := 0; i < len(nc); i++ {
		nc[i].Name = cfNames[i]
		nc[i].Version = dbVersion
		for j := 0; j < len(sc); j++ {
			if sc[j].Name == nc[i].Name {
				// check the version of the column, if it does not match, the db is not compatible
				if sc[j].Version != dbVersion {
					return nil, errors.Errorf("DB version %v of column '%v' does not match the required version %v. DB is not compatible.", sc[j].Version, sc[j].Name, dbVersion)
				}
				nc[i].Rows = sc[j].Rows
				nc[i].KeyBytes = sc[j].KeyBytes
				nc[i].ValueBytes = sc[j].ValueBytes
				nc[i].Updated = sc[j].Updated
				break
			}
		}
	}
	is.DbColumns = nc
	// load the times of all blocks into the internal state
	is.BlockTimes, err = d.loadBlockTimes()
	if err != nil {
		return nil, err
	}
	// after load, reset the synchronization data
	is.IsSynchronized = false
	is.IsMempoolSynchronized = false
	var t time.Time
	is.LastMempoolSync = t
	is.SyncMode = false
	return is, nil
}
2018-11-01 11:28:48 -06:00
// SetInconsistentState sets the internal state to DbStateInconsistent or DbStateOpen based on inconsistent parameter
// db in left in DbStateInconsistent state cannot be used and must be recreated
2018-08-18 16:23:26 -06:00
func ( d * RocksDB ) SetInconsistentState ( inconsistent bool ) error {
if d . is == nil {
return errors . New ( "Internal state not created" )
}
if inconsistent {
d . is . DbState = common . DbStateInconsistent
} else {
d . is . DbState = common . DbStateOpen
}
return d . storeState ( d . is )
}
2018-05-29 03:37:35 -06:00
// SetInternalState sets the InternalState to be used by db to collect internal state
// (column statistics etc.); it only stores the reference, no copy is made
func (d *RocksDB) SetInternalState(is *common.InternalState) {
	d.is = is
}
2018-05-22 04:56:51 -06:00
// StoreInternalState stores the internal state to db
func ( d * RocksDB ) StoreInternalState ( is * common . InternalState ) error {
2018-08-18 16:23:26 -06:00
if d . metrics != nil {
for c := 0 ; c < len ( cfNames ) ; c ++ {
rows , keyBytes , valueBytes := d . is . GetDBColumnStatValues ( c )
d . metrics . DbColumnRows . With ( common . Labels { "column" : cfNames [ c ] } ) . Set ( float64 ( rows ) )
d . metrics . DbColumnSize . With ( common . Labels { "column" : cfNames [ c ] } ) . Set ( float64 ( keyBytes + valueBytes ) )
}
2018-06-08 06:05:41 -06:00
}
2018-08-18 16:23:26 -06:00
return d . storeState ( is )
}
func ( d * RocksDB ) storeState ( is * common . InternalState ) error {
2018-05-22 04:56:51 -06:00
buf , err := is . Pack ( )
if err != nil {
return err
}
return d . db . PutCF ( d . wo , d . cfh [ cfDefault ] , [ ] byte ( internalStateKey ) , buf )
}
2018-06-08 05:19:57 -06:00
func ( d * RocksDB ) computeColumnSize ( col int , stopCompute chan os . Signal ) ( int64 , int64 , int64 , error ) {
2018-06-01 08:01:58 -06:00
var rows , keysSum , valuesSum int64
var seekKey [ ] byte
2018-08-22 08:20:52 -06:00
// do not use cache
ro := gorocksdb . NewDefaultReadOptions ( )
ro . SetFillCache ( false )
2018-06-01 08:01:58 -06:00
for {
var key [ ] byte
2018-08-22 08:20:52 -06:00
it := d . db . NewIteratorCF ( ro , d . cfh [ col ] )
2018-06-01 08:01:58 -06:00
if rows == 0 {
it . SeekToFirst ( )
} else {
2018-06-08 05:19:57 -06:00
glog . Info ( "db: Column " , cfNames [ col ] , ": rows " , rows , ", key bytes " , keysSum , ", value bytes " , valuesSum , ", in progress..." )
2018-06-01 08:01:58 -06:00
it . Seek ( seekKey )
it . Next ( )
}
for count := 0 ; it . Valid ( ) && count < refreshIterator ; it . Next ( ) {
2018-06-08 05:19:57 -06:00
select {
case <- stopCompute :
return 0 , 0 , 0 , errors . New ( "Interrupted" )
default :
}
2018-06-01 08:01:58 -06:00
key = it . Key ( ) . Data ( )
count ++
rows ++
keysSum += int64 ( len ( key ) )
valuesSum += int64 ( len ( it . Value ( ) . Data ( ) ) )
}
seekKey = append ( [ ] byte { } , key ... )
valid := it . Valid ( )
it . Close ( )
if ! valid {
break
}
}
return rows , keysSum , valuesSum , nil
}
// ComputeInternalStateColumnStats computes stats of all db columns and sets them to internal state
// can be very slow operation
2018-06-08 05:19:57 -06:00
func ( d * RocksDB ) ComputeInternalStateColumnStats ( stopCompute chan os . Signal ) error {
start := time . Now ( )
glog . Info ( "db: ComputeInternalStateColumnStats start" )
2018-06-01 08:01:58 -06:00
for c := 0 ; c < len ( cfNames ) ; c ++ {
2018-06-08 05:19:57 -06:00
rows , keysSum , valuesSum , err := d . computeColumnSize ( c , stopCompute )
2018-06-01 08:01:58 -06:00
if err != nil {
return err
}
d . is . SetDBColumnStats ( c , rows , keysSum , valuesSum )
2018-06-08 05:19:57 -06:00
glog . Info ( "db: Column " , cfNames [ c ] , ": rows " , rows , ", key bytes " , keysSum , ", value bytes " , valuesSum )
2018-06-01 08:01:58 -06:00
}
2018-06-08 05:19:57 -06:00
glog . Info ( "db: ComputeInternalStateColumnStats finished in " , time . Since ( start ) )
2018-06-01 08:01:58 -06:00
return nil
}
2020-02-14 08:33:36 -07:00
// reorderUtxo sorts by vout the consecutive run of utxos that share the same
// txid as utxos[index]; the rest of the slice is left untouched.
func reorderUtxo(utxos []Utxo, index int) {
	txid := utxos[index].BtxID
	// find the first utxo of the run
	first := index
	for first >= 0 && bytes.Equal(utxos[first].BtxID, txid) {
		first--
	}
	first++
	// find one past the last utxo of the run
	last := index + 1
	for last < len(utxos) && bytes.Equal(utxos[last].BtxID, txid) {
		last++
	}
	run := utxos[first:last]
	sort.SliceStable(run, func(a, b int) bool {
		return run[a].Vout < run[b].Vout
	})
}
// fixUtxo verifies that the utxos of address balance ba sum to its stored
// balance and repairs the stored record when possible.
// It returns (fixed, reordered, err):
//   - fixed is true when the utxo set was rebuilt from txAddresses and stored
//   - reordered is true when the utxos were only re-sorted by vout and stored
//   - err is non-nil when an inconsistency was detected (even when it was
//     fixed) or a db operation failed
func (d *RocksDB) fixUtxo(addrDesc bchain.AddressDescriptor, ba *AddrBalance) (bool, bool, error) {
	reorder := false
	// checksum accumulates the values of all utxos; for a consistent record
	// it must equal ba.BalanceSat
	var checksum big.Int
	var prevUtxo *Utxo
	for i := range ba.Utxos {
		utxo := &ba.Utxos[i]
		checksum.Add(&checksum, &utxo.ValueSat)
		if prevUtxo != nil {
			// detect utxos of the same tx that are not sorted by vout; the
			// unsafe compare of the first machine word of the txids is a
			// cheap pre-check before the full bytes.Equal
			if prevUtxo.Vout > utxo.Vout && *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&prevUtxo.BtxID[0])) && bytes.Equal(utxo.BtxID, prevUtxo.BtxID) {
				reorderUtxo(ba.Utxos, i)
				reorder = true
			}
		}
		prevUtxo = utxo
	}
	if reorder {
		// get the checksum again after reorder
		checksum.SetInt64(0)
		for i := range ba.Utxos {
			utxo := &ba.Utxos[i]
			checksum.Add(&checksum, &utxo.ValueSat)
		}
	}
	if checksum.Cmp(&ba.BalanceSat) != 0 {
		// utxos do not match the balance - rebuild the utxo set from the
		// transactions of the address stored in txAddresses
		var checksumFromTxs big.Int
		var utxos []Utxo
		err := d.GetAddrDescTransactions(addrDesc, 0, ^uint32(0), func(txid string, height uint32, indexes []int32) error {
			var ta *TxAddresses
			var err error
			for _, index := range indexes {
				// take only outputs
				if index >= 0 {
					// load the tx data lazily, only when an output is hit
					if ta == nil {
						ta, err = d.GetTxAddresses(txid)
						if err != nil {
							return err
						}
					}
					if ta == nil {
						return errors.New("DB inconsistency: tx " + txid + ": not found in txAddresses")
					} else {
						if len(ta.Outputs) <= int(index) {
							glog.Warning("DB inconsistency: txAddresses " + txid + " does not have enough outputs")
						} else {
							tao := &ta.Outputs[index]
							if !tao.Spent {
								bTxid, _ := d.chainParser.PackTxid(txid)
								checksumFromTxs.Add(&checksumFromTxs, &tao.ValueSat)
								utxos = append(utxos, Utxo{BtxID: bTxid, Height: height, Vout: index, ValueSat: tao.ValueSat})
								// stop as soon as the reconstructed sum
								// matches the stored balance
								if checksumFromTxs.Cmp(&ba.BalanceSat) == 0 {
									return &StopIteration{}
								}
							}
						}
					}
				}
			}
			return nil
		})
		if err != nil {
			return false, false, err
		}
		fixed := false
		if checksumFromTxs.Cmp(&ba.BalanceSat) == 0 {
			// reverse the utxos as they are added in descending order by height
			for i := len(utxos)/2 - 1; i >= 0; i-- {
				opp := len(utxos) - 1 - i
				utxos[i], utxos[opp] = utxos[opp], utxos[i]
			}
			ba.Utxos = utxos
			wb := gorocksdb.NewWriteBatch()
			err = d.storeBalances(wb, map[string]*AddrBalance{string(addrDesc): ba})
			if err == nil {
				err = d.db.Write(d.wo, wb)
			}
			wb.Destroy()
			if err != nil {
				return false, false, errors.Errorf("balance %s, checksum %s, from txa %s, txs %d, error storing fixed utxos %v", ba.BalanceSat.String(), checksum.String(), checksumFromTxs.String(), ba.Txs, err)
			}
			fixed = true
		}
		// an error is returned even when the record was fixed so the caller
		// can log and count the inconsistency
		return fixed, false, errors.Errorf("balance %s, checksum %s, from txa %s, txs %d", ba.BalanceSat.String(), checksum.String(), checksumFromTxs.String(), ba.Txs)
	} else if reorder {
		// only the order of utxos changed - store the reordered record
		wb := gorocksdb.NewWriteBatch()
		err := d.storeBalances(wb, map[string]*AddrBalance{string(addrDesc): ba})
		if err == nil {
			err = d.db.Write(d.wo, wb)
		}
		wb.Destroy()
		if err != nil {
			return false, false, errors.Errorf("error storing reordered utxos %v", err)
		}
	}
	return false, reorder, nil
}
// FixUtxos checks and fixes possible inconsistencies between stored address
// balances and their utxo sets; applicable only to bitcoin type coins.
// It scans the whole cfAddressBalance column, recreating the iterator every
// refreshIterator rows; a signal on stop aborts the scan.
func (d *RocksDB) FixUtxos(stop chan os.Signal) error {
	if d.chainParser.GetChainType() != bchain.ChainBitcoinType {
		glog.Info("FixUtxos: applicable only for bitcoin type coins")
		return nil
	}
	glog.Info("FixUtxos: starting")
	var row, errorsCount, fixedCount int64
	var seekKey []byte
	// do not use cache
	ro := gorocksdb.NewDefaultReadOptions()
	ro.SetFillCache(false)
	// NOTE(review): ro is never Destroy()ed here - possible small one-time
	// leak of the C-allocated read options; confirm and consider a defer
	for {
		var addrDesc bchain.AddressDescriptor
		it := d.db.NewIteratorCF(ro, d.cfh[cfAddressBalance])
		if row == 0 {
			it.SeekToFirst()
		} else {
			glog.Info("FixUtxos: row ", row, ", errors ", errorsCount)
			// continue after the last processed key
			it.Seek(seekKey)
			it.Next()
		}
		for count := 0; it.Valid() && count < refreshIterator; it.Next() {
			select {
			case <-stop:
				// NOTE(review): it is not Closed on this early return - confirm
				return errors.New("Interrupted")
			default:
			}
			addrDesc = it.Key().Data()
			buf := it.Value().Data()
			count++
			row++
			if len(buf) < 3 {
				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", empty data")
				errorsCount++
				continue
			}
			ba, err := unpackAddrBalance(buf, d.chainParser.PackedTxidLen(), AddressBalanceDetailUTXO)
			if err != nil {
				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", unpackAddrBalance error ", err)
				errorsCount++
				continue
			}
			fixed, reordered, err := d.fixUtxo(addrDesc, ba)
			if err != nil {
				// fixUtxo returns an error for every detected inconsistency,
				// even one it managed to fix
				errorsCount++
				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", error ", err, ", fixed ", fixed)
				if fixed {
					fixedCount++
				}
			} else if reordered {
				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, " reordered")
				fixedCount++
			}
		}
		// remember the last key so the next iterator can resume there
		seekKey = append([]byte{}, addrDesc...)
		valid := it.Valid()
		it.Close()
		if !valid {
			break
		}
	}
	glog.Info("FixUtxos: finished, scanned ", row, " rows, found ", errorsCount, " errors, fixed ", fixedCount)
	return nil
}
2017-08-28 09:50:57 -06:00
// Helpers
2018-08-30 14:39:03 -06:00
func packAddressKey ( addrDesc bchain . AddressDescriptor , height uint32 ) [ ] byte {
2018-12-20 09:33:13 -07:00
buf := make ( [ ] byte , len ( addrDesc ) + packedHeightBytes )
copy ( buf , addrDesc )
// pack height as binary complement to achieve ordering from newest to oldest block
binary . BigEndian . PutUint32 ( buf [ len ( addrDesc ) : ] , ^ height )
2018-04-26 11:50:22 -06:00
return buf
2018-04-23 09:05:23 -06:00
}
func unpackAddressKey ( key [ ] byte ) ( [ ] byte , uint32 , error ) {
i := len ( key ) - packedHeightBytes
if i <= 0 {
return nil , 0 , errors . New ( "Invalid address key" )
}
2018-12-20 09:33:13 -07:00
// height is packed in binary complement, convert it
return key [ : i ] , ^ unpackUint ( key [ i : i + packedHeightBytes ] ) , nil
2018-04-23 09:05:23 -06:00
}
2017-08-28 09:50:57 -06:00
// packUint packs the uint32 i into a new 4-byte slice in big-endian order.
func packUint(i uint32) []byte {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], i)
	return b[:]
}
2018-01-24 10:02:46 -07:00
// unpackUint unpacks a big-endian uint32 from the first 4 bytes of b.
func unpackUint(b []byte) uint32 {
	return binary.BigEndian.Uint32(b)
}
2018-08-02 06:30:45 -06:00
// packVarint32 writes the signed 32-bit value v to dst using vlq encoding
// and returns the number of bytes written.
func packVarint32(v int32, dst []byte) int {
	return vlq.PutInt(dst, int64(v))
}
func packVarint ( i int , buf [ ] byte ) int {
2018-04-17 15:50:01 -06:00
return vlq . PutInt ( buf , int64 ( i ) )
2017-08-28 09:50:57 -06:00
}
2018-08-02 06:30:45 -06:00
// packVaruint writes the unsigned value v to dst using vlq encoding and
// returns the number of bytes written.
func packVaruint(v uint, dst []byte) int {
	return vlq.PutUint(dst, uint64(v))
}
func unpackVarint32 ( buf [ ] byte ) ( int32 , int ) {
2018-04-19 06:28:05 -06:00
i , ofs := vlq . Int ( buf )
2018-03-23 06:15:35 -06:00
return int32 ( i ) , ofs
2017-10-05 06:35:07 -06:00
}
2018-07-27 11:46:21 -06:00
2018-08-02 06:30:45 -06:00
// unpackVarint reads a vlq-encoded signed value from b and returns it as int
// together with the number of bytes consumed.
func unpackVarint(b []byte) (int, int) {
	value, read := vlq.Int(b)
	return int(value), read
}
// unpackVaruint reads a vlq-encoded unsigned value from b and returns it as
// uint together with the number of bytes consumed.
func unpackVaruint(b []byte) (uint, int) {
	value, read := vlq.Uint(b)
	return uint(value), read
}
2018-07-27 11:46:21 -06:00
const (
	// number of bits in a big.Word (32 or 64, detected from the platform)
	wordBits = 32 << (uint64(^big.Word(0)) >> 63)
	// number of bytes in a big.Word
	wordBytes = wordBits / 8
	// max packed bigint words
	maxPackedBigintWords = (256 - wordBytes) / wordBytes
	// upper bound of the packed size of a bigint: 1 length byte plus payload
	// (equals maxPackedBigintWords*wordBytes+1 on a 64-bit platform)
	maxPackedBigintBytes = 249
)
// big int is packed in BigEndian order without memory allocation as 1 byte length followed by bytes of big int
// number of written bytes is returned
// limitation: bigints longer than 248 bytes are truncated to 248 bytes
// caution: buffer must be big enough to hold the packed big int, buffer 249 bytes big is always safe
func packBigint(bi *big.Int, buf []byte) int {
	w := bi.Bits()
	lw := len(w)
	// zero returns only one byte - zero length
	if lw == 0 {
		buf[0] = 0
		return 1
	}
	// pack the most significant word in a special way - skip leading zeros
	w0 := w[lw-1]
	fb := 8
	mask := big.Word(0xff) << (wordBits - 8)
	for w0&mask == 0 {
		fb--
		mask >>= 8
	}
	// fb is the number of bytes to write for the most significant word;
	// write them big-endian into buf[1..fb]
	for i := fb; i > 0; i-- {
		buf[i] = byte(w0)
		w0 >>= 8
	}
	// if the big int is too big (> 2^1984), the number of bytes would not fit to 1 byte
	// in this case, truncate the number, it is not expected to work with this big numbers as amounts
	s := 0
	if lw > maxPackedBigintWords {
		s = lw - maxPackedBigintWords
	}
	// pack the rest of the words in reverse order
	for j := lw - 2; j >= s; j-- {
		d := w[j]
		for i := fb + wordBytes; i > fb; i-- {
			buf[i] = byte(d)
			d >>= 8
		}
		fb += wordBytes
	}
	// the first byte holds the total number of payload bytes
	buf[0] = byte(fb)
	return fb + 1
}
// unpackBigint reads a big int packed by packBigint (1 byte length followed
// by big-endian payload) and returns it with the number of bytes consumed.
func unpackBigint(buf []byte) (big.Int, int) {
	var value big.Int
	end := int(buf[0]) + 1
	value.SetBytes(buf[1:end])
	return value, end
}