2019-01-30 09:56:15 -07:00
|
|
|
package api
|
|
|
|
|
|
|
|
import (
|
|
|
|
"blockbook/bchain"
|
|
|
|
"blockbook/db"
|
|
|
|
"fmt"
|
|
|
|
"math/big"
|
2019-02-03 15:42:44 -07:00
|
|
|
"sort"
|
2019-01-30 09:56:15 -07:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/golang/glog"
|
|
|
|
"github.com/juju/errors"
|
|
|
|
)
|
|
|
|
|
|
|
|
// xpubLen is the expected length in characters of a base58-encoded extended
// public key; other lengths are rejected as unsupported.
const xpubLen = 111

// defaultAddressesGap is the default gap limit - the number of consecutive
// unused addresses after which the address derivation stops.
const defaultAddressesGap = 20

// bit flags stored in xpubTxid.inputOutput, recording whether the address
// appears on the input side, the output side, or both sides of a transaction
const txInput = 1
const txOutput = 2

// cachedXpubs caches scanned xpub data between API calls; access is guarded
// by cachedXpubsMux.
var cachedXpubs = make(map[string]*xpubData)
var cachedXpubsMux sync.Mutex
|
|
|
|
|
2019-02-03 15:42:44 -07:00
|
|
|
// xpubTxid holds one transaction reference for a derived address: the txid,
// the block height (0 for a mempool transaction) and a bit mask of
// txInput/txOutput flags telling in which role the address appears in the tx.
type xpubTxid struct {
	txid        string
	height      uint32
	inputOutput byte
}

// xpubTxids sorts transaction references by block height in descending order
// (newest first).
type xpubTxids []xpubTxid

func (a xpubTxids) Len() int      { return len(a) }
func (a xpubTxids) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders by descending height. The comparison must be strict (">"):
// with the previous ">=", Less(i, j) and Less(j, i) were both true for equal
// heights, violating the sort.Interface contract and defeating the stability
// guarantee of sort.Stable for transactions in the same block.
func (a xpubTxids) Less(i, j int) bool { return a[i].height > a[j].height }
|
|
|
|
|
2019-01-30 09:56:15 -07:00
|
|
|
// xpubAddress holds the cached state of a single address derived from an xpub.
type xpubAddress struct {
	addrDesc  bchain.AddressDescriptor // descriptor of the derived address
	balance   *db.AddrBalance          // balance loaded from the DB; nil if the address was never used
	txs       uint32                   // number of txids currently cached in txids
	maxHeight uint32                   // highest block height up to which txids were loaded
	complete  bool                     // true if txids holds the complete history of the address
	txids     xpubTxids                // cached transaction references, newest first
}
|
|
|
|
|
|
|
|
// xpubData is the cached state of a whole xpub: the derived receive and change
// addresses plus balances aggregated across all of them.
type xpubData struct {
	gap        int    // gap limit used when the data was scanned; a different gap invalidates the cache
	dataHeight uint32 // block height at which the data was computed
	dataHash   string // hash of the block at dataHeight, used to detect chain forks
	txs        uint32 // total number of transactions across all derived addresses
	sentSat    big.Int // total amount sent, in satoshis
	balanceSat big.Int // total current balance, in satoshis
	// derived external (receive, change=0) and internal (change=1) addresses
	addresses       []xpubAddress
	changeAddresses []xpubAddress
}
|
|
|
|
|
2019-02-03 15:42:44 -07:00
|
|
|
func (w *Worker) xpubGetAddressTxids(addrDesc bchain.AddressDescriptor, mempool bool, fromHeight, toHeight uint32, maxResults int) ([]xpubTxid, bool, error) {
|
2019-01-30 09:56:15 -07:00
|
|
|
var err error
|
2019-02-03 15:42:44 -07:00
|
|
|
complete := true
|
|
|
|
txs := make([]xpubTxid, 0, 4)
|
2019-01-30 09:56:15 -07:00
|
|
|
var callback db.GetTransactionsCallback
|
2019-02-03 15:42:44 -07:00
|
|
|
callback = func(txid string, height uint32, indexes []int32) error {
|
|
|
|
// take all txs in the last found block even if it exceeds maxResults
|
|
|
|
if len(txs) >= maxResults && txs[len(txs)-1].height != height {
|
|
|
|
complete = false
|
|
|
|
return &db.StopIteration{}
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
2019-02-03 15:42:44 -07:00
|
|
|
inputOutput := byte(0)
|
|
|
|
for _, index := range indexes {
|
|
|
|
if index < 0 {
|
|
|
|
inputOutput |= txInput
|
|
|
|
} else {
|
|
|
|
inputOutput |= txOutput
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
}
|
2019-02-03 15:42:44 -07:00
|
|
|
txs = append(txs, xpubTxid{txid, height, inputOutput})
|
|
|
|
return nil
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
if mempool {
|
2019-02-03 15:42:44 -07:00
|
|
|
uniqueTxs := make(map[string]int)
|
2019-01-30 09:56:15 -07:00
|
|
|
o, err := w.chain.GetMempoolTransactionsForAddrDesc(addrDesc)
|
|
|
|
if err != nil {
|
2019-02-03 15:42:44 -07:00
|
|
|
return nil, false, err
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
for _, m := range o {
|
2019-02-03 15:42:44 -07:00
|
|
|
if l, found := uniqueTxs[m.Txid]; !found {
|
|
|
|
l = len(txs)
|
2019-01-30 09:56:15 -07:00
|
|
|
callback(m.Txid, 0, []int32{m.Vout})
|
2019-02-03 15:42:44 -07:00
|
|
|
if len(txs) > l {
|
|
|
|
uniqueTxs[m.Txid] = l - 1
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if m.Vout < 0 {
|
|
|
|
txs[l].inputOutput |= txInput
|
|
|
|
} else {
|
|
|
|
txs[l].inputOutput |= txOutput
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2019-02-03 15:42:44 -07:00
|
|
|
err = w.db.GetAddrDescTransactions(addrDesc, fromHeight, toHeight, callback)
|
2019-01-30 09:56:15 -07:00
|
|
|
if err != nil {
|
2019-02-03 15:42:44 -07:00
|
|
|
return nil, false, err
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
}
|
2019-02-03 15:42:44 -07:00
|
|
|
return txs, complete, nil
|
2019-01-30 09:56:15 -07:00
|
|
|
}
|
|
|
|
|
2019-02-03 15:42:44 -07:00
|
|
|
func (w *Worker) xpubCheckAndLoadTxids(ad *xpubAddress, filter *AddressFilter, maxHeight uint32, pageSize int) error {
|
|
|
|
// skip if not discovered
|
|
|
|
if ad.balance == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// if completely read, check if there are not some new txs and load if necessary
|
|
|
|
if ad.complete {
|
|
|
|
if ad.balance.Txs != ad.txs {
|
|
|
|
newTxids, _, err := w.xpubGetAddressTxids(ad.addrDesc, false, ad.maxHeight+1, maxHeight, maxInt)
|
|
|
|
if err == nil {
|
|
|
|
ad.txids = append(newTxids, ad.txids...)
|
|
|
|
ad.maxHeight = maxHeight
|
|
|
|
ad.txs = uint32(len(ad.txids))
|
|
|
|
if ad.txs != ad.balance.Txs {
|
|
|
|
glog.Warning("xpubCheckAndLoadTxids inconsistency ", ad.addrDesc, ", ad.txs=", ad.txs, ", ad.balance.Txs=", ad.balance.Txs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// unless the filter is completely off, load all txids
|
|
|
|
if filter.FromHeight != 0 || filter.ToHeight != 0 || filter.Vout != AddressFilterVoutOff {
|
|
|
|
pageSize = maxInt
|
|
|
|
}
|
|
|
|
newTxids, complete, err := w.xpubGetAddressTxids(ad.addrDesc, false, 0, maxHeight, pageSize)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ad.txids = newTxids
|
|
|
|
ad.complete = complete
|
|
|
|
ad.maxHeight = maxHeight
|
|
|
|
if complete {
|
|
|
|
ad.txs = uint32(len(ad.txids))
|
|
|
|
if ad.txs != ad.balance.Txs {
|
|
|
|
glog.Warning("xpubCheckAndLoadTxids inconsistency ", ad.addrDesc, ", ad.txs=", ad.txs, ", ad.balance.Txs=", ad.balance.Txs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (w *Worker) xpubDerivedAddressBalance(data *xpubData, ad *xpubAddress) (bool, error) {
|
2019-01-30 09:56:15 -07:00
|
|
|
var err error
|
|
|
|
if ad.balance, err = w.db.GetAddrDescBalance(ad.addrDesc); err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if ad.balance != nil {
|
|
|
|
data.txs += ad.balance.Txs
|
|
|
|
data.sentSat.Add(&data.sentSat, &ad.balance.SentSat)
|
|
|
|
data.balanceSat.Add(&data.balanceSat, &ad.balance.BalanceSat)
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2019-02-03 15:42:44 -07:00
|
|
|
// xpubScanAddresses rescans the balances of already derived addresses and
// derives further addresses (for the given change level) until there is a run
// of `gap` unused addresses past the last used one, always deriving at least
// minDerivedIndex addresses. It returns the index of the last used address
// and the (possibly grown) slice of addresses.
// NOTE(review): `missing` counts len(addresses)-lastUsed, which is one more
// than the number of trailing unused addresses; the caller compensates by
// passing gap incremented by one (see GetAddressForXpub) — confirm before
// changing either side.
func (w *Worker) xpubScanAddresses(xpub string, data *xpubData, addresses []xpubAddress, gap int, change int, minDerivedIndex int, fork bool) (int, []xpubAddress, error) {
	// rescan known addresses
	lastUsed := 0
	for i := range addresses {
		ad := &addresses[i]
		if fork {
			// reset the cached data
			ad.txs = 0
			ad.maxHeight = 0
			ad.complete = false
			ad.txids = nil
		}
		used, err := w.xpubDerivedAddressBalance(data, ad)
		if err != nil {
			return 0, nil, err
		}
		if used {
			lastUsed = i
		}
	}
	// derive new addresses as necessary
	missing := len(addresses) - lastUsed
	for missing < gap {
		from := len(addresses)
		to := from + gap - missing
		if to < minDerivedIndex {
			to = minDerivedIndex
		}
		descriptors, err := w.chainParser.DeriveAddressDescriptorsFromTo(xpub, uint32(change), uint32(from), uint32(to))
		if err != nil {
			return 0, nil, err
		}
		for i, a := range descriptors {
			ad := xpubAddress{addrDesc: a}
			used, err := w.xpubDerivedAddressBalance(data, &ad)
			if err != nil {
				return 0, nil, err
			}
			if used {
				// i is relative to this batch; from+i is the absolute index
				lastUsed = i + from
			}
			addresses = append(addresses, ad)
		}
		missing = len(addresses) - lastUsed
	}
	return lastUsed, addresses, nil
}
|
|
|
|
|
2019-01-30 09:56:15 -07:00
|
|
|
func (w *Worker) tokenFromXpubAddress(ad *xpubAddress, changeIndex int, index int) Token {
|
|
|
|
a, _, _ := w.chainParser.GetAddressesFromAddrDesc(ad.addrDesc)
|
|
|
|
var address string
|
|
|
|
if len(a) > 0 {
|
|
|
|
address = a[0]
|
|
|
|
}
|
|
|
|
return Token{
|
|
|
|
Type: XPUBAddressTokenType,
|
|
|
|
Name: address,
|
|
|
|
Decimals: w.chainParser.AmountDecimals(),
|
|
|
|
BalanceSat: (*Amount)(&ad.balance.BalanceSat),
|
|
|
|
Transfers: int(ad.balance.Txs),
|
|
|
|
Contract: fmt.Sprintf("%d/%d", changeIndex, index),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetAddressForXpub derives the addresses of an xpub, aggregates their
// balances and (depending on option) their transaction history, and returns
// the result as an Address. Results are cached per xpub in cachedXpubs; the
// cache is invalidated when the gap changes or a chain fork is detected.
// page is 1-based; gap<=0 selects defaultAddressesGap.
func (w *Worker) GetAddressForXpub(xpub string, page int, txsOnPage int, option GetAddressOption, filter *AddressFilter, gap int) (*Address, error) {
	// xpubs are supported only on Bitcoin-type chains and must have the expected length
	if w.chainType != bchain.ChainBitcoinType || len(xpub) != xpubLen {
		return nil, ErrUnsupportedXpub
	}
	start := time.Now()
	if gap <= 0 {
		gap = defaultAddressesGap
	}
	// gap is increased one as there must be gap of empty addresses before the derivation is stopped
	gap++
	// convert 1-based API page to 0-based indexing
	page--
	if page < 0 {
		page = 0
	}
	var processedHash string
	cachedXpubsMux.Lock()
	data, found := cachedXpubs[xpub]
	cachedXpubsMux.Unlock()
	var (
		txm          []string
		txs          []*Tx
		txids        []string
		pg           Paging
		totalResults int
		err          error
		bestheight   uint32
		besthash     string
	)
	// to load all data for xpub may take some time, do it in a loop to process a possible new block
	for {
		bestheight, besthash, err = w.db.GetBestBlock()
		if err != nil {
			return nil, errors.Annotatef(err, "GetBestBlock")
		}
		// no new block arrived while loading - the data is consistent, stop
		if besthash == processedHash {
			break
		}
		fork := false
		if !found || data.gap != gap {
			// no cached data, or cached with a different gap - start from scratch
			data = &xpubData{gap: gap}
		} else {
			hash, err := w.db.GetBlockHash(data.dataHeight)
			if err != nil {
				return nil, err
			}
			if hash != data.dataHash {
				// in case of fork reset all cached data
				fork = true
			}
		}
		processedHash = besthash
		if data.dataHeight < bestheight {
			data.dataHeight = bestheight
			data.dataHash = besthash
			var lastUsedIndex int
			// scan external (receive, change=0) addresses first, then change
			// addresses; change addresses are derived at least up to the last
			// used receive index
			lastUsedIndex, data.addresses, err = w.xpubScanAddresses(xpub, data, data.addresses, gap, 0, 0, fork)
			if err != nil {
				return nil, err
			}
			_, data.changeAddresses, err = w.xpubScanAddresses(xpub, data, data.changeAddresses, gap, 1, lastUsedIndex, fork)
			if err != nil {
				return nil, err
			}
		}
		// txids are needed only when some form of history was requested
		if option >= TxidHistory {
			for i := range data.addresses {
				if err = w.xpubCheckAndLoadTxids(&data.addresses[i], filter, bestheight, txsOnPage); err != nil {
					return nil, err
				}
			}
			for i := range data.changeAddresses {
				if err = w.xpubCheckAndLoadTxids(&data.changeAddresses[i], filter, bestheight, txsOnPage); err != nil {
					return nil, err
				}
			}
		}
	}
	cachedXpubsMux.Lock()
	cachedXpubs[xpub] = data
	cachedXpubsMux.Unlock()
	// TODO mempool
	if option >= TxidHistory {
		txc := make(xpubTxids, 0, 32)
		var addTxids func(ad *xpubAddress)
		if filter.FromHeight == 0 && filter.ToHeight == 0 && filter.Vout == AddressFilterVoutOff {
			// no filter - collect everything; the total is known from the scan
			addTxids = func(ad *xpubAddress) {
				txc = append(txc, ad.txids...)
			}
			totalResults = int(data.txs)
		} else {
			toHeight := maxUint32
			if filter.ToHeight != 0 {
				toHeight = filter.ToHeight
			}
			// collect only txids passing the height and input/output filters
			addTxids = func(ad *xpubAddress) {
				for _, txid := range ad.txids {
					if txid.height < filter.FromHeight || txid.height > toHeight {
						continue
					}
					if filter.Vout != AddressFilterVoutOff {
						if filter.Vout == AddressFilterVoutInputs && txid.inputOutput&txInput == 0 ||
							filter.Vout == AddressFilterVoutOutputs && txid.inputOutput&txOutput == 0 {
							continue
						}
					}
					txc = append(txc, txid)
				}
			}
			// with a filter active the total count is not known in advance
			totalResults = -1
		}
		for i := range data.addresses {
			addTxids(&data.addresses[i])
		}
		for i := range data.changeAddresses {
			addTxids(&data.changeAddresses[i])
		}
		// newest first; Stable keeps the per-address order within a block
		sort.Stable(txc)
		var from, to int
		pg, from, to, page = computePaging(len(txc), page, txsOnPage)
		if len(txc) >= txsOnPage {
			if totalResults < 0 {
				pg.TotalPages = -1
			} else {
				// paging must reflect the full (unfiltered) total
				pg, _, _, _ = computePaging(totalResults, page, txsOnPage)
			}
		}
		if option == TxidHistory {
			txids = make([]string, len(txm)+to-from)
		} else {
			txs = make([]*Tx, len(txm)+to-from)
		}
		txi := 0
		// get confirmed transactions
		for i := from; i < to; i++ {
			xpubTxid := &txc[i]
			if option == TxidHistory {
				txids[txi] = xpubTxid.txid
			} else {
				if txs[txi], err = w.txFromTxid(xpubTxid.txid, bestheight, option); err != nil {
					return nil, err
				}
			}
			txi++
		}
		// trim to the number of entries actually filled in
		if option == TxidHistory {
			txids = txids[:txi]
		} else if option >= TxHistoryLight {
			txs = txs[:txi]
		}
	}
	totalTokens := 0
	// xpubAddresses collects all used addresses of the xpub for quick lookup by callers
	xpubAddresses := make(map[string]struct{})
	tokens := make([]Token, 0, 4)
	for i := range data.addresses {
		ad := &data.addresses[i]
		if ad.balance != nil {
			totalTokens++
			if filter.AllTokens || !IsZeroBigInt(&ad.balance.BalanceSat) {
				t := w.tokenFromXpubAddress(ad, 0, i)
				tokens = append(tokens, t)
				xpubAddresses[t.Name] = struct{}{}
			} else {
				// used address filtered out of tokens - still record its string form
				a, _, _ := w.chainParser.GetAddressesFromAddrDesc(ad.addrDesc)
				if len(a) > 0 {
					xpubAddresses[a[0]] = struct{}{}
				}
			}
		}
	}
	for i := range data.changeAddresses {
		ad := &data.changeAddresses[i]
		if ad.balance != nil {
			totalTokens++
			if filter.AllTokens || !IsZeroBigInt(&ad.balance.BalanceSat) {
				t := w.tokenFromXpubAddress(ad, 1, i)
				tokens = append(tokens, t)
				xpubAddresses[t.Name] = struct{}{}
			} else {
				a, _, _ := w.chainParser.GetAddressesFromAddrDesc(ad.addrDesc)
				if len(a) > 0 {
					xpubAddresses[a[0]] = struct{}{}
				}
			}
		}
	}
	// total received = current balance + everything ever sent
	var totalReceived big.Int
	totalReceived.Add(&data.balanceSat, &data.sentSat)
	addr := Address{
		Paging:           pg,
		AddrStr:          xpub,
		BalanceSat:       (*Amount)(&data.balanceSat),
		TotalReceivedSat: (*Amount)(&totalReceived),
		TotalSentSat:     (*Amount)(&data.sentSat),
		Txs:              int(data.txs),
		// UnconfirmedBalanceSat: (*Amount)(&uBalSat),
		// UnconfirmedTxs: len(txm),
		Transactions:  txs,
		Txids:         txids,
		TotalTokens:   totalTokens,
		Tokens:        tokens,
		XPubAddresses: xpubAddresses,
	}
	glog.Info("GetAddressForXpub ", xpub[:16], ", ", len(data.addresses)+len(data.changeAddresses), " derived addresses, ", data.txs, " total txs finished in ", time.Since(start))
	return &addr, nil
}
|