diff --git a/src/main.cpp b/src/main.cpp
index bf48bc44a..0edf7fd8b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -6266,6 +6266,29 @@ bool ProcessMessages(CNode* pfrom)
     return fOk;
 }
 
+class CompareInvMempoolOrder
+{
+    CTxMemPool *mp;
+public:
+    CompareInvMempoolOrder(CTxMemPool *mempool)
+    {
+        mp = mempool;
+    }
+
+    bool operator()(const CInv &a, const CInv &b)
+    {
+        if (a.type != MSG_TX && b.type != MSG_TX) {
+            return false;
+        } else {
+            if (a.type != MSG_TX) {
+                return true;
+            } else if (b.type != MSG_TX) {
+                return false;
+            }
+            return mp->CompareDepthAndScore(a.hash, b.hash);
+        }
+    }
+};
 
 bool SendMessages(CNode* pto)
 {
@@ -6527,42 +6550,31 @@ bool SendMessages(CNode* pto)
             bool fSendTrickle = pto->fWhitelisted;
             if (pto->nNextInvSend < nNow) {
                 fSendTrickle = true;
-                pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
+                // Use half the delay for outbound peers, as there is less privacy concern for them.
+                pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
             }
             LOCK(pto->cs_inventory);
-            vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
+            if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+                // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+                CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+                std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
+            }
+            vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
             vInvWait.reserve(pto->vInventoryToSend.size());
             BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
             {
                 if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
                     continue;
-
-                // trickle out tx inv to protect privacy
-                if (inv.type == MSG_TX && !fSendTrickle)
-                {
-                    // 1/4 of tx invs blast to all immediately
-                    static uint256 hashSalt;
-                    if (hashSalt.IsNull())
-                        hashSalt = GetRandHash();
-                    uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
-                    hashRand = Hash(BEGIN(hashRand), END(hashRand));
-                    bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
-
-                    if (fTrickleWait)
-                    {
-                        vInvWait.push_back(inv);
-                        continue;
-                    }
+                // No reason to drain out at many times the network's capacity,
+                // especially since we have many peers and some will draw much shorter delays.
+                if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
+                    vInvWait.push_back(inv);
+                    continue;
                 }
 
                 pto->filterInventoryKnown.insert(inv.hash);
 
                 vInv.push_back(inv);
-                if (vInv.size() >= 1000)
-                {
-                    pto->PushMessage(NetMsgType::INV, vInv);
-                    vInv.clear();
-                }
             }
             pto->vInventoryToSend = vInvWait;
         }
diff --git a/src/main.h b/src/main.h
index 74f55b699..cd0543945 100644
--- a/src/main.h
+++ b/src/main.h
@@ -101,9 +101,12 @@ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
 static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60;
 /** Average delay between peer address broadcasts in seconds. */
 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
-/** Average delay between trickled inventory broadcasts in seconds.
- * Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */
-static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5;
+/** Average delay between trickled inventory transmissions in seconds.
+ * Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
+static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
+/** Maximum number of inventory items to send per transmission.
+ * Limits the impact of low-fee transaction floods. */
+static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;
 /** Average delay between feefilter broadcasts in seconds. */
 static const unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
 /** Maximum feefilter broadcast delay after significant change. */
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index c1984ae7c..933130d8f 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -764,6 +764,21 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
     assert(innerUsage == cachedInnerUsage);
 }
 
+bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb)
+{
+    LOCK(cs);
+    indexed_transaction_set::const_iterator i = mapTx.find(hasha);
+    if (i == mapTx.end()) return false;
+    indexed_transaction_set::const_iterator j = mapTx.find(hashb);
+    if (j == mapTx.end()) return true;
+    uint64_t counta = i->GetCountWithAncestors();
+    uint64_t countb = j->GetCountWithAncestors();
+    if (counta == countb) {
+        return CompareTxMemPoolEntryByScore()(*i, *j);
+    }
+    return counta < countb;
+}
+
 void CTxMemPool::queryHashes(vector<uint256>& vtxid)
 {
     vtxid.clear();
diff --git a/src/txmempool.h b/src/txmempool.h
index e8db3ecdc..b1967cc69 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -504,6 +504,7 @@ public:
                         std::list<CTransaction>& conflicts, bool fCurrentEstimate = true);
     void clear();
     void _clear(); //lock free
+    bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
    void queryHashes(std::vector<uint256>& vtxid);
     void pruneSpent(const uint256& hash, CCoins &coins);
     unsigned int GetTransactionsUpdated() const;
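
Note on the intended send order introduced by this patch: blocks go first, transactions are sorted by ancestor count (parents before children) and, at equal depth, by mempool score, and only the first INVENTORY_BROADCAST_MAX entries leave per trickle interval. The listing below is a minimal standalone sketch of that ordering, not code from the patch; Item, ancestors and score are simplified stand-ins for CInv and the mempool entry fields, and it assumes CompareTxMemPoolEntryByScore ranks higher-score entries first.

// Illustrative sketch only: simplified stand-ins for CInv / CTxMemPool, not part of the patch.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

enum ItemType { MSG_TX, MSG_BLOCK };

struct Item {
    std::string label;
    ItemType type;
    uint64_t ancestors; // stand-in for GetCountWithAncestors()
    double score;       // stand-in for the mempool score (higher is better)
};

// Mirrors the ordering of CompareInvMempoolOrder / CompareDepthAndScore:
// non-tx items (blocks) sort first; tx items sort by ancestor count, then by score.
struct CompareInvOrder {
    bool operator()(const Item& a, const Item& b) const {
        if (a.type != MSG_TX && b.type != MSG_TX) return false;
        if (a.type != MSG_TX) return true;
        if (b.type != MSG_TX) return false;
        if (a.ancestors != b.ancestors) return a.ancestors < b.ancestors;
        return a.score > b.score;
    }
};

int main() {
    std::vector<Item> inv = {
        {"child tx (2 ancestors, high fee)", MSG_TX, 2, 9.0},
        {"parent tx (1 ancestor, low fee)",  MSG_TX, 1, 1.0},
        {"block",                            MSG_BLOCK, 0, 0.0},
        {"independent tx (1 ancestor)",      MSG_TX, 1, 5.0},
    };
    std::stable_sort(inv.begin(), inv.end(), CompareInvOrder());
    // Prints: block, independent tx, parent tx, child tx.
    // Parents always precede their children, so a peer requesting in order never sees an orphan.
    for (const Item& i : inv) std::cout << i.label << "\n";
    return 0;
}

Because parents are announced ahead of their children, a peer that fetches the inventory in order never has to handle a child as an orphan, and the per-message cap together with the Poisson-distributed trickle delay bounds how quickly a flood of low-fee transactions can drain out.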