Line data Source code
1 : // Copyright (c) 2009-2010 Satoshi Nakamoto
2 : // Copyright (c) 2009-2015 The Bitcoin developers
3 : // Distributed under the MIT software license, see the accompanying
4 : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 :
6 : #include "policy/fees.h"
7 : #include "policy/policy.h"
8 :
9 : #include "primitives/transaction.h"
10 : #include "streams.h"
11 : #include "txmempool.h"
12 : #include "util/system.h"
13 :
14 484 : void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
15 : unsigned int maxConfirms, double _decay)
16 : {
17 484 : decay = _decay;
18 36564 : for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
19 36080 : buckets.push_back(defaultBuckets[i]);
20 36080 : bucketMap[defaultBuckets[i]] = i;
21 : }
22 484 : confAvg.resize(maxConfirms);
23 484 : curBlockConf.resize(maxConfirms);
24 484 : unconfTxs.resize(maxConfirms);
25 12584 : for (unsigned int i = 0; i < maxConfirms; i++) {
26 12100 : confAvg[i].resize(buckets.size());
27 12100 : curBlockConf[i].resize(buckets.size());
28 12100 : unconfTxs[i].resize(buckets.size());
29 : }
30 :
31 484 : oldUnconfTxs.resize(buckets.size());
32 484 : curBlockTxCt.resize(buckets.size());
33 484 : txCtAvg.resize(buckets.size());
34 484 : curBlockVal.resize(buckets.size());
35 484 : avg.resize(buckets.size());
36 484 : }
37 :
38 : // Zero out the data for the current block
39 39133 : void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight)
40 : {
41 2946236 : for (unsigned int j = 0; j < buckets.size(); j++) {
42 2907096 : oldUnconfTxs[j] += unconfTxs[nBlockHeight%unconfTxs.size()][j];
43 2907096 : unconfTxs[nBlockHeight%unconfTxs.size()][j] = 0;
44 75584600 : for (unsigned int i = 0; i < curBlockConf.size(); i++)
45 72677400 : curBlockConf[i][j] = 0;
46 2907096 : curBlockTxCt[j] = 0;
47 2907096 : curBlockVal[j] = 0;
48 : }
49 39133 : }
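A side note on the recycling above: unconfTxs is a circular buffer with one row per recent block height, so when a new block claims a row, anything still unconfirmed there is at least GetMaxConfirms() blocks old and is folded into oldUnconfTxs. A minimal standalone sketch (assuming a depth of 25 as a stand-in for GetMaxConfirms()):

// Illustrative sketch (not part of fees.cpp): recycling one row of the
// unconfirmed-tx circular buffer into the oldUnconfTxs catch-all.
#include <cstdio>
#include <vector>

int main()
{
    const unsigned int rows = 25;                 // assumed stand-in for GetMaxConfirms()
    std::vector<unsigned int> unconfByRow(rows, 0);
    unsigned int oldUnconfTxs = 0;

    unconfByRow[1000 % rows] = 3;                 // 3 txs entered the mempool at height 1000

    unsigned int newHeight = 1025;                // 25 blocks later the same row comes up again
    oldUnconfTxs += unconfByRow[newHeight % rows];
    unconfByRow[newHeight % rows] = 0;            // ClearCurrent-style recycling

    std::printf("row %u recycled, oldUnconfTxs = %u\n", newHeight % rows, oldUnconfTxs);
    return 0;
}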
50 :
51 :
52 123055 : void TxConfirmStats::Record(int blocksToConfirm, double val)
53 : {
54 : // blocksToConfirm is 1-based
55 123055 : if (blocksToConfirm < 1)
56 : return;
57 123055 : unsigned int bucketindex = bucketMap.lower_bound(val)->second;
58 3154662 : for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) {
59 3031605 : curBlockConf[i - 1][bucketindex]++;
60 : }
61 123055 : curBlockTxCt[bucketindex]++;
62 123055 : curBlockVal[bucketindex] += val;
63 : }
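The bucketMap.lower_bound(val) lookup used here (and again in NewTx) resolves a feerate to a bucket index because each map key is a bucket's upper boundary; the terminal INF_FEERATE entry catches anything above the largest finite boundary. A small self-contained sketch with made-up boundaries:

// Illustrative sketch (not part of fees.cpp): mapping a feerate to a bucket
// index with std::map::lower_bound, which returns the first key >= val.
#include <cstdio>
#include <map>

int main()
{
    std::map<double, unsigned int> bucketMap;
    const double boundaries[] = {1000, 1100, 1210, 1e99 /* assumed INF_FEERATE-style sentinel */};
    for (unsigned int i = 0; i < 4; i++)
        bucketMap[boundaries[i]] = i;

    for (double feerate : {999.0, 1050.0, 1210.0, 5000.0})
        std::printf("feerate %.0f -> bucket %u\n", feerate, bucketMap.lower_bound(feerate)->second);
    // 999 -> 0, 1050 -> 1, 1210 -> 2, 5000 -> 3
    return 0;
}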
64 :
65 39133 : void TxConfirmStats::UpdateMovingAverages()
66 : {
67 2946236 : for (unsigned int j = 0; j < buckets.size(); j++) {
68 75584600 : for (unsigned int i = 0; i < confAvg.size(); i++)
69 72677400 : confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j];
70 2907096 : avg[j] = avg[j] * decay + curBlockVal[j];
71 2907096 : txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j];
72 : }
73 39133 : }
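For intuition about the decay factor: with the update x = x * decay + current applied once per block, a data point recorded n blocks ago carries weight decay^n, and a constant per-block contribution converges to 1/(1 - decay) times that contribution, which is exactly the scaling EstimateMedianVal applies to sufficientTxVal below. A minimal sketch, assuming a decay of 0.998 as a ballpark for DEFAULT_DECAY:

// Illustrative sketch (not part of fees.cpp): the exponentially decayed sum
// used by the moving averages converges to 1 / (1 - decay) per unit of
// per-block input.
#include <cstdio>

int main()
{
    const double decay = 0.998;   // assumed ballpark for DEFAULT_DECAY
    double avg = 0.0;
    for (int block = 0; block < 5000; block++)
        avg = avg * decay + 1.0;  // one data point per block
    std::printf("after 5000 blocks: %.2f (limit %.2f)\n", avg, 1.0 / (1.0 - decay));
    return 0;
}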
74 :
75 : // returns -1 on error conditions
76 22583 : double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
77 : double successBreakPoint, bool requireGreater,
78 : unsigned int nBlockHeight)
79 : {
80 : // Counters for a bucket (or range of buckets)
81 22583 : double nConf = 0; // Number of tx's confirmed within the confTarget
82 22583 : double totalNum = 0; // Total number of tx's that were ever confirmed
83 22583 : int extraNum = 0; // Number of tx's still in mempool for confTarget or longer
84 :
85 22583 : int maxbucketindex = buckets.size() - 1;
86 :
87 : // requireGreater means we are looking for the lowest feerate such that all higher
88 : // values pass, so we start at maxbucketindex (highest feerate) and look at successively
89 : // smaller buckets until we reach failure. Otherwise, we are looking for the highest
90 : // feerate such that all lower values fail, and we go in the opposite direction.
91 22583 : unsigned int startbucket = requireGreater ? maxbucketindex : 0;
92 22583 : int step = requireGreater ? -1 : 1;
93 :
94 : // We'll combine buckets until we have enough samples.
95 : // The near and far variables will define the range we've combined.
96 : // The best variables are the last range we saw which still had a high
97 : // enough confirmation rate to count as success.
98 : // The cur variables are the current range we're counting.
99 22583 : unsigned int curNearBucket = startbucket;
100 22583 : unsigned int bestNearBucket = startbucket;
101 22583 : unsigned int curFarBucket = startbucket;
102 22583 : unsigned int bestFarBucket = startbucket;
103 :
104 22583 : bool foundAnswer = false;
105 22583 : unsigned int bins = unconfTxs.size();
106 :
107 : // Start counting from highest (default) or lowest feerate transactions
108 1690373 : for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
109 1668019 : curFarBucket = bucket;
110 1668019 : nConf += confAvg[confTarget - 1][bucket];
111 1668019 : totalNum += txCtAvg[bucket];
112 21760197 : for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
113 20092178 : extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
114 1668019 : extraNum += oldUnconfTxs[bucket];
115 : // If we have enough transaction data points in this range of buckets,
116 : // we can test for success
117 : // (Only count the confirmed data points, so that each confirmation count
118 : // will be looking at the same amount of data and same bucket breaks)
119 1668019 : if (totalNum >= sufficientTxVal / (1 - decay)) {
120 1347 : double curPct = nConf / (totalNum + extraNum);
121 :
122 : // Check to see if we are no longer getting confirmed at the success rate
123 1347 : if (requireGreater && curPct < successBreakPoint)
124 : break;
125 1114 : if (!requireGreater && curPct > successBreakPoint)
126 : break;
127 :
128 : // Otherwise update the cumulative stats, and the bucket variables
129 : // and reset the counters
130 : else {
131 1114 : foundAnswer = true;
132 1114 : nConf = 0;
133 1114 : totalNum = 0;
134 1114 : extraNum = 0;
135 1114 : bestNearBucket = curNearBucket;
136 1114 : bestFarBucket = curFarBucket;
137 1114 : curNearBucket = bucket + step;
138 : }
139 : }
140 : }
141 :
142 22583 : double median = -1;
143 22583 : double txSum = 0;
144 :
145 : // Calculate the "average" feerate of the best bucket range that met success conditions
146 : // Find the bucket with the median transaction and then report the average feerate from that bucket
147 : // This is a compromise between finding the true median, which we can't since we don't save all txs,
148 : // and reporting the overall average, which is less accurate
149 22583 : unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
150 22583 : unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket;
151 49323 : for (unsigned int j = minBucket; j <= maxBucket; j++) {
152 26740 : txSum += txCtAvg[j];
153 : }
154 22583 : if (foundAnswer && txSum != 0) {
155 279 : txSum = txSum / 2;
156 1413 : for (unsigned int j = minBucket; j <= maxBucket; j++) {
157 1413 : if (txCtAvg[j] < txSum)
158 1134 : txSum -= txCtAvg[j];
159 : else { // we're in the right bucket
160 279 : median = avg[j] / txCtAvg[j];
161 279 : break;
162 : }
163 : }
164 : }
165 :
166 22583 : LogPrint(BCLog::ESTIMATEFEE, "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
167 : confTarget, requireGreater ? ">" : "<", successBreakPoint,
168 : requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
169 : 100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);
170 :
171 22583 : return median;
172 : }
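The median-of-buckets step near the end is the subtle part: half of the combined decayed tx count is walked through the buckets, and the bucket containing the median transaction reports its average feerate. A minimal standalone sketch of the same walk over toy per-bucket aggregates (values are made up):

// Illustrative sketch (not part of fees.cpp): picking the "median" feerate
// from per-bucket aggregates. Bucket j keeps a decayed tx count (txCtAvg)
// and a decayed feerate sum (avg); the reported value is the average feerate
// of whichever bucket contains the median transaction of the winning range.
#include <cstdio>
#include <vector>

int main()
{
    std::vector<double> txCtAvg = {10, 40, 25};        // decayed tx counts per bucket
    std::vector<double> avg     = {1000, 8000, 7500};  // decayed feerate sums per bucket

    double txSum = 0;
    for (double n : txCtAvg) txSum += n;
    txSum /= 2;                                        // walk to the median transaction
    for (size_t j = 0; j < txCtAvg.size(); j++) {
        if (txCtAvg[j] < txSum) {
            txSum -= txCtAvg[j];
        } else {
            std::printf("median bucket %zu, feerate %.1f\n", j, avg[j] / txCtAvg[j]);
            break;
        }
    }
    return 0;
}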
173 :
174 356 : void TxConfirmStats::Write(CAutoFile& fileout)
175 : {
176 356 : fileout << decay;
177 356 : fileout << buckets;
178 356 : fileout << avg;
179 356 : fileout << txCtAvg;
180 356 : fileout << confAvg;
181 356 : }
182 :
183 84 : void TxConfirmStats::Read(CAutoFile& filein)
184 : {
185 : // Read data file into temporary variables and do some very basic sanity checking
186 84 : std::vector<double> fileBuckets;
187 84 : std::vector<double> fileAvg;
188 84 : std::vector<std::vector<double> > fileConfAvg;
189 84 : std::vector<double> fileTxCtAvg;
190 84 : double fileDecay;
191 84 : size_t maxConfirms;
192 84 : size_t numBuckets;
193 :
194 84 : filein >> fileDecay;
195 84 : if (fileDecay <= 0 || fileDecay >= 1)
196 0 : throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
197 84 : filein >> fileBuckets;
198 84 : numBuckets = fileBuckets.size();
199 84 : if (numBuckets <= 1 || numBuckets > 1000)
200 0 : throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
201 84 : filein >> fileAvg;
202 84 : if (fileAvg.size() != numBuckets)
203 0 : throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
204 84 : filein >> fileTxCtAvg;
205 84 : if (fileTxCtAvg.size() != numBuckets)
206 0 : throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
207 84 : filein >> fileConfAvg;
208 84 : maxConfirms = fileConfAvg.size();
209 84 : if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) // one week
210 0 : throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
211 2184 : for (unsigned int i = 0; i < maxConfirms; i++) {
212 2100 : if (fileConfAvg[i].size() != numBuckets)
213 0 : throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
214 : }
215 : // Now that we've processed the entire feerate estimate data file and not
216 : // thrown any errors, we can copy it to our data structures
217 84 : decay = fileDecay;
218 84 : buckets = fileBuckets;
219 84 : avg = fileAvg;
220 84 : confAvg = fileConfAvg;
221 84 : txCtAvg = fileTxCtAvg;
222 84 : bucketMap.clear();
223 :
224 : // Resize the current block variables which aren't stored in the data file
225 : // to match the number of confirms and buckets
226 84 : curBlockConf.resize(maxConfirms);
227 2184 : for (unsigned int i = 0; i < maxConfirms; i++) {
228 2100 : curBlockConf[i].resize(buckets.size());
229 : }
230 84 : curBlockTxCt.resize(buckets.size());
231 84 : curBlockVal.resize(buckets.size());
232 :
233 84 : unconfTxs.resize(maxConfirms);
234 2184 : for (unsigned int i = 0; i < maxConfirms; i++) {
235 2100 : unconfTxs[i].resize(buckets.size());
236 : }
237 84 : oldUnconfTxs.resize(buckets.size());
238 :
239 6300 : for (unsigned int i = 0; i < buckets.size(); i++)
240 6216 : bucketMap[buckets[i]] = i;
241 :
242 84 : LogPrint(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n",
243 : numBuckets, maxConfirms);
244 84 : }
245 :
246 133723 : unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
247 : {
248 133723 : unsigned int bucketindex = bucketMap.lower_bound(val)->second;
249 133723 : unsigned int blockIndex = nBlockHeight % unconfTxs.size();
250 133723 : unconfTxs[blockIndex][bucketindex]++;
251 133723 : return bucketindex;
252 : }
253 :
254 133670 : void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex)
255 : {
256 : //nBestSeenHeight is not updated yet for the new block
257 133670 : int blocksAgo = nBestSeenHeight - entryHeight;
258 133670 : if (nBestSeenHeight == 0) // the BlockPolicyEstimator hasn't seen any blocks yet
259 : blocksAgo = 0;
260 133670 : if (blocksAgo < 0) {
261 0 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, blocks ago is negative for mempool tx\n");
262 0 : return; //This can't happen because we call this with our best seen height; no entries can have a higher height
263 : }
264 :
265 133670 : if (blocksAgo >= (int)unconfTxs.size()) {
266 141 : if (oldUnconfTxs[bucketindex] > 0)
267 141 : oldUnconfTxs[bucketindex]--;
268 : else
269 0 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
270 : bucketindex);
271 : }
272 : else {
273 133529 : unsigned int blockIndex = entryHeight % unconfTxs.size();
274 133529 : if (unconfTxs[blockIndex][bucketindex] > 0)
275 133529 : unconfTxs[blockIndex][bucketindex]--;
276 : else
277 0 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
278 : blockIndex, bucketindex);
279 : }
280 : }
281 :
282 : // This function is called from CTxMemPool::removeUnchecked to ensure
283 : // txs removed from the mempool for any reason are no longer
284 : // tracked. Txs that were part of a block have already been removed in
285 : // processBlockTx to ensure they are never double tracked, but there is
286 : // no harm in trying to remove them again.
287 310057 : bool CBlockPolicyEstimator::removeTx(const uint256& hash)
288 : {
289 310057 : std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
290 310057 : if (pos != mapMemPoolTxs.end()) {
291 133670 : feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex);
292 133670 : mapMemPoolTxs.erase(hash);
293 133670 : return true;
294 : }
295 : return false;
296 : }
297 :
298 484 : CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee)
299 484 : : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0)
300 : {
301 484 : static_assert(MIN_FEERATE > 0, "Min feerate must be nonzero");
302 484 : minTrackedFee = _minRelayFee < CFeeRate(MIN_FEERATE) ? CFeeRate(MIN_FEERATE) : _minRelayFee;
303 484 : std::vector<double> vfeelist;
304 36080 : for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_FEERATE; bucketBoundary *= FEE_SPACING) {
305 35596 : vfeelist.push_back(bucketBoundary);
306 : }
307 484 : vfeelist.push_back(INF_FEERATE);
308 484 : feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
309 484 : }
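For a feel of the bucket grid the constructor builds: boundaries grow geometrically from the minimum tracked feerate up to MAX_FEERATE, with an INF_FEERATE catch-all appended. A standalone sketch using assumed stand-in values for the constants (the real FEE_SPACING, MAX_FEERATE and INF_FEERATE come from the included headers):

// Illustrative sketch (not part of fees.cpp): building a geometric feerate
// bucket grid like the one passed to TxConfirmStats::Initialize.
#include <cstdio>
#include <vector>

int main()
{
    const double minFeePerK = 1000;   // assumed minimum tracked feerate per kB
    const double maxFeerate = 1e7;    // assumed stand-in for MAX_FEERATE
    const double feeSpacing = 1.1;    // assumed stand-in for FEE_SPACING
    std::vector<double> vfeelist;
    for (double b = minFeePerK; b <= maxFeerate; b *= feeSpacing)
        vfeelist.push_back(b);
    vfeelist.push_back(1e99);         // assumed stand-in for INF_FEERATE
    std::printf("%zu feerate buckets; highest finite boundary %.0f\n",
                vfeelist.size(), vfeelist[vfeelist.size() - 2]);
    return 0;
}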
310 :
311 191011 : void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate)
312 : {
313 191011 : if(entry.HasZerocoins() || entry.IsShielded()) {
314 : // Zerocoin spends/mints had fixed feerate. Skip them for the estimates.
315 57288 : return;
316 : }
317 :
318 188833 : unsigned int txHeight = entry.GetHeight();
319 188833 : uint256 hash = entry.GetTx().GetHash();
320 188833 : if (mapMemPoolTxs.count(hash)) {
321 2 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error mempool tx %s already being tracked\n",
322 : hash.ToString().c_str());
323 2 : return;
324 : }
325 :
326 188831 : if (txHeight != nBestSeenHeight) {
327 : // Ignore side chains and re-orgs; assuming they are random they don't
328 : // affect the estimate. We'll potentially double count transactions in 1-block reorgs.
329 : // Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
330 : // It will be synced next time a block is processed.
331 : return;
332 : }
333 :
334 : // Only want to be updating estimates when our blockchain is synced,
335 : // otherwise we'll miscalculate how many blocks it's taking to get included.
336 133780 : if (!validFeeEstimate) {
337 57 : untrackedTxs++;
338 57 : return;
339 : }
340 133723 : trackedTxs++;
341 :
342 : // Feerates are stored and reported as PIV-per-kb:
343 133723 : CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());
344 :
345 133723 : mapMemPoolTxs[hash].blockHeight = txHeight;
346 133723 : mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
347 : }
348 :
349 124563 : bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry)
350 : {
351 124563 : if(entry->HasZerocoins() || entry->IsShielded()) {
352 : // Zerocoin spends/mints had fixed feerate. Skip them for the estimates.
353 : return false;
354 : }
355 :
356 123160 : if (!removeTx(entry->GetTx().GetHash())) {
357 : // This transaction wasn't being tracked for fee estimation
358 : return false;
359 : }
360 :
361 : // How many blocks did it take for miners to include this transaction?
362 : // blocksToConfirm is 1-based, so a transaction included in the earliest
363 : // possible block has confirmation count of 1
364 123055 : int blocksToConfirm = nBlockHeight - entry->GetHeight();
365 123055 : if (blocksToConfirm <= 0) {
366 : // This can't happen because we don't process transactions from a block with a height
367 : // lower than our greatest seen height
368 0 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy error Transaction had negative blocksToConfirm\n");
369 0 : return false;
370 : }
371 :
372 : // Feerates are stored and reported as PIV-per-kb:
373 123055 : CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());
374 :
375 123055 : feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
376 123055 : return true;
377 : }
378 :
379 42036 : void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
380 : std::vector<const CTxMemPoolEntry*>& entries)
381 : {
382 42036 : if (nBlockHeight <= nBestSeenHeight) {
383 : // Ignore side chains and re-orgs; assuming they are random
384 : // they don't affect the estimate.
385 : // And if an attacker can re-org the chain at will, then
386 : // you've got much bigger problems than "attacker can influence
387 : // transaction fees."
388 2903 : return;
389 : }
390 :
391 : // Must update nBestSeenHeight in sync with ClearCurrent so that
392 : // calls to removeTx (via processBlockTx) correctly calculate age
393 : // of unconfirmed txs to remove from tracking.
394 39133 : nBestSeenHeight = nBlockHeight;
395 :
396 : // Clear the current block state and update unconfirmed circular buffer
397 39133 : feeStats.ClearCurrent(nBlockHeight);
398 :
399 39133 : unsigned int countedTxs = 0;
400 : // Repopulate the current block state
401 163696 : for (unsigned int i = 0; i < entries.size(); i++) {
402 124563 : if (processBlockTx(nBlockHeight, entries[i]))
403 123055 : countedTxs++;
404 : }
405 :
406 : // Update all exponential averages with the current block state
407 39133 : feeStats.UpdateMovingAverages();
408 :
409 39133 : LogPrint(BCLog::ESTIMATEFEE, "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
410 : countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());
411 :
412 39133 : trackedTxs = 0;
413 39133 : untrackedTxs = 0;
414 : }
415 :
416 198 : CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget)
417 : {
418 : // Return failure if trying to analyze a target we're not tracking
419 198 : if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
420 0 : return CFeeRate(0);
421 :
422 198 : double median = feeStats.EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
423 :
424 198 : if (median < 0)
425 12 : return CFeeRate(0);
426 :
427 186 : return CFeeRate(median);
428 : }
429 :
430 983 : CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool)
431 : {
432 983 : if (answerFoundAtTarget)
433 965 : *answerFoundAtTarget = confTarget;
434 : // Return failure if trying to analyze a target we're not tracking
435 983 : if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
436 0 : return CFeeRate(0);
437 :
438 : double median = -1;
439 23368 : while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) {
440 22385 : median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
441 : }
442 :
443 983 : if (answerFoundAtTarget)
444 965 : *answerFoundAtTarget = confTarget - 1;
445 :
446 : // If mempool is limiting txs, return at least the min feerate from the mempool
447 1966 : CAmount minPoolFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
448 983 : if (minPoolFee > 0 && minPoolFee > median)
449 12 : return CFeeRate(minPoolFee);
450 :
451 971 : if (median < 0)
452 890 : return CFeeRate(0);
453 :
454 81 : return CFeeRate(median);
455 : }
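Because a caller may ask for a confirmation target with too little data, the loop above slides the target upward until some target answers, and answerFoundAtTarget reports where it finally succeeded. A minimal standalone sketch of that escalation, with EstimateMedianVal stubbed out:

// Illustrative sketch (not part of fees.cpp): the target-escalation loop in
// estimateSmartFee. medianAt() is a hypothetical stand-in for
// feeStats.EstimateMedianVal(...); targets with too little data return -1.
#include <cstdio>

static double medianAt(int target)
{
    // pretend we only have enough data at 5 or more confirmations
    return target >= 5 ? 2000.0 : -1.0;
}

int main()
{
    const int maxConfirms = 25;      // assumed stand-in for GetMaxConfirms()
    int confTarget = 2;              // caller asked for 2 blocks
    double median = -1;
    while (median < 0 && confTarget <= maxConfirms)
        median = medianAt(confTarget++);
    int answerFoundAtTarget = confTarget - 1;
    std::printf("asked for 2, answered at %d with feerate %.0f\n",
                answerFoundAtTarget, median);
    return 0;
}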
456 :
457 356 : void CBlockPolicyEstimator::Write(CAutoFile& fileout)
458 : {
459 356 : fileout << nBestSeenHeight;
460 356 : feeStats.Write(fileout);
461 356 : }
462 :
463 84 : void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
464 : {
465 84 : int nFileBestSeenHeight;
466 84 : filein >> nFileBestSeenHeight;
467 84 : feeStats.Read(filein);
468 84 : nBestSeenHeight = nFileBestSeenHeight;
469 84 : if (nFileVersion < 4029900) {
470 0 : TxConfirmStats priStats;
471 0 : priStats.Read(filein);
472 : }
473 84 : }