rippled
LedgerMaster.cpp
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/Ledger.h>
3#include <xrpld/app/ledger/LedgerMaster.h>
4#include <xrpld/app/ledger/LedgerReplayer.h>
5#include <xrpld/app/ledger/OpenLedger.h>
6#include <xrpld/app/ledger/OrderBookDB.h>
7#include <xrpld/app/ledger/PendingSaves.h>
8#include <xrpld/app/main/Application.h>
9#include <xrpld/app/misc/AmendmentTable.h>
10#include <xrpld/app/misc/LoadFeeTrack.h>
11#include <xrpld/app/misc/NetworkOPs.h>
12#include <xrpld/app/misc/SHAMapStore.h>
13#include <xrpld/app/misc/Transaction.h>
14#include <xrpld/app/misc/TxQ.h>
15#include <xrpld/app/misc/ValidatorList.h>
16#include <xrpld/app/paths/PathRequests.h>
17#include <xrpld/app/rdb/RelationalDatabase.h>
18#include <xrpld/core/TimeKeeper.h>
19#include <xrpld/overlay/Overlay.h>
20#include <xrpld/overlay/Peer.h>
21
22#include <xrpl/basics/Log.h>
23#include <xrpl/basics/MathUtilities.h>
24#include <xrpl/basics/UptimeClock.h>
25#include <xrpl/basics/contract.h>
26#include <xrpl/basics/safe_cast.h>
27#include <xrpl/basics/scope.h>
28#include <xrpl/beast/utility/instrumentation.h>
29#include <xrpl/protocol/BuildInfo.h>
30#include <xrpl/protocol/HashPrefix.h>
31#include <xrpl/protocol/digest.h>
32#include <xrpl/resource/Fees.h>
33
34#include <algorithm>
35#include <chrono>
36#include <cstdlib>
37#include <memory>
38#include <vector>
39
40namespace xrpl {
41
42// Don't catch up more than 100 ledgers (cannot exceed 256)
43static constexpr int MAX_LEDGER_GAP{100};
44
45// Don't acquire history if ledger is too old
47
48// Don't acquire history if write load is too high
49static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
50
51// Helper function for LedgerMaster::doAdvance()
52// Return true if candidateLedger should be fetched from the network.
53static bool
54shouldAcquire(
55 std::uint32_t const currentLedger,
56 std::uint32_t const ledgerHistory,
57 std::optional<LedgerIndex> const minimumOnline,
58 std::uint32_t const candidateLedger,
59 beast::Journal j)
60{
61 bool const ret = [&]() {
62 // Fetch ledger if it may be the current ledger
63 if (candidateLedger >= currentLedger)
64 return true;
65
66 // Or if it is within our configured history range:
67 if (currentLedger - candidateLedger <= ledgerHistory)
68 return true;
69
70 // Or if greater than or equal to a specific minimum ledger.
71 // Do nothing if the minimum ledger to keep online is unknown.
72 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
73 }();
74
75 JLOG(j.trace()) << "Missing ledger " << candidateLedger
76 << (ret ? " should" : " should NOT") << " be acquired";
77 return ret;
78}
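// Illustrative use of the helper above (hypothetical values): with
// currentLedger = 1000 and ledgerHistory = 256, a candidate of 900 is
// acquired (within the history window), a candidate of 600 is not
// (older than the window, no minimum configured), and that same
// candidate is acquired once a minimumOnline of 600 or less is set.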
79
80LedgerMaster::LedgerMaster(
81 Application& app,
82 Stopwatch& stopwatch,
83 beast::insight::Collector::ptr const& collector,
84 beast::Journal journal)
85 : app_(app)
86 , m_journal(journal)
87 , mLedgerHistory(collector, app)
88 , standalone_(app_.config().standalone())
89 , fetch_depth_(
90 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
91 , ledger_history_(app_.config().LEDGER_HISTORY)
92 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
93 , fetch_packs_(
94 "FetchPack",
95 65536,
96 std::chrono::seconds{45},
97 stopwatch,
98 app_.journal("TaggedCache"))
99 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
100{
101}
102
103LedgerIndex
104LedgerMaster::getCurrentLedgerIndex()
105{
106 return app_.openLedger().current()->header().seq;
107}
108
114
115bool
116LedgerMaster::isCompatible(
117 ReadView const& view,
118 beast::Journal::Stream s,
119 char const* reason)
120{
121 auto validLedger = getValidatedLedger();
122
123 if (validLedger && !areCompatible(*validLedger, view, s, reason))
124 {
125 return false;
126 }
127
128 {
130
131 if ((mLastValidLedger.second != 0) &&
132 !areCompatible(
133 mLastValidLedger.first,
134 mLastValidLedger.second,
135 view,
136 s,
137 reason))
138 {
139 return false;
140 }
141 }
142
143 return true;
144}
145
146std::chrono::seconds
147LedgerMaster::getPublishedLedgerAge()
148{
149 using namespace std::chrono_literals;
150 std::chrono::seconds pubClose{mPubLedgerClose.load()};
151 if (pubClose == 0s)
152 {
153 JLOG(m_journal.debug()) << "No published ledger";
154 return weeks{2};
155 }
156
157 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
158 ret -= pubClose;
159 ret = (ret > 0s) ? ret : 0s;
160 static std::chrono::seconds lastRet = -1s;
161
162 if (ret != lastRet)
163 {
164 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
165 lastRet = ret;
166 }
167 return ret;
168}
169
170std::chrono::seconds
171LedgerMaster::getValidatedLedgerAge()
172{
173 using namespace std::chrono_literals;
174
175 std::chrono::seconds valClose{mValidLedgerSign.load()};
176 if (valClose == 0s)
177 {
178 JLOG(m_journal.debug()) << "No validated ledger";
179 return weeks{2};
180 }
181
182 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
183 ret -= valClose;
184 ret = (ret > 0s) ? ret : 0s;
185 static std::chrono::seconds lastRet = -1s;
186
187 if (ret != lastRet)
188 {
189 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
190 lastRet = ret;
191 }
192 return ret;
193}
194
195bool
196LedgerMaster::isCaughtUp(std::string& reason)
197{
198 using namespace std::chrono_literals;
199
200 if (getPublishedLedgerAge() > 3min)
201 {
202 reason = "No recently-published ledger";
203 return false;
204 }
205 std::uint32_t validClose = mValidLedgerSign.load();
206 std::uint32_t pubClose = mPubLedgerClose.load();
207 if (!validClose || !pubClose)
208 {
209 reason = "No published ledger";
210 return false;
211 }
212 if (validClose > (pubClose + 90))
213 {
214 reason = "Published ledger lags validated ledger";
215 return false;
216 }
217 return true;
218}
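// Taken together, the checks above mean the server only reports itself
// caught up when a ledger was published within the last three minutes,
// both the published and validated close times are known, and the
// validated close time does not lead the published one by more than 90
// seconds of network close time.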
219
220void
221LedgerMaster::setValidLedger(std::shared_ptr<Ledger const> const& l)
222{
223 std::vector<NetClock::time_point> times;
224 std::optional<uint256> consensusHash;
225
226 if (!standalone_)
227 {
228 auto validations = app_.validators().negativeUNLFilter(
229 app_.getValidations().getTrustedForLedger(
230 l->header().hash, l->header().seq));
231 times.reserve(validations.size());
232 for (auto const& val : validations)
233 times.push_back(val->getSignTime());
234
235 if (!validations.empty())
236 consensusHash = validations.front()->getConsensusHash();
237 }
238
239 NetClock::time_point signTime;
240
241 if (!times.empty() && times.size() >= app_.validators().quorum())
242 {
243 // Calculate the sample median
244 std::sort(times.begin(), times.end());
245 auto const t0 = times[(times.size() - 1) / 2];
246 auto const t1 = times[times.size() / 2];
247 signTime = t0 + (t1 - t0) / 2;
248 }
249 else
250 {
251 signTime = l->header().closeTime;
252 }
253
254 mValidLedger.set(l);
255 mValidLedgerSign = signTime.time_since_epoch().count();
256 XRPL_ASSERT(
257 mValidLedgerSeq == 0 ||
258 l->header().seq + max_ledger_difference_ >
259 mValidLedgerSeq,
260 "xrpl::LedgerMaster::setValidLedger : valid ledger sequence");
262 mValidLedgerSeq = l->header().seq;
263
266 mLedgerHistory.validatedLedger(l, consensusHash);
268 if (!app_.getOPs().isBlocked())
269 {
270 if (app_.getAmendmentTable().hasUnsupportedEnabled())
271 {
272 JLOG(m_journal.error()) << "One or more unsupported amendments "
273 "activated: server blocked.";
274 app_.getOPs().setAmendmentBlocked();
275 }
276 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
277 {
278 // Amendments can lose majority, so re-check periodically (every
279 // flag ledger), and clear the flag if appropriate. If an unknown
280 // amendment gains majority log a warning as soon as it's
281 // discovered, then again every flag ledger until the operator
282 // upgrades, the amendment loses majority, or the amendment goes
283 // live and the node gets blocked. Unlike being amendment blocked,
284 // this message may be logged more than once per session, because
285 // the node will otherwise function normally, and this gives
286 // operators an opportunity to see and resolve the warning.
287 if (auto const first =
288 app_.getAmendmentTable().firstUnsupportedExpected())
289 {
290 JLOG(m_journal.error()) << "One or more unsupported amendments "
291 "reached majority. Upgrade before "
292 << to_string(*first)
293 << " to prevent your server from "
294 "becoming amendment blocked.";
295 app_.getOPs().setAmendmentWarned();
296 }
297 else
298 app_.getOPs().clearAmendmentWarned();
299 }
300 }
301}
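// Note on the sign time recorded above: when a quorum of trusted
// validations is available it is the median of their sign times (for an
// even sample count, the midpoint of the two middle values); otherwise
// the ledger's own close time is used as a fallback.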
302
303void
304LedgerMaster::setPubLedger(std::shared_ptr<Ledger const> const& l)
305{
306 mPubLedger = l;
307 mPubLedgerClose = l->header().closeTime.time_since_epoch().count();
308 mPubLedgerSeq = l->header().seq;
309}
310
311void
312LedgerMaster::addHeldTransaction(
313 std::shared_ptr<Transaction> const& transaction)
314{
316 mHeldTransactions.insert(transaction->getSTransaction());
317}
318
319// Validate a ledger's close time and sequence number if we're considering
320// jumping to that ledger. This helps defend against some rare hostile or
321// diverged majority scenarios.
322bool
323LedgerMaster::canBeCurrent(std::shared_ptr<Ledger const> const& ledger)
324{
325 XRPL_ASSERT(ledger, "xrpl::LedgerMaster::canBeCurrent : non-null input");
326
327 // Never jump to a candidate ledger that precedes our
328 // last validated ledger
329
330 auto validLedger = getValidatedLedger();
331 if (validLedger && (ledger->header().seq < validLedger->header().seq))
332 {
333 JLOG(m_journal.trace())
334 << "Candidate for current ledger has low seq "
335 << ledger->header().seq << " < " << validLedger->header().seq;
336 return false;
337 }
338
339 // Ensure this ledger's parent close time is within five minutes of
340 // our current time. If we already have a known fully-valid ledger
341 // we perform this check. Otherwise, we only do it if we've built a
342 // few ledgers as our clock can be off when we first start up
343
344 auto closeTime = app_.timeKeeper().closeTime();
345 auto ledgerClose = ledger->header().parentCloseTime;
346
347 using namespace std::chrono_literals;
348 if ((validLedger || (ledger->header().seq > 10)) &&
349 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
350 5min))
351 {
352 JLOG(m_journal.warn())
353 << "Candidate for current ledger has close time "
354 << to_string(ledgerClose) << " at network time "
355 << to_string(closeTime) << " seq " << ledger->header().seq;
356 return false;
357 }
358
359 if (validLedger)
360 {
361 // Sequence number must not be too high. We allow ten ledgers
362 // for time inaccuracies plus a maximum run rate of one ledger
363 // every two seconds. The goal is to prevent a malicious ledger
364 // from increasing our sequence unreasonably high
365
366 LedgerIndex maxSeq = validLedger->header().seq + 10;
367
368 if (closeTime > validLedger->header().parentCloseTime)
369 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
370 closeTime - validLedger->header().parentCloseTime)
371 .count() /
372 2;
373
374 if (ledger->header().seq > maxSeq)
375 {
376 JLOG(m_journal.warn())
377 << "Candidate for current ledger has high seq "
378 << ledger->header().seq << " > " << maxSeq;
379 return false;
380 }
381
382 JLOG(m_journal.trace())
383 << "Acceptable seq range: " << validLedger->header().seq
384 << " <= " << ledger->header().seq << " <= " << maxSeq;
385 }
386
387 return true;
388}
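// Worked example of the sequence bound above (illustrative numbers): if
// the candidate arrives 120 seconds after the last validated ledger's
// parent close time, maxSeq = validated seq + 10 + 120 / 2, i.e. ten
// ledgers of slack plus one ledger for every two seconds elapsed.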
389
390void
391LedgerMaster::switchLCL(std::shared_ptr<Ledger const> const& lastClosed)
392{
393 XRPL_ASSERT(lastClosed, "xrpl::LedgerMaster::switchLCL : non-null input");
394 if (!lastClosed->isImmutable())
395 LogicError("mutable ledger in switchLCL");
396
397 if (lastClosed->open())
398 LogicError("The new last closed ledger is open!");
399
400 {
402 mClosedLedger.set(lastClosed);
403 }
404
405 if (standalone_)
406 {
407 setFullLedger(lastClosed, true, false);
408 tryAdvance();
409 }
410 else
411 {
412 checkAccept(lastClosed);
413 }
414}
415
416bool
417LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
418{
419 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
420}
421
422bool
423LedgerMaster::storeLedger(std::shared_ptr<Ledger const> ledger)
424{
425 bool validated = ledger->header().validated;
426 // Returns true if we already had the ledger
427 return mLedgerHistory.insert(std::move(ledger), validated);
428}
429
435void
436LedgerMaster::applyHeldTransactions()
437{
438 CanonicalTXSet const set = [this]() {
440 // VFALCO NOTE The hash for an open ledger is undefined so we use
441 // something that is a reasonable substitute.
442 CanonicalTXSet set(app_.openLedger().current()->header().parentHash);
444 return set;
445 }();
446
447 if (!set.empty())
448 app_.getOPs().processTransactionSet(set);
449}
450
458
459void
464
465bool
466LedgerMaster::haveLedger(std::uint32_t seq)
467{
469 return boost::icl::contains(mCompleteLedgers, seq);
470}
471
472void
478
479bool
480LedgerMaster::isValidated(ReadView const& ledger)
481{
482 if (ledger.open())
483 return false;
484
485 if (ledger.header().validated)
486 return true;
487
488 auto const seq = ledger.header().seq;
489 try
490 {
491 // Use the skip list in the last validated ledger to see if ledger
492 // comes before the last validated ledger (and thus has been
493 // validated).
494 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
495
496 if (!hash || ledger.header().hash != *hash)
497 {
498 // This ledger's hash is not the hash of the validated ledger
499 if (hash)
500 {
501 XRPL_ASSERT(
502 hash->isNonZero(),
503 "xrpl::LedgerMaster::isValidated : nonzero hash");
504 uint256 valHash =
505 app_.getRelationalDatabase().getHashByIndex(seq);
506 if (valHash == ledger.header().hash)
507 {
508 // SQL database doesn't match ledger chain
509 clearLedger(seq);
510 }
511 }
512 return false;
513 }
514 }
515 catch (SHAMapMissingNode const& mn)
516 {
517 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
518 return false;
519 }
520
521 // Mark ledger as validated to save time if we see it again.
522 ledger.header().validated = true;
523 return true;
524}
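// The walk above relies on the skip list held in the last validated
// ledger: if the hash recorded there for this sequence differs from the
// ledger's own hash, the ledger is not on the validated chain, and a
// matching SQL entry for that sequence is treated as stale and cleared.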
525
526// returns Ledgers we have all the nodes for
527bool
528LedgerMaster::getFullValidatedRange(
529 std::uint32_t& minVal,
530 std::uint32_t& maxVal)
531{
532 // Validated ledger is likely not stored in the DB yet so we use the
533 // published ledger which is.
534 maxVal = mPubLedgerSeq.load();
535
536 if (!maxVal)
537 return false;
538
540 {
542 maybeMin = prevMissing(mCompleteLedgers, maxVal);
543 }
544
545 if (maybeMin == std::nullopt)
546 minVal = maxVal;
547 else
548 minVal = 1 + *maybeMin;
549
550 return true;
551}
552
553// Returns Ledgers we have all the nodes for and are indexed
554bool
555LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal)
556{
557 if (!getFullValidatedRange(minVal, maxVal))
558 return false;
559
560 // Remove from the validated range any ledger sequences that may not be
561 // fully updated in the database yet
562
563 auto const pendingSaves = app_.pendingSaves().getSnapshot();
564
565 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
566 {
567 // Ensure we shrink the tips as much as possible. If we have 7-9 and
568 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
569 // because then we'll have nothing when we could have 7.
570 while (pendingSaves.count(maxVal) > 0)
571 --maxVal;
572 while (pendingSaves.count(minVal) > 0)
573 ++minVal;
574
575 // Best effort for remaining exclusions
576 for (auto v : pendingSaves)
577 {
578 if ((v.first >= minVal) && (v.first <= maxVal))
579 {
580 if (v.first > ((minVal + maxVal) / 2))
581 maxVal = v.first - 1;
582 else
583 minVal = v.first + 1;
584 }
585 }
586
587 if (minVal > maxVal)
588 minVal = maxVal = 0;
589 }
590
591 return true;
592}
593
594// Get the earliest ledger we will let peers fetch
595std::uint32_t
596LedgerMaster::getEarliestFetch()
597{
598 // The earliest ledger we will let people fetch is ledger zero,
599 // unless that creates a larger range than allowed
600 std::uint32_t e = getClosedLedger()->header().seq;
601
602 if (e > fetch_depth_)
603 e -= fetch_depth_;
604 else
605 e = 0;
606 return e;
607}
608
609void
610LedgerMaster::tryFill(std::shared_ptr<Ledger const> ledger)
611{
612 std::uint32_t seq = ledger->header().seq;
613 uint256 prevHash = ledger->header().parentHash;
614
615 std::map<std::uint32_t, LedgerHashPair> ledgerHashes;
616
617 std::uint32_t minHas = seq;
618 std::uint32_t maxHas = seq;
619
620 NodeStore::Database& nodeStore{app_.getNodeStore()};
621 while (!app_.getJobQueue().isStopping() && seq > 0)
622 {
623 {
625 minHas = seq;
626 --seq;
627
628 if (haveLedger(seq))
629 break;
630 }
631
632 auto it(ledgerHashes.find(seq));
633
634 if (it == ledgerHashes.end())
635 {
636 if (app_.isStopping())
637 return;
638
639 {
641 mCompleteLedgers.insert(range(minHas, maxHas));
642 }
643 maxHas = minHas;
644 ledgerHashes = app_.getRelationalDatabase().getHashesByIndex(
645 (seq < 500) ? 0 : (seq - 499), seq);
646 it = ledgerHashes.find(seq);
647
648 if (it == ledgerHashes.end())
649 break;
650
651 if (!nodeStore.fetchNodeObject(
652 ledgerHashes.begin()->second.ledgerHash,
653 ledgerHashes.begin()->first))
654 {
655 // The ledger is not backed by the node store
656 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
657 << " mismatches node store";
658 break;
659 }
660 }
661
662 if (it->second.ledgerHash != prevHash)
663 break;
664
665 prevHash = it->second.parentHash;
666 }
667
668 {
670 mCompleteLedgers.insert(range(minHas, maxHas));
671 }
672 {
674 mFillInProgress = 0;
675 tryAdvance();
676 }
677}
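// tryFill walks backwards from the given ledger, matching each parent
// hash against hashes loaded from the relational database in batches of
// up to 500 sequences, and records every contiguous range it can verify
// in mCompleteLedgers; it stops at the first hash mismatch or at a
// ledger the node store cannot back.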
678
681void
682LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason)
683{
684 LedgerIndex const ledgerIndex = missing + 1;
685
686 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
687 if (!haveHash || haveHash->isZero())
688 {
689 JLOG(m_journal.error())
690 << "No hash for fetch pack. Missing Index " << missing;
691 return;
692 }
693
694 // Select target Peer based on highest score. The score is randomized
695 // but biased in favor of Peers with low latency.
696 std::shared_ptr<Peer> target;
697 {
698 int maxScore = 0;
699 auto peerList = app_.overlay().getActivePeers();
700 for (auto const& peer : peerList)
701 {
702 if (peer->hasRange(missing, missing + 1))
703 {
704 int score = peer->getScore(true);
705 if (!target || (score > maxScore))
706 {
707 target = peer;
708 maxScore = score;
709 }
710 }
711 }
712 }
713
714 if (target)
715 {
716 protocol::TMGetObjectByHash tmBH;
717 tmBH.set_query(true);
718 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
719 tmBH.set_ledgerhash(haveHash->begin(), 32);
720 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
721
722 target->send(packet);
723 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
724 }
725 else
726 JLOG(m_journal.debug()) << "No peer for fetch pack";
727}
728
729void
730LedgerMaster::fixMismatch(ReadView const& ledger)
731{
732 int invalidate = 0;
733 std::optional<uint256> hash;
734
735 for (std::uint32_t lSeq = ledger.header().seq - 1; lSeq > 0; --lSeq)
736 {
737 if (haveLedger(lSeq))
738 {
739 try
740 {
741 hash = hashOfSeq(ledger, lSeq, m_journal);
742 }
743 catch (std::exception const& ex)
744 {
745 JLOG(m_journal.warn())
746 << "fixMismatch encounters partial ledger. Exception: "
747 << ex.what();
748 clearLedger(lSeq);
749 return;
750 }
751
752 if (hash)
753 {
754 // try to close the seam
755 auto otherLedger = getLedgerBySeq(lSeq);
756
757 if (otherLedger && (otherLedger->header().hash == *hash))
758 {
759 // we closed the seam
760 if (invalidate != 0)
761 {
762 JLOG(m_journal.warn())
763 << "Match at " << lSeq << ", " << invalidate
764 << " prior ledgers invalidated";
765 }
766
767 return;
768 }
769 }
770
771 clearLedger(lSeq);
772 ++invalidate;
773 }
774 }
775
776 // all prior ledgers invalidated
777 if (invalidate != 0)
778 {
779 JLOG(m_journal.warn())
780 << "All " << invalidate << " prior ledgers invalidated";
781 }
782}
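// fixMismatch walks backwards from the invalidating ledger, clearing
// each stored ledger until one matches the skip-list entry of the ledger
// passed in ("closing the seam"); everything between the mismatch and
// that point is invalidated and will be re-acquired later.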
783
784void
785LedgerMaster::setFullLedger(
786 std::shared_ptr<Ledger const> const& ledger,
787 bool isSynchronous,
788 bool isCurrent)
789{
790 // A new ledger has been accepted as part of the trusted chain
791 JLOG(m_journal.debug()) << "Ledger " << ledger->header().seq
792 << " accepted :" << ledger->header().hash;
793 XRPL_ASSERT(
794 ledger->stateMap().getHash().isNonZero(),
795 "xrpl::LedgerMaster::setFullLedger : nonzero ledger state hash");
796
797 ledger->setValidated();
798 ledger->setFull();
799
800 if (isCurrent)
801 mLedgerHistory.insert(ledger, true);
802
803 {
804 // Check the SQL database's entry for the sequence before this
805 // ledger, if it's not this ledger's parent, invalidate it
806 uint256 prevHash = app_.getRelationalDatabase().getHashByIndex(
807 ledger->header().seq - 1);
808 if (prevHash.isNonZero() && prevHash != ledger->header().parentHash)
809 clearLedger(ledger->header().seq - 1);
810 }
811
812 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
813
814 {
816 mCompleteLedgers.insert(ledger->header().seq);
817 }
818
819 {
821
822 if (ledger->header().seq > mValidLedgerSeq)
823 setValidLedger(ledger);
824 if (!mPubLedger)
825 {
826 setPubLedger(ledger);
827 app_.getOrderBookDB().setup(ledger);
828 }
829
830 if (ledger->header().seq != 0 && haveLedger(ledger->header().seq - 1))
831 {
832 // we think we have the previous ledger, double check
833 auto prevLedger = getLedgerBySeq(ledger->header().seq - 1);
834
835 if (!prevLedger ||
836 (prevLedger->header().hash != ledger->header().parentHash))
837 {
838 JLOG(m_journal.warn())
839 << "Acquired ledger invalidates previous ledger: "
840 << (prevLedger ? "hashMismatch" : "missingLedger");
841 fixMismatch(*ledger);
842 }
843 }
844 }
845}
846
847void
853
854// Check if the specified ledger can become the new last fully-validated
855// ledger.
856void
857LedgerMaster::checkAccept(uint256 const& hash, std::uint32_t seq)
858{
859 std::size_t valCount = 0;
860
861 if (seq != 0)
862 {
863 // Ledger is too old
864 if (seq < mValidLedgerSeq)
865 return;
866
867 auto validations = app_.validators().negativeUNLFilter(
868 app_.getValidations().getTrustedForLedger(hash, seq));
869 valCount = validations.size();
870 if (valCount >= app_.validators().quorum())
871 {
873 if (seq > mLastValidLedger.second)
874 mLastValidLedger = std::make_pair(hash, seq);
875 }
876
877 if (seq == mValidLedgerSeq)
878 return;
879
880 // Ledger could match the ledger we're already building
881 if (seq == mBuildingLedgerSeq)
882 return;
883 }
884
885 auto ledger = mLedgerHistory.getLedgerByHash(hash);
886
887 if (!ledger)
888 {
889 if ((seq != 0) && (getValidLedgerIndex() == 0))
890 {
891 // Set peers converged early if we can
892 if (valCount >= app_.validators().quorum())
894 }
895
896 // FIXME: We may not want to fetch a ledger with just one
897 // trusted validation
898 ledger = app_.getInboundLedgers().acquire(
900 }
901
902 if (ledger)
903 checkAccept(ledger);
904}
905
916
917void
918LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
919{
920 // Can we accept this ledger as our new last fully-validated ledger
921
922 if (!canBeCurrent(ledger))
923 return;
924
925 // Can we advance the last fully-validated ledger? If so, can we
926 // publish?
928
929 if (ledger->header().seq <= mValidLedgerSeq)
930 return;
931
932 auto const minVal = getNeededValidations();
933 auto validations = app_.validators().negativeUNLFilter(
934 app_.getValidations().getTrustedForLedger(
935 ledger->header().hash, ledger->header().seq));
936 auto const tvc = validations.size();
937 if (tvc < minVal) // nothing we can do
938 {
939 JLOG(m_journal.trace())
940 << "Only " << tvc << " validations for " << ledger->header().hash;
941 return;
942 }
943
944 JLOG(m_journal.info()) << "Advancing accepted ledger to "
945 << ledger->header().seq << " with >= " << minVal
946 << " validations";
947
948 ledger->setValidated();
949 ledger->setFull();
950 setValidLedger(ledger);
951 if (!mPubLedger)
952 {
953 pendSaveValidated(app_, ledger, true, true);
954 setPubLedger(ledger);
955 app_.getOrderBookDB().setup(ledger);
956 }
957
958 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
959 auto fees = app_.getValidations().fees(ledger->header().hash, base);
960 {
961 auto fees2 =
962 app_.getValidations().fees(ledger->header().parentHash, base);
963 fees.reserve(fees.size() + fees2.size());
964 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
965 }
966 std::uint32_t fee;
967 if (!fees.empty())
968 {
969 std::sort(fees.begin(), fees.end());
970 if (auto stream = m_journal.debug())
971 {
973 s << "Received fees from validations: (" << fees.size() << ") ";
974 for (auto const fee1 : fees)
975 {
976 s << " " << fee1;
977 }
978 stream << s.str();
979 }
980 fee = fees[fees.size() / 2]; // median
981 }
982 else
983 {
984 fee = base;
985 }
986
987 app_.getFeeTrack().setRemoteFee(fee);
988
989 tryAdvance();
990
991 if (ledger->seq() % 256 == 0)
992 {
993 // Check if the majority of validators run a higher version rippled
994 // software. If so print a warning.
995 //
996 // Validators include their rippled software version in the validation
997 // messages of every (flag - 1) ledger. We wait for one ledger time
998 // before checking the version information to accumulate more validation
999 // messages.
1000
1001 auto currentTime = app_.timeKeeper().now();
1002 bool needPrint = false;
1003
1004 // The variable upgradeWarningPrevTime_ will be set when and only when
1005 // the warning is printed.
1006 if (upgradeWarningPrevTime_ == TimeKeeper::time_point())
1007 {
1008 // Have not printed the warning before, check if need to print.
1009 auto const vals = app_.getValidations().getTrustedForLedger(
1010 ledger->header().parentHash, ledger->header().seq - 1);
1011 std::size_t higherVersionCount = 0;
1012 std::size_t rippledCount = 0;
1013 for (auto const& v : vals)
1014 {
1015 if (v->isFieldPresent(sfServerVersion))
1016 {
1017 auto version = v->getFieldU64(sfServerVersion);
1018 higherVersionCount +=
1019 BuildInfo::isNewerVersion(version) ? 1 : 0;
1020 rippledCount +=
1021 BuildInfo::isRippledVersion(version) ? 1 : 0;
1022 }
1023 }
1024 // We report only if (1) we have accumulated validation messages
1025 // from 90% validators from the UNL, (2) 60% of validators
1026 // running the rippled implementation have higher version numbers,
1027 // and (3) the calculation won't cause divide-by-zero.
1028 if (higherVersionCount > 0 && rippledCount > 0)
1029 {
1030 constexpr std::size_t reportingPercent = 90;
1031 constexpr std::size_t cutoffPercent = 60;
1032 auto const unlSize{
1033 app_.validators().getQuorumKeys().second.size()};
1034 needPrint = unlSize > 0 &&
1035 calculatePercent(vals.size(), unlSize) >=
1036 reportingPercent &&
1037 calculatePercent(higherVersionCount, rippledCount) >=
1038 cutoffPercent;
1039 }
1040 }
1041 // To throttle the warning messages, instead of printing a warning
1042 // every flag ledger, we print every week.
1043 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1044 {
1045 // Printed the warning before, and assuming most validators
1046 // do not downgrade, we keep printing the warning
1047 // until the local server is restarted.
1048 needPrint = true;
1049 }
1050
1051 if (needPrint)
1052 {
1053 upgradeWarningPrevTime_ = currentTime;
1054 auto const upgradeMsg =
1055 "Check for upgrade: "
1056 "A majority of trusted validators are "
1057 "running a newer version.";
1058 std::cerr << upgradeMsg << std::endl;
1059 JLOG(m_journal.error()) << upgradeMsg;
1060 }
1061 }
1062}
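// Two details of the fee and version handling above: the fee applied is
// the median of the fee levels reported in validations for this ledger
// and its parent (falling back to the local load base when none are
// reported, and presumably fed to the load fee track), and the upgrade
// warning fires only when at least 90% of the UNL has reported a server
// version and at least 60% of the reporting rippled validators run a
// newer version than this server.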
1063
1065void
1066LedgerMaster::consensusBuilt(
1067 std::shared_ptr<Ledger const> const& ledger,
1068 uint256 const& consensusHash,
1069 Json::Value consensus)
1070{
1071 // Because we just built a ledger, we are no longer building one
1072 setBuildingLedger(0);
1073
1074 // No need to process validations in standalone mode
1075 if (standalone_)
1076 return;
1077
1078 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1079
1080 if (ledger->header().seq <= mValidLedgerSeq)
1081 {
1082 auto stream = app_.journal("LedgerConsensus").info();
1083 JLOG(stream) << "Consensus built old ledger: " << ledger->header().seq
1084 << " <= " << mValidLedgerSeq;
1085 return;
1086 }
1087
1088 // See if this ledger can be the new fully-validated ledger
1089 checkAccept(ledger);
1090
1091 if (ledger->header().seq <= mValidLedgerSeq)
1092 {
1093 auto stream = app_.journal("LedgerConsensus").debug();
1094 JLOG(stream) << "Consensus ledger fully validated";
1095 return;
1096 }
1097
1098 // This ledger cannot be the new fully-validated ledger, but
1099 // maybe we saved up validations for some other ledger that can be
1100
1101 auto validations = app_.validators().negativeUNLFilter(
1102 app_.getValidations().currentTrusted());
1103
1104 // Track validation counts with sequence numbers
1105 class valSeq
1106 {
1107 public:
1108 valSeq() : valCount_(0), ledgerSeq_(0)
1109 {
1110 ;
1111 }
1112
1113 void
1114 mergeValidation(LedgerIndex seq)
1115 {
1116 valCount_++;
1117
1118 // If we didn't already know the sequence, now we do
1119 if (ledgerSeq_ == 0)
1120 ledgerSeq_ = seq;
1121 }
1122
1123 std::size_t valCount_;
1124 LedgerIndex ledgerSeq_;
1125 };
1126
1127 // Count the number of current, trusted validations
1128 hash_map<uint256, valSeq> count;
1129 for (auto const& v : validations)
1130 {
1131 valSeq& vs = count[v->getLedgerHash()];
1132 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1133 }
1134
1135 auto const neededValidations = getNeededValidations();
1136 auto maxSeq = mValidLedgerSeq.load();
1137 auto maxLedger = ledger->header().hash;
1138
1139 // Of the ledgers with sufficient validations,
1140 // find the one with the highest sequence
1141 for (auto& v : count)
1142 if (v.second.valCount_ > neededValidations)
1143 {
1144 // If we still don't know the sequence, get it
1145 if (v.second.ledgerSeq_ == 0)
1146 {
1147 if (auto l = getLedgerByHash(v.first))
1148 v.second.ledgerSeq_ = l->header().seq;
1149 }
1150
1151 if (v.second.ledgerSeq_ > maxSeq)
1152 {
1153 maxSeq = v.second.ledgerSeq_;
1154 maxLedger = v.first;
1155 }
1156 }
1157
1158 if (maxSeq > mValidLedgerSeq)
1159 {
1160 auto stream = app_.journal("LedgerConsensus").debug();
1161 JLOG(stream) << "Consensus triggered check of ledger";
1162 checkAccept(maxLedger, maxSeq);
1163 }
1164}
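// When the ledger just built cannot itself be fully validated,
// consensusBuilt tallies the trusted validations seen per ledger hash;
// if some other ledger has more than the needed validations and a higher
// sequence than the current validated ledger, checkAccept is retried
// with that hash and sequence.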
1165
1166std::optional<LedgerHash>
1167LedgerMaster::getLedgerHashForHistory(
1168 LedgerIndex index,
1169 InboundLedger::Reason reason)
1170{
1171 // Try to get the hash of a ledger we need to fetch for history
1172 std::optional<LedgerHash> ret;
1173 auto const& l{mHistLedger};
1174
1175 if (l && l->header().seq >= index)
1176 {
1177 ret = hashOfSeq(*l, index, m_journal);
1178 if (!ret)
1179 ret = walkHashBySeq(index, l, reason);
1180 }
1181
1182 if (!ret)
1183 ret = walkHashBySeq(index, reason);
1184
1185 return ret;
1186}
1187
1188std::vector<std::shared_ptr<Ledger const>>
1189LedgerMaster::findNewLedgersToPublish(
1190 std::unique_lock<std::recursive_mutex>& sl)
1191{
1192 std::vector<std::shared_ptr<Ledger const>> ret;
1193
1194 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1195
1196 // No valid ledger, nothing to do
1197 if (mValidLedger.empty())
1198 {
1199 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1200 return {};
1201 }
1202
1203 if (!mPubLedger)
1204 {
1205 JLOG(m_journal.info())
1206 << "First published ledger will be " << mValidLedgerSeq;
1207 return {mValidLedger.get()};
1208 }
1209
1210 if (mValidLedgerSeq > (mPubLedgerSeq + MAX_LEDGER_GAP))
1211 {
1212 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1213 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1214
1215 auto valLedger = mValidLedger.get();
1216 ret.push_back(valLedger);
1217 setPubLedger(valLedger);
1218 app_.getOrderBookDB().setup(valLedger);
1219
1220 return {valLedger};
1221 }
1222
1223 if (mValidLedgerSeq <= mPubLedgerSeq)
1224 {
1225 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1226 return {};
1227 }
1228
1229 int acqCount = 0;
1230
1231 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1232 auto valLedger = mValidLedger.get();
1233 std::uint32_t valSeq = valLedger->header().seq;
1234
1235 scope_unlock sul{sl};
1236 try
1237 {
1238 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1239 {
1240 JLOG(m_journal.trace())
1241 << "Trying to fetch/publish valid ledger " << seq;
1242
1243 std::shared_ptr<Ledger const> ledger;
1244 // This can throw
1245 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1246 // VFALCO TODO Restructure this code so that zero is not
1247 // used.
1248 if (!hash)
1249 hash = beast::zero; // kludge
1250 if (seq == valSeq)
1251 {
1252 // We need to publish the ledger we just fully validated
1253 ledger = valLedger;
1254 }
1255 else if (hash->isZero())
1256 {
1257 // LCOV_EXCL_START
1258 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1259 << " does not have hash for " << seq;
1260 UNREACHABLE(
1261 "xrpl::LedgerMaster::findNewLedgersToPublish : ledger "
1262 "not found");
1263 // LCOV_EXCL_STOP
1264 }
1265 else
1266 {
1267 ledger = mLedgerHistory.getLedgerByHash(*hash);
1268 }
1269
1270 if (!app_.config().LEDGER_REPLAY)
1271 {
1272 // Can we try to acquire the ledger we need?
1273 if (!ledger && (++acqCount < ledger_fetch_size_))
1274 ledger = app_.getInboundLedgers().acquire(
1275 *hash, seq, InboundLedger::Reason::GENERIC);
1276 }
1277
1278 // Did we acquire the next ledger we need to publish?
1279 if (ledger && (ledger->header().seq == pubSeq))
1280 {
1281 ledger->setValidated();
1282 ret.push_back(ledger);
1283 ++pubSeq;
1284 }
1285 }
1286
1287 JLOG(m_journal.trace())
1288 << "ready to publish " << ret.size() << " ledgers.";
1289 }
1290 catch (std::exception const& ex)
1291 {
1292 JLOG(m_journal.error())
1293 << "Exception while trying to find ledgers to publish: "
1294 << ex.what();
1295 }
1296
1297 if (app_.config().LEDGER_REPLAY)
1298 {
1299 /* Narrow down the gap of ledgers, and try to replay them.
1300 * When replaying a ledger gap, if the local node has
1301 * the start ledger, it saves an expensive InboundLedger
1302 * acquire. If the local node has the finish ledger, it
1303 * saves a skip list acquire.
1304 */
1305 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1306 auto finishLedger = valLedger;
1307 while (startLedger->seq() + 1 < finishLedger->seq())
1308 {
1309 if (auto const parent = mLedgerHistory.getLedgerByHash(
1310 finishLedger->header().parentHash);
1311 parent)
1312 {
1313 finishLedger = parent;
1314 }
1315 else
1316 {
1317 auto numberLedgers =
1318 finishLedger->seq() - startLedger->seq() + 1;
1319 JLOG(m_journal.debug())
1320 << "Publish LedgerReplays " << numberLedgers
1321 << " ledgers, from seq=" << startLedger->header().seq
1322 << ", " << startLedger->header().hash
1323 << " to seq=" << finishLedger->header().seq << ", "
1324 << finishLedger->header().hash;
1325 app_.getLedgerReplayer().replay(
1326 InboundLedger::Reason::GENERIC,
1327 finishLedger->header().hash,
1328 numberLedgers);
1329 break;
1330 }
1331 }
1332 }
1333
1334 return ret;
1335}
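// When ledger replay is enabled, the block above narrows the publish gap
// by walking parent hashes back from the validated ledger and hands the
// remaining span to the ledger replayer, saving a full InboundLedger
// acquire when either endpoint of the gap is already held locally.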
1336
1337void
1338LedgerMaster::tryAdvance()
1339{
1341
1342 // Can't advance without at least one fully-valid ledger
1343 mAdvanceWork = true;
1344 if (!mAdvanceThread && !mValidLedger.empty())
1345 {
1346 mAdvanceThread = true;
1347 app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1349
1350 XRPL_ASSERT(
1352 "xrpl::LedgerMaster::tryAdvance : has valid ledger");
1353
1354 JLOG(m_journal.trace()) << "advanceThread<";
1355
1356 try
1357 {
1358 doAdvance(sl);
1359 }
1360 catch (std::exception const& ex)
1361 {
1362 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1363 }
1364
1365 mAdvanceThread = false;
1366 JLOG(m_journal.trace()) << "advanceThread>";
1367 });
1368 }
1369}
1370
1371void
1372LedgerMaster::updatePaths()
1373{
1374 {
1376 if (app_.getOPs().isNeedNetworkLedger())
1377 {
1379 mPathLedger.reset();
1380 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1381 return;
1382 }
1383 }
1384
1385 while (!app_.getJobQueue().isStopping())
1386 {
1387 JLOG(m_journal.debug()) << "updatePaths running";
1389 {
1391
1392 if (!mValidLedger.empty() &&
1393 (!mPathLedger ||
1394 (mPathLedger->header().seq != mValidLedgerSeq)))
1395 { // We have a new valid ledger since the last full pathfinding
1397 lastLedger = mPathLedger;
1398 }
1399 else if (mPathFindNewRequest)
1400 { // We have a new request but no new ledger
1401 lastLedger = app_.openLedger().current();
1402 }
1403 else
1404 { // Nothing to do
1406 mPathLedger.reset();
1407 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1408 return;
1409 }
1410 }
1411
1412 if (!standalone_)
1413 { // don't pathfind with a ledger that's more than 60 seconds old
1414 using namespace std::chrono;
1415 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1416 lastLedger->header().closeTime;
1417 if (age > 1min)
1418 {
1419 JLOG(m_journal.debug())
1420 << "Published ledger too old for updating paths";
1423 mPathLedger.reset();
1424 return;
1425 }
1426 }
1427
1428 try
1429 {
1430 auto& pathRequests = app_.getPathRequests();
1431 {
1433 if (!pathRequests.requestsPending())
1434 {
1436 mPathLedger.reset();
1437 JLOG(m_journal.debug())
1438 << "No path requests found. Nothing to do for updating "
1439 "paths. "
1440 << mPathFindThread << " jobs remaining";
1441 return;
1442 }
1443 }
1444 JLOG(m_journal.debug()) << "Updating paths";
1445 pathRequests.updateAll(lastLedger);
1446
1448 if (!pathRequests.requestsPending())
1449 {
1450 JLOG(m_journal.debug())
1451 << "No path requests left. No need for further updating "
1452 "paths";
1454 mPathLedger.reset();
1455 return;
1456 }
1457 }
1458 catch (SHAMapMissingNode const& mn)
1459 {
1460 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1461 if (lastLedger->open())
1462 {
1463 // our parent is the problem
1465 lastLedger->header().parentHash,
1466 lastLedger->header().seq - 1,
1468 }
1469 else
1470 {
1471 // this ledger is the problem
1473 lastLedger->header().hash,
1474 lastLedger->header().seq,
1476 }
1477 }
1478 }
1479}
1480
1481bool
1483{
1485 mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1486 return mPathFindNewRequest;
1487}
1488
1489bool
1491{
1493 bool const ret = mPathFindNewRequest;
1494 mPathFindNewRequest = false;
1495 return ret;
1496}
1497
1498// If the order book is radically updated, we need to reprocess all
1499// pathfinding requests.
1500bool
1502{
1504 mPathLedger.reset();
1505
1506 return newPFWork("pf:newOBDB", ml);
1507}
1508
1511bool
1512LedgerMaster::newPFWork(
1513 char const* name,
1514 std::unique_lock<std::recursive_mutex>&)
1515{
1516 if (!app_.isStopping() && mPathFindThread < 2 &&
1518 {
1519 JLOG(m_journal.debug())
1520 << "newPFWork: Creating job. path find threads: "
1521 << mPathFindThread;
1522 if (app_.getJobQueue().addJob(
1523 jtUPDATE_PF, name, [this]() { updatePaths(); }))
1524 {
1526 }
1527 }
1528 // If we're stopping don't give callers the expectation that their
1529 // request will be fulfilled, even if it may be serviced.
1530 return mPathFindThread > 0 && !app_.isStopping();
1531}
1532
1533std::recursive_mutex&
1534LedgerMaster::peekMutex()
1535{
1536 return m_mutex;
1537}
1538
1539// The current ledger is the ledger we believe new transactions should go in
1545
1551
1552Rules
1553LedgerMaster::getValidatedRules()
1554{
1555 // Once we have a guarantee that there's always a last validated
1556 // ledger then we can dispense with the if.
1557
1558 // Return the Rules from the last validated ledger.
1559 if (auto const ledger = getValidatedLedger())
1560 return ledger->rules();
1561
1562 return Rules(app_.config().features);
1563}
1564
1565// This is the last ledger we published to clients and can lag the validated
1566// ledger.
1573
1580
1581std::optional<NetClock::time_point>
1582LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex)
1583{
1584 uint256 hash = getHashBySeq(ledgerIndex);
1585 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1586 : std::nullopt;
1587}
1588
1589std::optional<NetClock::time_point>
1590LedgerMaster::getCloseTimeByHash(
1591 LedgerHash const& ledgerHash,
1592 std::uint32_t index)
1593{
1594 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1595 if (nodeObject && (nodeObject->getData().size() >= 120))
1596 {
1597 SerialIter it(
1598 nodeObject->getData().data(), nodeObject->getData().size());
1599 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1600 {
1601 it.skip(
1602 4 + 8 + 32 + // seq drops parentHash
1603 32 + 32 + 4); // txHash acctHash parentClose
1604 return NetClock::time_point{NetClock::duration{it.get32()}};
1605 }
1606 }
1607
1608 return std::nullopt;
1609}
1610
1611uint256
1612LedgerMaster::getHashBySeq(std::uint32_t index)
1613{
1614 uint256 hash = mLedgerHistory.getLedgerHash(index);
1615
1616 if (hash.isNonZero())
1617 return hash;
1618
1619 return app_.getRelationalDatabase().getHashByIndex(index);
1620}
1621
1622std::optional<LedgerHash>
1623LedgerMaster::walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
1624{
1625 std::optional<LedgerHash> ledgerHash;
1626
1627 if (auto referenceLedger = mValidLedger.get())
1628 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1629
1630 return ledgerHash;
1631}
1632
1633std::optional<LedgerHash>
1634LedgerMaster::walkHashBySeq(
1635 std::uint32_t index,
1636 std::shared_ptr<ReadView const> const& referenceLedger,
1637 InboundLedger::Reason reason)
1638{
1639 if (!referenceLedger || (referenceLedger->header().seq < index))
1640 {
1641 // Nothing we can do. No validated ledger.
1642 return std::nullopt;
1643 }
1644
1645 // See if the hash for the ledger we need is in the reference ledger
1646 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1647 if (ledgerHash)
1648 return ledgerHash;
1649
1650 // The hash is not in the reference ledger. Get another ledger which can
1651 // be located easily and should contain the hash.
1652 LedgerIndex refIndex = getCandidateLedger(index);
1653 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1654 XRPL_ASSERT(refHash, "xrpl::LedgerMaster::walkHashBySeq : found ledger");
1655 if (refHash)
1656 {
1657 // Try the hash and sequence of a better reference ledger just found
1658 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1659
1660 if (ledger)
1661 {
1662 try
1663 {
1664 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1665 }
1666 catch (SHAMapMissingNode const&)
1667 {
1668 ledger.reset();
1669 }
1670 }
1671
1672 // Try to acquire the complete ledger
1673 if (!ledger)
1674 {
1675 if (auto const l = app_.getInboundLedgers().acquire(
1676 *refHash, refIndex, reason))
1677 {
1678 ledgerHash = hashOfSeq(*l, index, m_journal);
1679 XRPL_ASSERT(
1680 ledgerHash,
1681 "xrpl::LedgerMaster::walkHashBySeq : has complete "
1682 "ledger");
1683 }
1684 }
1685 }
1686 return ledgerHash;
1687}
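// If the requested hash is not in the reference ledger's skip list, the
// walk above picks a candidate ledger index expected to contain it
// (getCandidateLedger), resolves that ledger locally or via an inbound
// acquire, and then reads the target hash from that ledger's skip list.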
1688
1689std::shared_ptr<Ledger const>
1690LedgerMaster::getLedgerBySeq(std::uint32_t index)
1691{
1692 if (index <= mValidLedgerSeq)
1693 {
1694 // Always prefer a validated ledger
1695 if (auto valid = mValidLedger.get())
1696 {
1697 if (valid->header().seq == index)
1698 return valid;
1699
1700 try
1701 {
1702 auto const hash = hashOfSeq(*valid, index, m_journal);
1703
1704 if (hash)
1705 return mLedgerHistory.getLedgerByHash(*hash);
1706 }
1707 catch (std::exception const&)
1708 {
1709 // Missing nodes are already handled
1710 }
1711 }
1712 }
1713
1714 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1715 return ret;
1716
1717 auto ret = mClosedLedger.get();
1718 if (ret && (ret->header().seq == index))
1719 return ret;
1720
1721 clearLedger(index);
1722 return {};
1723}
1724
1725std::shared_ptr<Ledger const>
1726LedgerMaster::getLedgerByHash(uint256 const& hash)
1727{
1728 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1729 return ret;
1730
1731 auto ret = mClosedLedger.get();
1732 if (ret && (ret->header().hash == hash))
1733 return ret;
1734
1735 return {};
1736}
1737
1738void
1744
1745void
1746LedgerMaster::sweep()
1747{
1748 mLedgerHistory.sweep();
1749 fetch_packs_.sweep();
1750}
1751
1752float
1757
1758void
1759LedgerMaster::clearPriorLedgers(LedgerIndex seq)
1760{
1762 if (seq > 0)
1763 mCompleteLedgers.erase(range(0u, seq - 1));
1764}
1765
1766void
1771
1772void
1773LedgerMaster::takeReplay(std::unique_ptr<LedgerReplay> replay)
1774{
1775 replayData = std::move(replay);
1776}
1777
1778std::unique_ptr<LedgerReplay>
1779LedgerMaster::releaseReplay()
1780{
1781 return std::move(replayData);
1782}
1783
1784void
1785LedgerMaster::fetchForHistory(
1786 std::uint32_t missing,
1787 bool& progress,
1788 InboundLedger::Reason reason,
1789 std::unique_lock<std::recursive_mutex>& sl)
1790{
1791 scope_unlock sul{sl};
1792 if (auto hash = getLedgerHashForHistory(missing, reason))
1793 {
1794 XRPL_ASSERT(
1795 hash->isNonZero(),
1796 "xrpl::LedgerMaster::fetchForHistory : found ledger");
1797 auto ledger = getLedgerByHash(*hash);
1798 if (!ledger)
1799 {
1800 if (!app_.getInboundLedgers().isFailure(*hash))
1801 {
1802 ledger =
1803 app_.getInboundLedgers().acquire(*hash, missing, reason);
1804 if (!ledger && missing != fetch_seq_ &&
1805 missing > app_.getNodeStore().earliestLedgerSeq())
1806 {
1807 JLOG(m_journal.trace())
1808 << "fetchForHistory want fetch pack " << missing;
1809 fetch_seq_ = missing;
1810 getFetchPack(missing, reason);
1811 }
1812 else
1813 JLOG(m_journal.trace())
1814 << "fetchForHistory no fetch pack for " << missing;
1815 }
1816 else
1817 JLOG(m_journal.debug())
1818 << "fetchForHistory found failed acquire";
1819 }
1820 if (ledger)
1821 {
1822 auto seq = ledger->header().seq;
1823 XRPL_ASSERT(
1824 seq == missing,
1825 "xrpl::LedgerMaster::fetchForHistory : sequence match");
1826 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1827 setFullLedger(ledger, false, false);
1828 int fillInProgress;
1829 {
1831 mHistLedger = ledger;
1832 fillInProgress = mFillInProgress;
1833 }
1834 if (fillInProgress == 0 &&
1835 app_.getRelationalDatabase().getHashByIndex(seq - 1) ==
1836 ledger->header().parentHash)
1837 {
1838 {
1839 // Previous ledger is in DB
1841 mFillInProgress = seq;
1842 }
1844 jtADVANCE, "tryFill", [this, ledger]() {
1845 tryFill(ledger);
1846 });
1847 }
1848 progress = true;
1849 }
1850 else
1851 {
1852 std::uint32_t fetchSz;
1853 // Do not fetch ledger sequences lower
1854 // than the earliest ledger sequence
1855 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1856 fetchSz = missing >= fetchSz
1857 ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1858 : 0;
1859 try
1860 {
1861 for (std::uint32_t i = 0; i < fetchSz; ++i)
1862 {
1863 std::uint32_t seq = missing - i;
1864 if (auto h = getLedgerHashForHistory(seq, reason))
1865 {
1866 XRPL_ASSERT(
1867 h->isNonZero(),
1868 "xrpl::LedgerMaster::fetchForHistory : "
1869 "prefetched ledger");
1870 app_.getInboundLedgers().acquire(*h, seq, reason);
1871 }
1872 }
1873 }
1874 catch (std::exception const& ex)
1875 {
1876 JLOG(m_journal.warn())
1877 << "Threw while prefetching: " << ex.what();
1878 }
1879 }
1880 }
1881 else
1882 {
1883 JLOG(m_journal.fatal())
1884 << "Can't find ledger following prevMissing " << missing;
1885 JLOG(m_journal.fatal())
1886 << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1887 JLOG(m_journal.fatal())
1888 << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1889 JLOG(m_journal.fatal())
1890 << "Acquire reason: "
1891 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1892 : "NOT HISTORY");
1893 clearLedger(missing + 1);
1894 progress = true;
1895 }
1896}
1897
1898// Try to publish ledgers, acquire missing ledgers
1899void
1901{
1902 do
1903 {
1904 mAdvanceWork = false; // If there's work to do, we'll make progress
1905 bool progress = false;
1906
1907 auto const pubLedgers = findNewLedgersToPublish(sl);
1908 if (pubLedgers.empty())
1909 {
1915 {
1916 // We are in sync, so can acquire
1919 {
1921 missing = prevMissing(
1923 mPubLedger->header().seq,
1925 }
1926 if (missing)
1927 {
1928 JLOG(m_journal.trace())
1929 << "tryAdvance discovered missing " << *missing;
1930 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1935 *missing,
1936 m_journal))
1937 {
1938 JLOG(m_journal.trace())
1939 << "advanceThread should acquire";
1940 }
1941 else
1942 missing = std::nullopt;
1943 }
1944 if (missing)
1945 {
1946 fetchForHistory(*missing, progress, reason, sl);
1947 if (mValidLedgerSeq != mPubLedgerSeq)
1948 {
1949 JLOG(m_journal.debug())
1950 << "tryAdvance found last valid changed";
1951 progress = true;
1952 }
1953 }
1954 }
1955 else
1956 {
1957 mHistLedger.reset();
1958 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1959 }
1960 }
1961 else
1962 {
1963 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1964 << " ledgers to publish";
1965 for (auto const& ledger : pubLedgers)
1966 {
1967 {
1968 scope_unlock sul{sl};
1969 JLOG(m_journal.debug())
1970 << "tryAdvance publishing seq " << ledger->header().seq;
1971 setFullLedger(ledger, true, true);
1972 }
1973
1974 setPubLedger(ledger);
1975
1976 {
1977 scope_unlock sul{sl};
1978 app_.getOPs().pubLedger(ledger);
1979 }
1980 }
1981
1982 app_.getOPs().clearNeedNetworkLedger();
1983 progress = newPFWork("pf:newLedger", sl);
1984 }
1985 if (progress)
1986 mAdvanceWork = true;
1987 } while (mAdvanceWork);
1988}
1989
1990void
1991LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr<Blob> data)
1992{
1993 fetch_packs_.canonicalize_replace_client(hash, data);
1994}
1995
1996std::optional<Blob>
1997LedgerMaster::getFetchPack(uint256 const& hash)
1998{
1999 Blob data;
2000 if (fetch_packs_.retrieve(hash, data))
2001 {
2002 fetch_packs_.del(hash, false);
2003 if (hash == sha512Half(makeSlice(data)))
2004 return data;
2005 }
2006 return std::nullopt;
2007}
2008
2009void
2020
2046static void
2047populateFetchPack(
2048 SHAMap const& want,
2049 SHAMap const* have,
2050 std::uint32_t cnt,
2051 protocol::TMGetObjectByHash* into,
2052 std::uint32_t seq,
2053 bool withLeaves = true)
2054{
2055 XRPL_ASSERT(cnt, "xrpl::populateFetchPack : nonzero count input");
2056
2057 Serializer s(1024);
2058
2059 want.visitDifferences(
2060 have,
2061 [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2062 if (!withLeaves && n.isLeaf())
2063 return true;
2064
2065 s.erase();
2066 n.serializeWithPrefix(s);
2067
2068 auto const& hash = n.getHash().as_uint256();
2069
2070 protocol::TMIndexedObject* obj = into->add_objects();
2071 obj->set_ledgerseq(seq);
2072 obj->set_hash(hash.data(), hash.size());
2073 obj->set_data(s.getDataPtr(), s.getLength());
2074
2075 return --cnt != 0;
2076 });
2077}
2078
2079void
2080LedgerMaster::makeFetchPack(
2081 std::weak_ptr<Peer> const& wPeer,
2082 std::shared_ptr<protocol::TMGetObjectByHash> const& request,
2083 uint256 haveLedgerHash,
2084 UptimeClock::time_point uptime)
2085{
2086 using namespace std::chrono_literals;
2087 if (UptimeClock::now() > uptime + 1s)
2088 {
2089 JLOG(m_journal.info()) << "Fetch pack request got stale";
2090 return;
2091 }
2092
2093 if (app_.getFeeTrack().isLoadedLocal())
2094 {
2095 JLOG(m_journal.info()) << "Too busy to make fetch pack";
2096 return;
2097 }
2098
2099 auto peer = wPeer.lock();
2100
2101 if (!peer)
2102 return;
2103
2104 auto have = getLedgerByHash(haveLedgerHash);
2105
2106 if (!have)
2107 {
2108 JLOG(m_journal.info())
2109 << "Peer requests fetch pack for ledger we don't have: " << have;
2110 peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2111 return;
2112 }
2113
2114 if (have->open())
2115 {
2116 JLOG(m_journal.warn())
2117 << "Peer requests fetch pack from open ledger: " << have;
2118 peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2119 return;
2120 }
2121
2122 if (have->header().seq < getEarliestFetch())
2123 {
2124 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2125 peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2126 return;
2127 }
2128
2129 auto want = getLedgerByHash(have->header().parentHash);
2130
2131 if (!want)
2132 {
2133 JLOG(m_journal.info())
2134 << "Peer requests fetch pack for ledger whose predecessor we "
2135 << "don't have: " << have;
2136 peer->charge(
2137 Resource::feeRequestNoReply, "get_object ledger no parent");
2138 return;
2139 }
2140
2141 try
2142 {
2143 Serializer hdr(128);
2144
2145 protocol::TMGetObjectByHash reply;
2146 reply.set_query(false);
2147
2148 if (request->has_seq())
2149 reply.set_seq(request->seq());
2150
2151 reply.set_ledgerhash(request->ledgerhash());
2152 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2153
2154 // Building a fetch pack:
2155 // 1. Add the header for the requested ledger.
2156 // 2. Add the nodes for the AccountStateMap of that ledger.
2157 // 3. If there are transactions, add the nodes for the
2158 // transactions of the ledger.
2159 // 4. If the FetchPack now contains at least 512 entries then stop.
2160 // 5. If not very much time has elapsed, then loop back and repeat
2161 // the same process adding the previous ledger to the FetchPack.
2162 do
2163 {
2164 std::uint32_t lSeq = want->header().seq;
2165
2166 {
2167 // Serialize the ledger header:
2168 hdr.erase();
2169
2171 addRaw(want->header(), hdr);
2172
2173 // Add the data
2174 protocol::TMIndexedObject* obj = reply.add_objects();
2175 obj->set_hash(
2176 want->header().hash.data(), want->header().hash.size());
2177 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2178 obj->set_ledgerseq(lSeq);
2179 }
2180
2181 populateFetchPack(
2182 want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2183
2184 // We use nullptr here because transaction maps are per ledger
2185 // and so the requestor is unlikely to already have it.
2186 if (want->header().txHash.isNonZero())
2187 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2188
2189 if (reply.objects().size() >= 512)
2190 break;
2191
2192 have = std::move(want);
2193 want = getLedgerByHash(have->header().parentHash);
2194 } while (want && UptimeClock::now() <= uptime + 1s);
2195
2196 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2197
2198 JLOG(m_journal.info())
2199 << "Built fetch pack with " << reply.objects().size() << " nodes ("
2200 << msg->getBufferSize() << " bytes)";
2201
2202 peer->send(msg);
2203 }
2204 catch (std::exception const& ex)
2205 {
2206 JLOG(m_journal.warn())
2207 << "Exception building fetch pack. Exception: " << ex.what();
2208 }
2209}
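// Fetch pack construction above is bounded in two ways: the reply is
// closed once it holds at least 512 objects, and the whole request is
// abandoned if more than one second of uptime has elapsed since it was
// received.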
2210
2211std::size_t
2212LedgerMaster::getFetchPackCacheSize() const
2213{
2214 return fetch_packs_.getCacheSize();
2215}
2216
2217// Returns the minimum ledger sequence in SQL database, if any.
2223
2224std::optional<uint256>
2225LedgerMaster::txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
2226{
2227 uint32_t first = 0, last = 0;
2228
2229 if (!getValidatedRange(first, last) || last < ledgerSeq)
2230 return {};
2231
2232 auto const lgr = getLedgerBySeq(ledgerSeq);
2233 if (!lgr || lgr->txs.empty())
2234 return {};
2235
2236 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2237 if (it->first && it->second &&
2238 it->second->isFieldPresent(sfTransactionIndex) &&
2239 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2240 return it->first->getTransactionID();
2241
2242 return {};
2243}
2244
2245} // namespace xrpl
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:131
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:186
A generic endpoint for log messages.
Definition Journal.h:41
Stream fatal() const
Definition Journal.h:333
Stream error() const
Definition Journal.h:327
Stream debug() const
Definition Journal.h:309
Stream info() const
Definition Journal.h:315
Stream trace() const
Severity stream access functions.
Definition Journal.h:303
Stream warn() const
Definition Journal.h:321
typename Clock::time_point time_point
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual PendingSaves & pendingSaves()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual LedgerReplayer & getLedgerReplayer()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual TimeKeeper & timeKeeper()=0
virtual PathRequests & getPathRequests()=0
virtual OpenLedger & openLedger()=0
virtual RCLValidations & getValidations()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual SHAMapStore & getSHAMapStore()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual Overlay & overlay()=0
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual bool isStopping() const =0
virtual ValidatorList & validators()=0
virtual NetworkOPs & getOPs()=0
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:258
bool LEDGER_REPLAY
Definition Config.h:205
virtual bool isFailure(uint256 const &h)=0
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:148
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:121
bool isStopping() const
Definition JobQueue.h:212
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
void sweep()
Remove stale cache entries.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
void set(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > get()
std::optional< LedgerIndex > minSqlSeq()
std::atomic< LedgerIndex > mValidLedgerSeq
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
bool haveLedger(std::uint32_t seq)
TaggedCache< uint256, Blob > fetch_packs_
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
void setValidLedger(std::shared_ptr< Ledger const > const &l)
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
std::recursive_mutex & peekMutex()
std::uint32_t fetch_seq_
std::chrono::seconds getValidatedLedgerAge()
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerIndex getCurrentLedgerIndex()
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void applyHeldTransactions()
Apply held transactions to the open ledger This is normally called as we close the ledger.
bool storeLedger(std::shared_ptr< Ledger const > ledger)
void gotFetchPack(bool progress, std::uint32_t seq)
void tryFill(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
void clearPriorLedgers(LedgerIndex seq)
LedgerIndex const max_ledger_difference_
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void setBuildingLedger(LedgerIndex index)
std::pair< uint256, LedgerIndex > mLastValidLedger
bool isCaughtUp(std::string &reason)
std::size_t getFetchPackCacheSize() const
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the corresponding hash from peers.
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
std::atomic< std::uint32_t > mPubLedgerClose
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::uint32_t const fetch_depth_
std::atomic< LedgerIndex > mPubLedgerSeq
void clearLedger(std::uint32_t seq)
void clearLedgerCachePrior(LedgerIndex seq)
std::atomic_flag mGotFetchPackThread
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< Ledger const > getClosedLedger()
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::uint32_t const ledger_history_
bool isValidated(ReadView const &ledger)
void fixMismatch(ReadView const &ledger)
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
LedgerIndex getValidLedgerIndex()
std::shared_ptr< Ledger const > mPathLedger
bool const standalone_
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > mHistLedger
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
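A hedged sketch contrasting the two hash lookups above; it assumes app and a LedgerIndex seq are in scope, that getHashBySeq() returns a zero hash on a cache miss, and that InboundLedger::Reason::GENERIC is an available reason code.

    auto& lm = app.getLedgerMaster();
    uint256 hash = lm.getHashBySeq(seq);        // consults cached ledger history only
    if (!hash.isNonZero())
    {
        // Fall back to walking the skip list; depending on the reason this
        // may trigger acquisition of intermediate ledgers from the network.
        if (auto const walked = lm.walkHashBySeq(seq, InboundLedger::Reason::GENERIC))
            hash = *walked;
    }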
LedgerHistory mLedgerHistory
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
std::chrono::seconds getPublishedLedgerAge()
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledger ...
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
RangeSet< std::uint32_t > mCompleteLedgers
LedgerHolder mValidLedger
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
LedgerHolder mClosedLedger
void doAdvance(std::unique_lock< std::recursive_mutex > &)
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
std::shared_ptr< ReadView const > getCurrentLedger()
beast::Journal m_journal
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::unique_ptr< LedgerReplay > replayData
std::unique_ptr< LedgerReplay > releaseReplay()
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::shared_ptr< Ledger const > mPubLedger
void failedSave(std::uint32_t seq, uint256 const &hash)
Application & app_
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::recursive_mutex mCompleteLock
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual void setAmendmentBlocked()=0
virtual bool isBlocked()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, ensuring that they are processed in one batch.
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual bool isAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
virtual bool isNeedNetworkLedger()=0
virtual void clearNeedNetworkLedger()=0
virtual void setAmendmentWarned()=0
Persistency layer for NodeObject.
Definition Database.h:32
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:202
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:221
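A small, hypothetical read path for the node store; db is assumed to be a NodeStore::Database& and hash a uint256, and the getData() accessor on NodeObject is an assumption of this sketch.

    // Synchronous fetch; the result is empty if the object is not in the store.
    if (auto const obj = db.fetchNodeObject(hash))
    {
        auto const& bytes = obj->getData();   // raw serialized node data
        (void)bytes;                          // ... use bytes ...
    }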
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
void setup(std::shared_ptr< ReadView const > const &ledger)
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:32
virtual LedgerHeader const & header() const =0
Returns information about the ledger.
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
Rules controlling protocol behavior.
Definition Rules.h:19
uint256 const & as_uint256() const
Definition SHAMapHash.h:25
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try to maintain in our database.
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:78
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
void const * getDataPtr() const
Definition Serializer.h:204
int getLength() const
Definition Serializer.h:214
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:45
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:57
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
bool isNonZero() const
Definition base_uint.h:526
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:212
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
STL namespace.
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T,...
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:164
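A small worked example of how the range helpers compose; this is a sketch that assumes RangeSet is the boost::icl interval set used throughout the codebase and uses the range() factory listed further below.

    RangeSet<std::uint32_t> have;
    have.insert(range<std::uint32_t>(1, 10));
    have.insert(range<std::uint32_t>(12, 20));
    // The largest index below 20 that is not already present is 11, i.e. the
    // next gap a backfill would try to acquire.
    auto const gap = prevMissing(have, std::uint32_t{20});  // yields 11; std::nullopt if no gap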
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:205
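An illustrative call with made-up data; wrapping a byte buffer in a Slice is assumed here to be one of the argument forms the hasher accepts.

    std::vector<std::uint8_t> const payload{0x01, 0x02, 0x03};
    // SHA-512 truncated to its first 256 bits, the hash form used throughout the protocol.
    uint256 const digest = sha512Half(Slice(payload.data(), payload.size()));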
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:100
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:35
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:611
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another, expressed as a percentage.
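For example, with values that divide evenly (so no rounding behaviour is relied on):

    std::size_t const partial = calculatePercent(50, 200);    // 50 of 200 -> 25
    std::size_t const whole = calculatePercent(200, 200);      // -> 100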
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition Ledger.cpp:981
SizedItem
Definition Config.h:25
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:901
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:528
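A hedged illustration: the exact values below assume the standard behaviour of rounding the requested sequence up to the next multiple of 256, the skip-list interval, so that the candidate's hash is recorded in a LedgerHashes page.

    LedgerIndex const a = getCandidateLedger(1000);   // 1024
    LedgerIndex const b = getCandidateLedger(1024);   // 1024 (already on a boundary)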
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:1063
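A usage sketch, assuming a validated ReadView (validatedLedger), a LedgerIndex seq, and a beast::Journal journal are in scope; the lookup only succeeds for sequences the ledger's skip list actually covers.

    if (auto const h = hashOfSeq(*validatedLedger, seq, journal))
        JLOG(journal.trace()) << "ledger " << seq << " has hash " << to_string(*h);
    else
        JLOG(journal.trace()) << "skip list does not cover ledger " << seq;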
@ jtLEDGER_DATA
Definition Job.h:46
@ jtUPDATE_PF
Definition Job.h:36
@ jtPUBOLDLEDGER
Definition Job.h:24
@ jtADVANCE
Definition Job.h:47
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
static constexpr int MAX_LEDGER_GAP
@ ledgerMaster
ledger master data for signing
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:225
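A trivial example of the std::array overload listed here; overloads for other byte containers are assumed to exist in the same header.

    std::array<unsigned char, 4> const buf{{0xde, 0xad, 0xbe, 0xef}};
    Slice const s = makeSlice(buf);   // non-owning view over the array; s.size() == 4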
static constexpr int MAX_WRITE_LOAD_ACQUIRE
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)