// LedgerMaster.cpp (from the rippled project)
// NOTE(review): the following strings are artifacts of the web viewer this
// file was scraped from, preserved here verbatim: "rippled", "Loading...",
// "Searching...", "No Matches", "LedgerMaster.cpp". They are not source code.
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/LedgerMaster.h>
3#include <xrpld/app/ledger/LedgerPersistence.h>
4#include <xrpld/app/ledger/LedgerReplayer.h>
5#include <xrpld/app/ledger/OpenLedger.h>
6#include <xrpld/app/main/Application.h>
7#include <xrpld/app/misc/SHAMapStore.h>
8#include <xrpld/app/misc/Transaction.h>
9#include <xrpld/app/misc/TxQ.h>
10#include <xrpld/app/misc/ValidatorList.h>
11#include <xrpld/core/TimeKeeper.h>
12#include <xrpld/overlay/Overlay.h>
13#include <xrpld/overlay/Peer.h>
14#include <xrpld/rpc/detail/PathRequestManager.h>
15
16#include <xrpl/basics/MathUtilities.h>
17#include <xrpl/basics/UptimeClock.h>
18#include <xrpl/basics/contract.h>
19#include <xrpl/basics/safe_cast.h>
20#include <xrpl/basics/scope.h>
21#include <xrpl/beast/utility/instrumentation.h>
22#include <xrpl/ledger/AmendmentTable.h>
23#include <xrpl/ledger/Ledger.h>
24#include <xrpl/ledger/OrderBookDB.h>
25#include <xrpl/ledger/PendingSaves.h>
26#include <xrpl/protocol/BuildInfo.h>
27#include <xrpl/protocol/HashPrefix.h>
28#include <xrpl/protocol/digest.h>
29#include <xrpl/rdb/RelationalDatabase.h>
30#include <xrpl/resource/Fees.h>
31#include <xrpl/server/LoadFeeTrack.h>
32#include <xrpl/server/NetworkOPs.h>
33
34#include <algorithm>
35#include <chrono>
36#include <cstdlib>
37#include <memory>
38#include <vector>
39
40namespace xrpl {
41
42// Don't catch up more than 100 ledgers (cannot exceed 256)
43static constexpr int MAX_LEDGER_GAP{100};
44
45// Don't acquire history if ledger is too old
// NOTE(review): corrupted scrape — original source line 46, the constant this
// comment documents (an age limit for history acquisition), is missing from
// this copy; restore it from upstream before compiling.
47
48// Don't acquire history if write load is too high
49static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
50
51// Helper function for LedgerMaster::doAdvance()
52// Return true if candidateLedger should be fetched from the network.
// Pure sequence arithmetic: fetch if the candidate could be current, falls
// inside the configured history window, or is at/above the optional
// "minimum online" ledger. The decision is logged at trace level either way.
53static bool
// NOTE(review): corrupted scrape — original line 54 (the function name) and
// line 59 (the final parameter, the beast::Journal used below as 'j') are
// missing from this copy; restore from upstream.
55 std::uint32_t const currentLedger,
56 std::uint32_t const ledgerHistory,
57 std::optional<LedgerIndex> const minimumOnline,
58 std::uint32_t const candidateLedger,
60{
61 bool const ret = [&]() {
62 // Fetch ledger if it may be the current ledger
63 if (candidateLedger >= currentLedger)
64 return true;
65
66 // Or if it is within our configured history range:
67 if (currentLedger - candidateLedger <= ledgerHistory)
68 return true;
69
70 // Or if greater than or equal to a specific minimum ledger.
71 // Do nothing if the minimum ledger to keep online is unknown.
72 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
73 }();
74
75 JLOG(j.trace()) << "Missing ledger " << candidateLedger << (ret ? " should" : " should NOT")
76 << " be acquired";
77 return ret;
78}
79
// Constructor: caches config-derived settings (standalone mode, fetch depth,
// history size, fetch batch size) and wires the metrics collector.
// NOTE(review): corrupted scrape — original line 80 (the constructor name,
// LedgerMaster::LedgerMaster(), per the member initializers below), line 82
// (a constructor parameter), and line 96 (one argument of the fetch_packs_
// initializer) are missing from this copy; restore from upstream.
81 Application& app,
83 beast::insight::Collector::ptr const& collector,
84 beast::Journal journal)
85 : app_(app)
86 , m_journal(journal)
87 , mLedgerHistory(collector, app)
88 , standalone_(app_.config().standalone())
89 , fetch_depth_(app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
90 , ledger_history_(app_.config().LEDGER_HISTORY)
91 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
92 , fetch_packs_(
93 "FetchPack",
94 65536,
95 std::chrono::seconds{45},
97 app_.getJournal("TaggedCache"))
98 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
99{
100}
101
// Returns the sequence number of the current open ledger.
// NOTE(review): corrupted scrape — original lines 102-103 (the return type
// and function name, presumably LedgerIndex LedgerMaster::getCurrentLedgerIndex())
// and lines 108-112 (an entire small accessor) are missing from this copy.
104{
105 return app_.getOpenLedger().current()->header().seq;
106}
107
113
114bool
// Returns false if 'view' is incompatible with the last validated ledger or
// with the best-known (hash, seq) validation pair; true otherwise.
// NOTE(review): corrupted scrape — original line 115 (the function name and
// parameters 'view', 's', 'reason' used below) is missing from this copy.
116{
117 auto validLedger = getValidatedLedger();
118
119 if (validLedger && !areCompatible(*validLedger, view, s, reason))
120 {
121 return false;
122 }
123
124 {
// m_mutex guards mLastValidLedger, which is written under the same lock
// elsewhere in this file (see the checkAccept overload).
125 std::lock_guard const sl(m_mutex);
126
127 if ((mLastValidLedger.second != 0) &&
128 !areCompatible(mLastValidLedger.first, mLastValidLedger.second, view, s, reason))
129 {
130 return false;
131 }
132 }
133
134 return true;
135}
136
// Age of the last published ledger: network close time minus the published
// ledger's close time, clamped at zero; weeks{2} when nothing was published.
// NOTE(review): corrupted scrape — original lines 137-138 (return type and
// function name) are missing from this copy.
139{
140 using namespace std::chrono_literals;
141 std::chrono::seconds const pubClose{mPubLedgerClose.load()};
142 if (pubClose == 0s)
143 {
144 JLOG(m_journal.debug()) << "No published ledger";
145 return weeks{2};
146 }
147
148 std::chrono::seconds ret = app_.getTimeKeeper().closeTime().time_since_epoch();
149 ret -= pubClose;
150 ret = (ret > 0s) ? ret : 0s;
// NOTE(review): function-local static mutated without synchronization below
// (used only to suppress duplicate trace lines) — benign data race if called
// concurrently; confirm callers serialize, or accept the race.
151 static std::chrono::seconds lastRet = -1s;
152
153 if (ret != lastRet)
154 {
155 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
156 lastRet = ret;
157 }
158 return ret;
159}
160
// Age of the last validated ledger, same clamping scheme as the published-age
// accessor above; weeks{2} when no ledger has been validated yet.
// NOTE(review): corrupted scrape — original lines 161-162 (return type and
// function name) and line 166 (the declaration of 'valClose', presumably
// loaded from mValidLedgerSign) are missing from this copy.
163{
164 using namespace std::chrono_literals;
165
167 if (valClose == 0s)
168 {
169 JLOG(m_journal.debug()) << "No validated ledger";
170 return weeks{2};
171 }
172
173 std::chrono::seconds ret = app_.getTimeKeeper().closeTime().time_since_epoch();
174 ret -= valClose;
175 ret = (ret > 0s) ? ret : 0s;
// Same duplicate-trace suppression (and same unsynchronized static) as in
// the published-age accessor.
176 static std::chrono::seconds lastRet = -1s;
177
178 if (ret != lastRet)
179 {
180 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
181 lastRet = ret;
182 }
183 return ret;
184}
185
186bool
// Health predicate: true when the published ledger is recent (<= 3 min old)
// and not lagging the validated ledger by more than 90 (close-time units).
// On failure, writes a human-readable explanation into 'reason'.
// NOTE(review): corrupted scrape — original line 187 (the function name and
// the 'reason' out-parameter declaration) is missing from this copy.
188{
189 using namespace std::chrono_literals;
190
191 if (getPublishedLedgerAge() > 3min)
192 {
193 reason = "No recently-published ledger";
194 return false;
195 }
196 std::uint32_t const validClose = mValidLedgerSign.load();
197 std::uint32_t const pubClose = mPubLedgerClose.load();
198 if ((validClose == 0u) || (pubClose == 0u))
199 {
200 reason = "No published ledger";
201 return false;
202 }
203 if (validClose > (pubClose + 90))
204 {
205 reason = "Published ledger lags validated ledger";
206 return false;
207 }
208 return true;
209}
210
211void
// LedgerMaster::setValidLedger (name grounded by the XRPL_ASSERT string
// below): records 'l' as the fully-validated ledger, derives a signing time
// from the median of trusted validation sign times (falling back to the
// ledger's close time below quorum), and emits amendment-blocked/warned
// notices.
// NOTE(review): corrupted scrape — original lines 212 (signature), 214 (the
// declaration of the 'times' vector used below), 247-250 inner assert text,
// 253-256, 259 (the condition of the first 'if' in the amendment section),
// 263, 283 and 287 are missing from this copy; restore from upstream.
213{
215 std::optional<uint256> consensusHash;
216
217 if (!standalone_)
218 {
219 auto validations = app_.getValidators().negativeUNLFilter(
220 app_.getValidations().getTrustedForLedger(l->header().hash, l->header().seq));
221 times.reserve(validations.size());
222 for (auto const& val : validations)
223 times.push_back(val->getSignTime());
224
225 if (!validations.empty())
226 consensusHash = validations.front()->getConsensusHash();
227 }
228
229 NetClock::time_point signTime;
230
231 if (!times.empty() && times.size() >= app_.getValidators().quorum())
232 {
233 // Calculate the sample median
234 std::sort(times.begin(), times.end());
235 auto const t0 = times[(times.size() - 1) / 2];
236 auto const t1 = times[times.size() / 2];
237 signTime = t0 + (t1 - t0) / 2;
238 }
239 else
240 {
241 signTime = l->header().closeTime;
242 }
243
244 mValidLedger.set(l);
245 mValidLedgerSign = signTime.time_since_epoch().count();
246 XRPL_ASSERT(
249 "xrpl::LedgerMaster::setValidLedger : valid ledger sequence");
251 mValidLedgerSeq = l->header().seq;
252
255 mLedgerHistory.validatedLedger(l, consensusHash);
257 if (!app_.getOPs().isBlocked())
258 {
260 {
261 JLOG(m_journal.error()) << "One or more unsupported amendments "
262 "activated: server blocked.";
264 }
265 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
266 {
267 // Amendments can lose majority, so re-check periodically (every
268 // flag ledger), and clear the flag if appropriate. If an unknown
269 // amendment gains majority log a warning as soon as it's
270 // discovered, then again every flag ledger until the operator
271 // upgrades, the amendment loses majority, or the amendment goes
272 // live and the node gets blocked. Unlike being amendment blocked,
273 // this message may be logged more than once per session, because
274 // the node will otherwise function normally, and this gives
275 // operators an opportunity to see and resolve the warning.
276 if (auto const first = app_.getAmendmentTable().firstUnsupportedExpected())
277 {
278 JLOG(m_journal.error()) << "One or more unsupported amendments "
279 "reached majority. Upgrade before "
280 << to_string(*first)
281 << " to prevent your server from "
282 "becoming amendment blocked.";
284 }
285 else
286 {
288 }
289 }
290 }
291}
292
293void
// Records 'l' as the most recently published ledger, caching its close time
// and sequence in the atomics read by the age/caught-up accessors above.
// NOTE(review): corrupted scrape — original line 294 (the function name,
// presumably LedgerMaster::setPubLedger, and the 'l' parameter) is missing.
295{
296 mPubLedger = l;
297 mPubLedgerClose = l->header().closeTime.time_since_epoch().count();
298 mPubLedgerSeq = l->header().seq;
299}
300
301void
// Queues a transaction to be retried later; mHeldTransactions is guarded by
// m_mutex.
// NOTE(review): corrupted scrape — original line 302 (the function name and
// the 'transaction' parameter) is missing from this copy.
303{
304 std::lock_guard const ml(m_mutex);
305 mHeldTransactions.insert(transaction->getSTransaction());
306}
307
308// Validate a ledger's close time and sequence number if we're considering
309// jumping to that ledger. This helps defend against some rare hostile or
310// diverged majority scenarios.
311bool
// LedgerMaster::canBeCurrent (name grounded by the XRPL_ASSERT strings
// below). Rejects candidates that precede the validated ledger, whose parent
// close time is more than 5 minutes from our clock, or whose sequence is
// implausibly far ahead (10 ledgers of slack plus one per two seconds).
// NOTE(review): corrupted scrape — original line 312 (the function name and
// the 'ledger' parameter) is missing from this copy.
313{
314 XRPL_ASSERT(ledger, "xrpl::LedgerMaster::canBeCurrent : non-null input");
315
316 // Never jump to a candidate ledger that precedes our
317 // last validated ledger
318
319 auto validLedger = getValidatedLedger();
320 if (validLedger && (ledger->header().seq < validLedger->header().seq))
321 {
322 JLOG(m_journal.trace()) << "Candidate for current ledger has low seq "
323 << ledger->header().seq << " < " << validLedger->header().seq;
324 return false;
325 }
326
327 // Ensure this ledger's parent close time is within five minutes of
328 // our current time. If we already have a known fully-valid ledger
329 // we perform this check. Otherwise, we only do it if we've built a
330 // few ledgers as our clock can be off when we first start up
331
332 auto closeTime = app_.getTimeKeeper().closeTime();
333 auto ledgerClose = ledger->header().parentCloseTime;
334
335 using namespace std::chrono_literals;
336 if ((validLedger || (ledger->header().seq > 10)) &&
337 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) > 5min))
338 {
339 JLOG(m_journal.warn()) << "Candidate for current ledger has close time "
340 << to_string(ledgerClose) << " at network time "
341 << to_string(closeTime) << " seq " << ledger->header().seq;
342 return false;
343 }
344
345 if (validLedger)
346 {
347 // Sequence number must not be too high. We allow ten ledgers
348 // for time inaccuracies plus a maximum run rate of one ledger
349 // every two seconds. The goal is to prevent a malicious ledger
350 // from increasing our sequence unreasonably high
351
352 LedgerIndex maxSeq = validLedger->header().seq + 10;
353
354 if (closeTime > validLedger->header().parentCloseTime)
355 {
356 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
357 closeTime - validLedger->header().parentCloseTime)
358 .count() /
359 2;
360 }
361
362 if (ledger->header().seq > maxSeq)
363 {
364 JLOG(m_journal.warn()) << "Candidate for current ledger has high seq "
365 << ledger->header().seq << " > " << maxSeq;
366 return false;
367 }
368
369 JLOG(m_journal.trace()) << "Acceptable seq range: " << validLedger->header().seq
370 << " <= " << ledger->header().seq << " <= " << maxSeq;
371 }
372
373 return true;
374}
375
376void
// LedgerMaster::switchLCL (name grounded by the assert/LogicError strings):
// installs 'lastClosed' as the closed ledger; in standalone mode it is also
// treated as fully validated, otherwise acceptance is checked normally.
// NOTE(review): corrupted scrape — original line 377 (the function name and
// the 'lastClosed' parameter) is missing from this copy.
378{
379 XRPL_ASSERT(lastClosed, "xrpl::LedgerMaster::switchLCL : non-null input");
380 if (!lastClosed->isImmutable())
381 LogicError("mutable ledger in switchLCL");
382
383 if (lastClosed->open())
384 LogicError("The new last closed ledger is open!");
385
386 {
387 std::lock_guard const ml(m_mutex);
388 mClosedLedger.set(lastClosed);
389 }
390
391 if (standalone_)
392 {
393 setFullLedger(lastClosed, true, false);
394 tryAdvance();
395 }
396 else
397 {
398 checkAccept(lastClosed);
399 }
400}
401
402bool
403LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
404{
405 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
406}
407
408bool
// Inserts 'ledger' into the history cache, preserving its validated flag.
// NOTE(review): corrupted scrape — original line 409 (the function name and
// the 'ledger' parameter) is missing from this copy.
410{
411 bool const validated = ledger->header().validated;
412 // Returns true if we already had the ledger
413 return mLedgerHistory.insert(ledger, validated);
414}
415
421void
// Drains the held-transaction set (under m_mutex) into a CanonicalTXSet keyed
// by the open ledger's parent hash, then processes it if non-empty.
// NOTE(review): corrupted scrape — original lines 422 (the function name),
// 429 (the statement that moves mHeldTransactions into 'set') and 434 (the
// statement executed when the set is non-empty) are missing from this copy;
// restore from upstream before compiling.
423{
424 CanonicalTXSet const set = [this]() {
425 std::lock_guard const sl(m_mutex);
426 // VFALCO NOTE The hash for an open ledger is undefined so we use
427 // something that is a reasonable substitute.
428 CanonicalTXSet set(app_.getOpenLedger().current()->header().parentHash);
430 return set;
431 }();
432
433 if (!set.empty())
435}
436
// NOTE(review): corrupted scrape — this span (original lines 444-464) held
// three small member functions but most of their lines are missing: lines
// 446-449 (body of the first void function), 452 and 454 (signature and lock
// of the bool predicate below, which tests membership of a sequence in
// mCompleteLedgers — presumably haveLedger), and 459-463 (body of the last
// void function). Restore all of them from upstream before compiling.
444
445void
450
451bool
453{
455 return boost::icl::contains(mCompleteLedgers, seq);
456}
457
458void
464
465bool
// LedgerMaster::isValidated (name grounded by the XRPL_ASSERT string below):
// decides whether a closed ledger is on the validated chain by consulting the
// skip list (walkHashBySeq); on a hash mismatch it also cross-checks the SQL
// database and clears a stale cache entry. Caches a positive answer in the
// ledger's own 'validated' flag.
// NOTE(review): corrupted scrape — original line 466 (the function name and
// the 'ledger' reference parameter) is missing from this copy.
467{
468 if (ledger.open())
469 return false;
470
471 if (ledger.header().validated)
472 return true;
473
474 auto const seq = ledger.header().seq;
475 try
476 {
477 // Use the skip list in the last validated ledger to see if ledger
478 // comes before the last validated ledger (and thus has been
479 // validated).
480 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
481
482 if (!hash || ledger.header().hash != *hash)
483 {
484 // This ledger's hash is not the hash of the validated ledger
485 if (hash)
486 {
487 XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::isValidated : nonzero hash");
488 uint256 const valHash = app_.getRelationalDatabase().getHashByIndex(seq);
489 if (valHash == ledger.header().hash)
490 {
491 // SQL database doesn't match ledger chain
492 clearLedger(seq);
493 }
494 }
495 return false;
496 }
497 }
498 catch (SHAMapMissingNode const& mn)
499 {
500 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
501 return false;
502 }
503
504 // Mark ledger as validated to save time if we see it again.
505 ledger.header().validated = true;
506 return true;
507}
508
509// returns Ledgers we have all the nodes for
510bool
// Computes [minVal, maxVal] anchored at the published ledger and walks back
// to the first gap in mCompleteLedgers. Returns false when nothing has been
// published yet.
// NOTE(review): corrupted scrape — original lines 511 (the function name and
// the minVal/maxVal out-parameters), 520 (the declaration of 'maybeMin') and
// 522 (presumably a lock guarding mCompleteLedgers) are missing.
512{
513 // Validated ledger is likely not stored in the DB yet so we use the
514 // published ledger which is.
515 maxVal = mPubLedgerSeq.load();
516
517 if (maxVal == 0u)
518 return false;
519
521 {
523 maybeMin = prevMissing(mCompleteLedgers, maxVal);
524 }
525
526 if (maybeMin == std::nullopt)
527 {
528 minVal = maxVal;
529 }
530 else
531 {
532 minVal = 1 + *maybeMin;
533 }
534
535 return true;
536}
537
538// Returns Ledgers we have all the nodes for and are indexed
539bool
// Narrows the full validated range by excluding sequences whose database
// writes are still pending; sets the range to (0, 0) if nothing survives.
// NOTE(review): corrupted scrape — original line 540 (the function name and
// the minVal/maxVal out-parameters) is missing from this copy.
541{
542 if (!getFullValidatedRange(minVal, maxVal))
543 return false;
544
545 // Remove from the validated range any ledger sequences that may not be
546 // fully updated in the database yet
547
548 auto const pendingSaves = app_.getPendingSaves().getSnapshot();
549
550 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
551 {
552 // Ensure we shrink the tips as much as possible. If we have 7-9 and
553 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
554 // because then we'll have nothing when we could have 7.
555 while (pendingSaves.contains(maxVal))
556 --maxVal;
557 while (pendingSaves.contains(minVal))
558 ++minVal;
559
560 // Best effort for remaining exclusions
561 for (auto v : pendingSaves)
562 {
563 if ((v.first >= minVal) && (v.first <= maxVal))
564 {
565 if (v.first > ((minVal + maxVal) / 2))
566 {
567 maxVal = v.first - 1;
568 }
569 else
570 {
571 minVal = v.first + 1;
572 }
573 }
574 }
575
576 if (minVal > maxVal)
577 minVal = maxVal = 0;
578 }
579
580 return true;
581}
582
583// Get the earliest ledger we will let peers fetch
// NOTE(review): corrupted scrape — original lines 584-585 (the return type
// and function name, presumably std::uint32_t LedgerMaster::getEarliestFetch())
// are missing from this copy.
586{
587 // The earliest ledger we will let people fetch is ledger zero,
588 // unless that creates a larger range than allowed
589 std::uint32_t e = getClosedLedger()->header().seq;
590
591 if (e > fetch_depth_)
592 {
593 e -= fetch_depth_;
594 }
595 else
596 {
// Guard against unsigned underflow when the chain is shorter than
// fetch_depth_.
597 e = 0;
598 }
599 return e;
600}
601
602void
// Back-fills mCompleteLedgers by walking the parent-hash chain downward from
// 'ledger', batching seq->hash lookups from the relational database 500 at a
// time and stopping at the first gap, mismatch, or missing node-store object.
// Finally clears mFillInProgress and retries advancing.
// NOTE(review): corrupted scrape — original lines 603 (the function name and
// the 'ledger' parameter), 608 (the declaration of 'ledgerHashes'), 613 (the
// declaration of 'nodeStore'), and 633/661 (presumably the locks guarding the
// two mCompleteLedgers.insert calls) are missing from this copy.
604{
605 std::uint32_t seq = ledger->header().seq;
606 uint256 prevHash = ledger->header().parentHash;
607
609
610 std::uint32_t minHas = seq;
611 std::uint32_t maxHas = seq;
612
614 while (!app_.getJobQueue().isStopping() && seq > 0)
615 {
616 {
617 std::lock_guard const ml(m_mutex);
618 minHas = seq;
619 --seq;
620
621 if (haveLedger(seq))
622 break;
623 }
624
625 auto it(ledgerHashes.find(seq));
626
627 if (it == ledgerHashes.end())
628 {
629 if (app_.isStopping())
630 return;
631
632 {
634 mCompleteLedgers.insert(range(minHas, maxHas));
635 }
636 maxHas = minHas;
637 ledgerHashes =
638 app_.getRelationalDatabase().getHashesByIndex((seq < 500) ? 0 : (seq - 499), seq);
639 it = ledgerHashes.find(seq);
640
641 if (it == ledgerHashes.end())
642 break;
643
644 if (!nodeStore.fetchNodeObject(
645 ledgerHashes.begin()->second.ledgerHash, ledgerHashes.begin()->first))
646 {
647 // The ledger is not backed by the node store
648 JLOG(m_journal.warn())
649 << "SQL DB ledger sequence " << seq << " mismatches node store";
650 break;
651 }
652 }
653
654 if (it->second.ledgerHash != prevHash)
655 break;
656
657 prevHash = it->second.parentHash;
658 }
659
660 {
662 mCompleteLedgers.insert(range(minHas, maxHas));
663 }
664 {
665 std::lock_guard const ml(m_mutex);
666 mFillInProgress = 0;
667 tryAdvance();
668 }
669}
670
673void
// Requests a fetch pack for the ledger after 'missing' from the best-scored
// peer that advertises the needed range; logs and gives up if no hash or no
// peer is available.
// NOTE(review): corrupted scrape — original lines 674 (the function name and
// the 'missing'/'reason' parameters) and 687 (the declaration of 'target',
// presumably a std::shared_ptr<Peer>) are missing from this copy.
675{
676 LedgerIndex const ledgerIndex = missing + 1;
677
678 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
679 if (!haveHash || haveHash->isZero())
680 {
681 JLOG(m_journal.error()) << "No hash for fetch pack. Missing Index " << missing;
682 return;
683 }
684
685 // Select target Peer based on highest score. The score is randomized
686 // but biased in favor of Peers with low latency.
688 {
689 int maxScore = 0;
690 auto peerList = app_.getOverlay().getActivePeers();
691 for (auto const& peer : peerList)
692 {
693 if (peer->hasRange(missing, missing + 1))
694 {
695 int const score = peer->getScore(true);
696 if (!target || (score > maxScore))
697 {
698 target = peer;
699 maxScore = score;
700 }
701 }
702 }
703 }
704
705 if (target)
706 {
707 protocol::TMGetObjectByHash tmBH;
708 tmBH.set_query(true);
709 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
710 tmBH.set_ledgerhash(haveHash->begin(), 32);
711 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
712
713 target->send(packet);
714 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
715 }
716 else
717 JLOG(m_journal.debug()) << "No peer for fetch pack";
718}
719
720void
// Walks backward from the ledger below 'ledger', clearing cached ledgers that
// disagree with the chain implied by the skip list, until the seam is closed
// (a cached ledger whose hash matches) or the chain is exhausted.
// NOTE(review): corrupted scrape — original lines 721 (the function name and
// the 'ledger' reference parameter) and 724 (the declaration of 'hash',
// presumably an optional<uint256>) are missing from this copy.
722{
723 int invalidate = 0;
725
726 for (std::uint32_t lSeq = ledger.header().seq - 1; lSeq > 0; --lSeq)
727 {
728 if (haveLedger(lSeq))
729 {
730 try
731 {
732 hash = hashOfSeq(ledger, lSeq, m_journal);
733 }
734 catch (std::exception const& ex)
735 {
736 JLOG(m_journal.warn())
737 << "fixMismatch encounters partial ledger. Exception: " << ex.what();
738 clearLedger(lSeq);
739 return;
740 }
741
742 if (hash)
743 {
744 // try to close the seam
745 auto otherLedger = getLedgerBySeq(lSeq);
746
747 if (otherLedger && (otherLedger->header().hash == *hash))
748 {
749 // we closed the seam
750 if (invalidate != 0)
751 {
752 JLOG(m_journal.warn()) << "Match at " << lSeq << ", " << invalidate
753 << " prior ledgers invalidated";
754 }
755
756 return;
757 }
758 }
759
760 clearLedger(lSeq);
761 ++invalidate;
762 }
763 }
764
765 // all prior ledgers invalidated
766 if (invalidate != 0)
767 {
768 JLOG(m_journal.warn()) << "All " << invalidate << " prior ledgers invalidated";
769 }
770}
771
772void
// LedgerMaster::setFullLedger (name grounded by the XRPL_ASSERT string
// below): accepts 'ledger' into the trusted chain — marks it validated/full,
// cross-checks the SQL index against its parent hash, schedules persistence,
// records completeness, and may promote it to valid/published; repairs chain
// mismatches with fixMismatch.
// NOTE(review): corrupted scrape — original lines 773 (the function name) and
// 803 (presumably the lock guarding mCompleteLedgers.insert) are missing.
774 std::shared_ptr<Ledger const> const& ledger,
775 bool isSynchronous,
776 bool isCurrent)
777{
778 // A new ledger has been accepted as part of the trusted chain
779 JLOG(m_journal.debug()) << "Ledger " << ledger->header().seq
780 << " accepted :" << ledger->header().hash;
781 XRPL_ASSERT(
782 ledger->stateMap().getHash().isNonZero(),
783 "xrpl::LedgerMaster::setFullLedger : nonzero ledger state hash");
784
785 ledger->setValidated();
786 ledger->setFull();
787
788 if (isCurrent)
789 mLedgerHistory.insert(ledger, true);
790
791 {
792 // Check the SQL database's entry for the sequence before this
793 // ledger, if it's not this ledger's parent, invalidate it
794 uint256 const prevHash =
795 app_.getRelationalDatabase().getHashByIndex(ledger->header().seq - 1);
796 if (prevHash.isNonZero() && prevHash != ledger->header().parentHash)
797 clearLedger(ledger->header().seq - 1);
798 }
799
800 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
801
802 {
804 mCompleteLedgers.insert(ledger->header().seq);
805 }
806
807 {
808 std::lock_guard const ml(m_mutex);
809
810 if (ledger->header().seq > mValidLedgerSeq)
811 setValidLedger(ledger);
812 if (!mPubLedger)
813 {
814 setPubLedger(ledger);
815 app_.getOrderBookDB().setup(ledger);
816 }
817
818 if (ledger->header().seq != 0 && haveLedger(ledger->header().seq - 1))
819 {
820 // we think we have the previous ledger, double check
821 auto prevLedger = getLedgerBySeq(ledger->header().seq - 1);
822
823 if (!prevLedger || (prevLedger->header().hash != ledger->header().parentHash))
824 {
825 JLOG(m_journal.warn()) << "Acquired ledger invalidates previous ledger: "
826 << (prevLedger ? "hashMismatch" : "missingLedger");
827 fixMismatch(*ledger);
828 }
829 }
830 }
831}
832
// NOTE(review): corrupted scrape — original lines 834-838 (the name and body
// of this small void member function) are missing from this copy; restore
// from upstream before compiling.
833void
839
840// Check if the specified ledger can become the new last fully-validated
841// ledger.
842void
// (hash, seq) overload: tallies trusted validations for the candidate,
// remembers the best-validated (hash, seq) pair under m_mutex, and delegates
// to the ledger overload when the ledger is available locally.
// NOTE(review): corrupted scrape — original lines 843 (the function name and
// the hash/seq parameters), 854 (the getTrustedForLedger(...) argument of the
// negativeUNLFilter call), 879 (the statement run when quorum is reached
// early), and 884 (presumably the InboundLedgers acquire call that fetches
// the missing ledger) are missing from this copy.
844{
845 std::size_t valCount = 0;
846
847 if (seq != 0)
848 {
849 // Ledger is too old
850 if (seq < mValidLedgerSeq)
851 return;
852
853 auto validations = app_.getValidators().negativeUNLFilter(
855 valCount = validations.size();
856 if (valCount >= app_.getValidators().quorum())
857 {
858 std::lock_guard const ml(m_mutex);
859 if (seq > mLastValidLedger.second)
860 mLastValidLedger = std::make_pair(hash, seq);
861 }
862
863 if (seq == mValidLedgerSeq)
864 return;
865
866 // Ledger could match the ledger we're already building
867 if (seq == mBuildingLedgerSeq)
868 return;
869 }
870
871 auto ledger = mLedgerHistory.getLedgerByHash(hash);
872
873 if (!ledger)
874 {
875 if ((seq != 0) && (getValidLedgerIndex() == 0))
876 {
877 // Set peers converged early if we can
878 if (valCount >= app_.getValidators().quorum())
880 }
881
882 // FIXME: We may not want to fetch a ledger with just one
883 // trusted validation
885 }
886
887 if (ledger)
888 checkAccept(ledger);
889}
890
901
902void
// Ledger overload: promotes 'ledger' to last fully-validated when it passes
// canBeCurrent, advances the sequence, and meets the needed validation count.
// Also aggregates fee votes from validations (median), retries advancing, and
// every 256th ledger checks whether a majority of trusted validators run a
// newer server version, printing a throttled upgrade warning.
// NOTE(review): corrupted scrape — original lines 903 (the function name and
// the 'ledger' parameter), 953 (the declaration of the stream 's' used for
// the fee log below), 968 (the statement that applies the computed 'fee'),
// and 987 (the 'if' condition paired with the 'else if' on
// upgradeWarningPrevTime_ below) are missing from this copy.
904{
905 // Can we accept this ledger as our new last fully-validated ledger
906
907 if (!canBeCurrent(ledger))
908 return;
909
910 // Can we advance the last fully-validated ledger? If so, can we
911 // publish?
912 std::lock_guard const ml(m_mutex);
913
914 if (ledger->header().seq <= mValidLedgerSeq)
915 return;
916
917 auto const minVal = getNeededValidations();
918 auto validations = app_.getValidators().negativeUNLFilter(
919 app_.getValidations().getTrustedForLedger(ledger->header().hash, ledger->header().seq));
920 auto const tvc = validations.size();
921 if (tvc < minVal) // nothing we can do
922 {
923 JLOG(m_journal.trace()) << "Only " << tvc << " validations for " << ledger->header().hash;
924 return;
925 }
926
927 JLOG(m_journal.info()) << "Advancing accepted ledger to " << ledger->header().seq
928 << " with >= " << minVal << " validations";
929
930 ledger->setValidated();
931 ledger->setFull();
932 setValidLedger(ledger);
933 if (!mPubLedger)
934 {
935 pendSaveValidated(app_, ledger, true, true);
936 setPubLedger(ledger);
937 app_.getOrderBookDB().setup(ledger);
938 }
939
940 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
941 auto fees = app_.getValidations().fees(ledger->header().hash, base);
942 {
943 auto fees2 = app_.getValidations().fees(ledger->header().parentHash, base);
944 fees.reserve(fees.size() + fees2.size());
945 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
946 }
947 std::uint32_t fee = 0;
948 if (!fees.empty())
949 {
950 std::sort(fees.begin(), fees.end());
951 if (auto stream = m_journal.debug())
952 {
954 s << "Received fees from validations: (" << fees.size() << ") ";
955 for (auto const fee1 : fees)
956 {
957 s << " " << fee1;
958 }
959 stream << s.str();
960 }
961 fee = fees[fees.size() / 2]; // median
962 }
963 else
964 {
965 fee = base;
966 }
967
969
970 tryAdvance();
971
972 if (ledger->seq() % 256 == 0)
973 {
974 // Check if the majority of validators run a higher version rippled
975 // software. If so print a warning.
976 //
977 // Validators include their rippled software version in the validation
978 // messages of every (flag - 1) ledger. We wait for one ledger time
979 // before checking the version information to accumulate more validation
980 // messages.
981
982 auto currentTime = app_.getTimeKeeper().now();
983 bool needPrint = false;
984
985 // The variable upgradeWarningPrevTime_ will be set when and only when
986 // the warning is printed.
988 {
989 // Have not printed the warning before, check if need to print.
990 auto const vals = app_.getValidations().getTrustedForLedger(
991 ledger->header().parentHash, ledger->header().seq - 1);
992 std::size_t higherVersionCount = 0;
993 std::size_t rippledCount = 0;
994 for (auto const& v : vals)
995 {
996 if (v->isFieldPresent(sfServerVersion))
997 {
998 auto version = v->getFieldU64(sfServerVersion);
999 higherVersionCount += BuildInfo::isNewerVersion(version) ? 1 : 0;
1000 rippledCount += BuildInfo::isRippledVersion(version) ? 1 : 0;
1001 }
1002 }
1003 // We report only if (1) we have accumulated validation messages
1004 // from 90% validators from the UNL, (2) 60% of validators
1005 // running the rippled implementation have higher version numbers,
1006 // and (3) the calculation won't cause divide-by-zero.
1007 if (higherVersionCount > 0 && rippledCount > 0)
1008 {
1009 constexpr std::size_t reportingPercent = 90;
1010 constexpr std::size_t cutoffPercent = 60;
1011 auto const unlSize{app_.getValidators().getQuorumKeys().second.size()};
1012 needPrint = unlSize > 0 &&
1013 calculatePercent(vals.size(), unlSize) >= reportingPercent &&
1014 calculatePercent(higherVersionCount, rippledCount) >= cutoffPercent;
1015 }
1016 }
1017 // To throttle the warning messages, instead of printing a warning
1018 // every flag ledger, we print every week.
1019 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1020 {
1021 // Printed the warning before, and assuming most validators
1022 // do not downgrade, we keep printing the warning
1023 // until the local server is restarted.
1024 needPrint = true;
1025 }
1026
1027 if (needPrint)
1028 {
1029 upgradeWarningPrevTime_ = currentTime;
1030 auto const upgradeMsg =
1031 "Check for upgrade: "
1032 "A majority of trusted validators are "
1033 "running a newer version.";
1034 std::cerr << upgradeMsg << std::endl;
1035 JLOG(m_journal.error()) << upgradeMsg;
1036 }
1037 }
1038}
1039
1041void
// Called when consensus builds a new ledger: records it in history, tries to
// accept it as fully validated, and otherwise scans saved-up trusted
// validations for a higher-sequence ledger that already has enough support.
// NOTE(review): corrupted scrape — original lines 1042 (the function name),
// 1048 (the statement clearing the "building" state referenced by the comment
// above it), 1078 (the initializer of 'validations'), and 1101 (the
// declaration of the 'count' map keyed by ledger hash) are missing.
1043 std::shared_ptr<Ledger const> const& ledger,
1044 uint256 const& consensusHash,
1045 Json::Value consensus)
1046{
1047 // Because we just built a ledger, we are no longer building one
1049
1050 // No need to process validations in standalone mode
1051 if (standalone_)
1052 return;
1053
1054 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1055
1056 if (ledger->header().seq <= mValidLedgerSeq)
1057 {
1058 auto stream = app_.getJournal("LedgerConsensus").info();
1059 JLOG(stream) << "Consensus built old ledger: " << ledger->header().seq
1060 << " <= " << mValidLedgerSeq;
1061 return;
1062 }
1063
1064 // See if this ledger can be the new fully-validated ledger
1065 checkAccept(ledger);
1066
1067 if (ledger->header().seq <= mValidLedgerSeq)
1068 {
1069 auto stream = app_.getJournal("LedgerConsensus").debug();
1070 JLOG(stream) << "Consensus ledger fully validated";
1071 return;
1072 }
1073
1074 // This ledger cannot be the new fully-validated ledger, but
1075 // maybe we saved up validations for some other ledger that can be
1076
1077 auto validations =
1079
1080 // Track validation counts with sequence numbers
1081 class valSeq
1082 {
1083 public:
1084 valSeq() = default;
1085
1086 void
1087 mergeValidation(LedgerIndex seq)
1088 {
1089 valCount_++;
1090
1091 // If we didn't already know the sequence, now we do
1092 if (ledgerSeq_ == 0)
1093 ledgerSeq_ = seq;
1094 }
1095
1096 std::size_t valCount_{0};
1097 LedgerIndex ledgerSeq_{0};
1098 };
1099
1100 // Count the number of current, trusted validations
1102 for (auto const& v : validations)
1103 {
1104 valSeq& vs = count[v->getLedgerHash()];
1105 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1106 }
1107
1108 auto const neededValidations = getNeededValidations();
1109 auto maxSeq = mValidLedgerSeq.load();
1110 auto maxLedger = ledger->header().hash;
1111
1112 // Of the ledgers with sufficient validations,
1113 // find the one with the highest sequence
1114 for (auto& v : count)
1115 {
1116 if (v.second.valCount_ > neededValidations)
1117 {
1118 // If we still don't know the sequence, get it
1119 if (v.second.ledgerSeq_ == 0)
1120 {
1121 if (auto l = getLedgerByHash(v.first))
1122 v.second.ledgerSeq_ = l->header().seq;
1123 }
1124
1125 if (v.second.ledgerSeq_ > maxSeq)
1126 {
1127 maxSeq = v.second.ledgerSeq_;
1128 maxLedger = v.first;
1129 }
1130 }
1131 }
1132
1133 if (maxSeq > mValidLedgerSeq)
1134 {
1135 auto stream = app_.getJournal("LedgerConsensus").debug();
1136 JLOG(stream) << "Consensus triggered check of ledger";
1137 checkAccept(maxLedger, maxSeq);
1138 }
1139}
1140
// Resolves the hash of a history ledger at 'index': first via the cached
// history ledger's skip list, then by walking the hash chain.
// NOTE(review): corrupted scrape — original lines 1140-1142 (the return type,
// function name and the 'index'/'reason' parameters) and 1145 (the
// declaration of 'ret', presumably an optional LedgerHash) are missing.
1143{
1144 // Try to get the hash of a ledger we need to fetch for history
1146 auto const& l{mHistLedger};
1147
1148 if (l && l->header().seq >= index)
1149 {
1150 ret = hashOfSeq(*l, index, m_journal);
1151 if (!ret)
1152 ret = walkHashBySeq(index, l, reason);
1153 }
1154
1155 if (!ret)
1156 ret = walkHashBySeq(index, reason);
1157
1158 return ret;
1159}
1160
// LedgerMaster::findNewLedgersToPublish (name grounded by the UNREACHABLE
// string below): collects the validated ledgers between the last published
// and the current valid ledger, acquiring missing ones (bounded by
// ledger_fetch_size_) or scheduling a LedgerReplay for a remaining gap.
// NOTE(review): corrupted scrape — original lines 1160-1162 (return type,
// name, and the lock parameter 'sl' released below via scope_unlock), 1164
// (the declaration of the 'ret' container), 1181 and 1194 (the conditions of
// the two guarded branches), 1213 (the per-iteration 'ledger' declaration),
// 1267 (the condition guarding the replay branch, presumably LEDGER_REPLAY),
// and 1293 (the LedgerReplayer call) are missing from this copy.
1163{
1165
1166 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1167
1168 // No valid ledger, nothing to do
1169 if (mValidLedger.empty())
1170 {
1171 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1172 return {};
1173 }
1174
1175 if (!mPubLedger)
1176 {
1177 JLOG(m_journal.info()) << "First published ledger will be " << mValidLedgerSeq;
1178 return {mValidLedger.get()};
1179 }
1180
1182 {
1183 JLOG(m_journal.warn()) << "Gap in validated ledger stream " << mPubLedgerSeq << " - "
1184 << mValidLedgerSeq - 1;
1185
1186 auto valLedger = mValidLedger.get();
1187 ret.push_back(valLedger);
1188 setPubLedger(valLedger);
1189 app_.getOrderBookDB().setup(valLedger);
1190
1191 return {valLedger};
1192 }
1193
1195 {
1196 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1197 return {};
1198 }
1199
1200 int acqCount = 0;
1201
1202 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1203 auto valLedger = mValidLedger.get();
1204 std::uint32_t const valSeq = valLedger->header().seq;
1205
1206 scope_unlock const sul{sl};
1207 try
1208 {
1209 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1210 {
1211 JLOG(m_journal.trace()) << "Trying to fetch/publish valid ledger " << seq;
1212
1214 // This can throw
1215 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1216 // VFALCO TODO Restructure this code so that zero is not
1217 // used.
1218 if (!hash)
1219 hash = beast::zero; // kludge
1220 if (seq == valSeq)
1221 {
1222 // We need to publish the ledger we just fully validated
1223 ledger = valLedger;
1224 }
1225 else if (hash->isZero())
1226 {
1227 // LCOV_EXCL_START
1228 JLOG(m_journal.fatal())
1229 << "Ledger: " << valSeq << " does not have hash for " << seq;
1230 UNREACHABLE(
1231 "xrpl::LedgerMaster::findNewLedgersToPublish : ledger "
1232 "not found");
1233 // LCOV_EXCL_STOP
1234 }
1235 else
1236 {
1237 ledger = mLedgerHistory.getLedgerByHash(*hash);
1238 }
1239
1240 if (!app_.config().LEDGER_REPLAY)
1241 {
1242 // Can we try to acquire the ledger we need?
1243 if (!ledger && (++acqCount < ledger_fetch_size_))
1244 {
1245 ledger = app_.getInboundLedgers().acquire(
1246 *hash, seq, InboundLedger::Reason::GENERIC);
1247 }
1248 }
1249
1250 // Did we acquire the next ledger we need to publish?
1251 if (ledger && (ledger->header().seq == pubSeq))
1252 {
1253 ledger->setValidated();
1254 ret.push_back(ledger);
1255 ++pubSeq;
1256 }
1257 }
1258
1259 JLOG(m_journal.trace()) << "ready to publish " << ret.size() << " ledgers.";
1260 }
1261 catch (std::exception const& ex)
1262 {
1263 JLOG(m_journal.error()) << "Exception while trying to find ledgers to publish: "
1264 << ex.what();
1265 }
1266
1268 {
1269 /* Narrow down the gap of ledgers, and try to replay them.
1270 * When replaying a ledger gap, if the local node has
1271 * the start ledger, it saves an expensive InboundLedger
1272 * acquire. If the local node has the finish ledger, it
1273 * saves a skip list acquire.
1274 */
1275 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1276 auto finishLedger = valLedger;
1277 while (startLedger->seq() + 1 < finishLedger->seq())
1278 {
1279 if (auto const parent =
1280 mLedgerHistory.getLedgerByHash(finishLedger->header().parentHash);
1281 parent)
1282 {
1283 finishLedger = parent;
1284 }
1285 else
1286 {
1287 auto numberLedgers = finishLedger->seq() - startLedger->seq() + 1;
1288 JLOG(m_journal.debug())
1289 << "Publish LedgerReplays " << numberLedgers
1290 << " ledgers, from seq=" << startLedger->header().seq << ", "
1291 << startLedger->header().hash << " to seq=" << finishLedger->header().seq
1292 << ", " << finishLedger->header().hash;
1294 InboundLedger::Reason::GENERIC, finishLedger->header().hash, numberLedgers);
1295 break;
1296 }
1297 }
1298 }
1299
1300 return ret;
1301}
1302
// LedgerMaster::tryAdvance (signature line lost in this extraction; name
// grounded by the XRPL_ASSERT tag below and the member index).
// Flags that advance work is pending and queues a jtADVANCE job that runs
// doAdvance() and logs any escaping exception at fatal severity.
// NOTE(review): embedded line numbers 1304/1310/1314/1317 are absent from
// this listing, so statements are missing here -- including the lock `sl`
// the lambda passes to doAdvance() and the condition guarding the block
// that sets mAdvanceThread. Do not treat this body as complete; confirm
// against the upstream source.
1303void
1305{
1306    std::lock_guard const ml(m_mutex);
1307
1308    // Can't advance without at least one fully-valid ledger
1309    mAdvanceWork = true;
1311    {
1312        mAdvanceThread = true;
1313        app_.getJobQueue().addJob(jtADVANCE, "AdvanceLedger", [this]() {
1315
1316            XRPL_ASSERT(
1318                "xrpl::LedgerMaster::tryAdvance : has valid ledger");
1319
1320            JLOG(m_journal.trace()) << "advanceThread<";
1321
1322            try
1323            {
1324                doAdvance(sl);
1325            }
1326            catch (std::exception const& ex)
1327            {
1328                JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1329            }
1330
1331            mAdvanceThread = false;
1332            JLOG(m_journal.trace()) << "advanceThread>";
1333        });
1334    }
1335}
1336
// LedgerMaster::updatePaths (signature line 1338 lost in this extraction;
// name grounded by the "updatePaths running" log line and the newPFWork
// lambda elsewhere in this file that calls updatePaths()).
// Services pending pathfinding requests in a loop until the job queue is
// stopping or there is no more work:
//  - picks a ledger to pathfind against (mPathLedger when a newer validated
//    ledger exists, else the open ledger for a fresh request);
//  - skips work if the chosen ledger is older than one minute (non-standalone);
//  - calls PathRequestManager::updateAll() and exits when no requests remain;
//  - on SHAMapMissingNode, the catch block appears to re-acquire the broken
//    ledger (or its parent if the ledger is open) -- the acquire call itself
//    sits on lines missing from this extraction (1426/1429, 1434/1437).
// NOTE(review): numbering is non-contiguous (1338, 1342, 1344, 1354, 1361,
// 1370, 1386, 1399, 1415, 1426, 1429, 1434, 1437 absent); declarations such
// as `lastLedger` and several decrement/acquire statements are not visible.
1337void
1339{
1340    {
1341        std::lock_guard const ml(m_mutex);
1343        {
1345            mPathLedger.reset();
1346            JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1347            return;
1348        }
1349    }
1350
1351    while (!app_.getJobQueue().isStopping())
1352    {
1353        JLOG(m_journal.debug()) << "updatePaths running";
1355        {
1356            std::lock_guard const ml(m_mutex);
1357
1358            if (!mValidLedger.empty() &&
1359                (!mPathLedger || (mPathLedger->header().seq != mValidLedgerSeq)))
1360            { // We have a new valid ledger since the last full pathfinding
1362                lastLedger = mPathLedger;
1363            }
1364            else if (mPathFindNewRequest)
1365            { // We have a new request but no new ledger
1366                lastLedger = app_.getOpenLedger().current();
1367            }
1368            else
1369            { // Nothing to do
1371                mPathLedger.reset();
1372                JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1373                return;
1374            }
1375        }
1376
1377        if (!standalone_)
1378        { // don't pathfind with a ledger that's more than 60 seconds old
1379            using namespace std::chrono;
1380            auto age = time_point_cast<seconds>(app_.getTimeKeeper().closeTime()) -
1381                lastLedger->header().closeTime;
1382            if (age > 1min)
1383            {
1384                JLOG(m_journal.debug()) << "Published ledger too old for updating paths";
1385                std::lock_guard const ml(m_mutex);
1387                mPathLedger.reset();
1388                return;
1389            }
1390        }
1391
1392        try
1393        {
1394            auto& pathRequests = app_.getPathRequestManager();
1395            {
1396                std::lock_guard const ml(m_mutex);
1397                if (!pathRequests.requestsPending())
1398                {
1400                    mPathLedger.reset();
1401                    JLOG(m_journal.debug()) << "No path requests found. Nothing to do for updating "
1402                                               "paths. "
1403                                            << mPathFindThread << " jobs remaining";
1404                    return;
1405                }
1406            }
1407            JLOG(m_journal.debug()) << "Updating paths";
1408            pathRequests.updateAll(lastLedger);
1409
1410            std::lock_guard const ml(m_mutex);
1411            if (!pathRequests.requestsPending())
1412            {
1413                JLOG(m_journal.debug()) << "No path requests left. No need for further updating "
1414                                           "paths";
1416                mPathLedger.reset();
1417                return;
1418            }
1419        }
1420        catch (SHAMapMissingNode const& mn)
1421        {
1422            JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1423            if (lastLedger->open())
1424            {
1425                // our parent is the problem
1427                    lastLedger->header().parentHash,
1428                    lastLedger->header().seq - 1,
1430            }
1431            else
1432            {
1433                // this ledger is the problem
1435                    lastLedger->header().hash,
1436                    lastLedger->header().seq,
1438            }
1439        }
1440    }
1441}
1442
// LedgerMaster::newPathRequest (signature line 1444 lost in extraction; name
// grounded by the member index and the "PthFindNewReq" job tag).
// Records that a new pathfinding request arrived and dispatches a worker via
// newPFWork(); returns whether a worker is expected to service it.
// NOTE(review): line 1446 is absent -- presumably the lock acquisition for
// `ml` that newPFWork() requires; confirm against upstream.
1443bool
1445{
1447    mPathFindNewRequest = newPFWork("PthFindNewReq", ml);
1448    return mPathFindNewRequest;
1449}
1450
// LedgerMaster::isNewPathRequest (signature line 1452 lost in extraction).
// Atomically reads-and-clears the "new path request" flag under the master
// lock, returning its previous value.
1451bool
1453{
1454    std::lock_guard const ml(m_mutex);
1455    bool const ret = mPathFindNewRequest;
1456    mPathFindNewRequest = false;
1457    return ret;
1458}
1459
// If the order book is radically updated, we need to reprocess all
// pathfinding requests.
// LedgerMaster::newOrderBookDB (signature lines 1463/1465 lost in
// extraction). Drops the cached pathfinding ledger so the next pass starts
// fresh, then dispatches a pathfinding worker ("PthFindOBDB").
// NOTE(review): line 1465 is absent -- presumably a lock acquisition
// before mPathLedger is reset; confirm against upstream.
1460// If the order book is radically updated, we need to reprocess all
1461// pathfinding requests.
1462bool
1464{
1466    mPathLedger.reset();
1467
1468    return newPFWork("PthFindOBDB", ml);
1469}
1470
// LedgerMaster::newPFWork (signature/doc lines 1471-1476 lost in extraction;
// per the member index: "A thread needs to be dispatched to handle
// pathfinding work of some kind", taking a job name and the caller's held
// std::unique_lock<std::recursive_mutex>).
// Queues a jtUPDATE_PF job that runs updatePaths(). The missing guard
// (line 1476) and the missing statement at 1482 (presumably incrementing
// mPathFindThread on successful job submission) are not visible here.
// Returns true only when at least one pathfind thread exists and the
// application is not stopping -- see the comment retained below.
1473bool
1475{
1477    {
1478        JLOG(m_journal.debug()) << "newPFWork: Creating job. path find threads: "
1479                                << mPathFindThread;
1480        if (app_.getJobQueue().addJob(jtUPDATE_PF, name, [this]() { updatePaths(); }))
1481        {
1483        }
1484    }
1485    // If we're stopping don't give callers the expectation that their
1486    // request will be fulfilled, even if it may be serviced.
1487    return mPathFindThread > 0 && !app_.isStopping();
1488}
1489
// LedgerMaster::peekMutex (signature lines 1490-1491 lost in extraction;
// per the member index it returns std::recursive_mutex&).
// Exposes the master lock to callers that need to hold it across calls.
1492{
1493    return m_mutex;
1494}
1495
// NOTE(review): lines 1497-1507 are missing from this extraction -- the
// comment below introduces getCurrentLedger() (per the member index), whose
// entire body was dropped. Only the Rules accessor survives.
1496// The current ledger is the ledger we believe new transactions should go in
1502
1508
// Returns the protocol Rules in force: those of the last validated ledger,
// or, when no validated ledger exists yet, a Rules built from the
// configured amendment feature set. (Signature line 1510 lost in
// extraction.)
1509Rules
1511{
1512    // Once we have a guarantee that there's always a last validated
1513    // ledger then we can dispense with the if.
1514
1515    // Return the Rules from the last validated ledger.
1516    if (auto const ledger = getValidatedLedger())
1517        return ledger->rules();
1518
1519    return Rules(app_.config().features);
1520}
1521
// This is the last ledger we published to clients and can lag the validated
// ledger. (Signature lines 1524-1525 lost in extraction; per the member
// index this is getPublishedLedger() returning shared_ptr<ReadView const>.)
// Simple locked accessor for mPubLedger.
1522// This is the last ledger we published to clients and can lag the validated
1523// ledger.
1526{
1527    std::lock_guard const lock(m_mutex);
1528    return mPubLedger;
1529}
1530
1537
// LedgerMaster::getCloseTimeBySeq (signature lines 1538-1539 lost in
// extraction). Resolves the sequence to a hash via getHashBySeq(), then
// delegates to getCloseTimeByHash(); std::nullopt when no hash is known.
1540{
1541    uint256 const hash = getHashBySeq(ledgerIndex);
1542    return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex) : std::nullopt;
1543}
1544
// LedgerMaster::getCloseTimeByHash (signature lines 1545-1546 lost in
// extraction). Fetches the raw ledger header node from the node store and,
// if it carries the ledgerMaster hash prefix, skips the fixed-width fields
// preceding the close time (seq, drops, parentHash, txHash, acctHash,
// parentCloseTime).
// NOTE(review): line 1557 -- the statement that actually reads and returns
// the close time after the skip -- is missing from this extraction.
1547{
1548    auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1549    if (nodeObject && (nodeObject->getData().size() >= 120))
1550    {
1551        SerialIter it(nodeObject->getData().data(), nodeObject->getData().size());
1552        if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1553        {
1554            it.skip(
1555                4 + 8 + 32 +    // seq drops parentHash
1556                32 + 32 + 4);   // txHash acctHash parentClose
1558        }
1559    }
1560
1561    return std::nullopt;
1562}
1563
// LedgerMaster::getHashBySeq (signature line 1565 lost in extraction; per
// the member index: "Get a ledger's hash by sequence number using the
// cache"). Returns the cached hash when non-zero.
// NOTE(review): lines 1567 (the cache lookup populating `hash`) and 1572
// (the fallback return, presumably a database lookup) are missing from
// this extraction.
1564uint256
1566{
1568
1569    if (hash.isNonZero())
1570        return hash;
1571
1573}
1574
// LedgerMaster::walkHashBySeq(index, reason) -- convenience overload
// (signature lines 1575-1576 lost in extraction). Walks the skip list
// starting from the last validated ledger, if one exists.
1577{
1578    std::optional<LedgerHash> ledgerHash;
1579
1580    if (auto referenceLedger = mValidLedger.get())
1581        ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1582
1583    return ledgerHash;
1584}
1585
// LedgerMaster::walkHashBySeq(index, referenceLedger, reason) -- walk to a
// ledger's hash using the skip list (return type / name lines 1586-1587
// lost in extraction). Strategy:
//  1. give up if the reference ledger is absent or older than `index`;
//  2. look for the hash directly in the reference ledger's skip list;
//  3. otherwise pick a candidate ledger (getCandidateLedger) that should
//     contain the hash, fetch it from history or acquire it from the
//     network, and read the hash from there.
// Returns std::nullopt when the hash cannot be determined.
1588    std::uint32_t index,
1589    std::shared_ptr<ReadView const> const& referenceLedger,
1590    InboundLedger::Reason reason)
1591{
1592    if (!referenceLedger || (referenceLedger->header().seq < index))
1593    {
1594        // Nothing we can do. No validated ledger.
1595        return std::nullopt;
1596    }
1597
1598    // See if the hash for the ledger we need is in the reference ledger
1599    auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1600    if (ledgerHash)
1601        return ledgerHash;
1602
1603    // The hash is not in the reference ledger. Get another ledger which can
1604    // be located easily and should contain the hash.
1605    LedgerIndex const refIndex = getCandidateLedger(index);
1606    auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1607    XRPL_ASSERT(refHash, "xrpl::LedgerMaster::walkHashBySeq : found ledger");
1608    if (refHash)
1609    {
1610        // Try the hash and sequence of a better reference ledger just found
1611        auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1612
1613        if (ledger)
1614        {
1615            try
1616            {
1617                ledgerHash = hashOfSeq(*ledger, index, m_journal);
1618            }
1619            catch (SHAMapMissingNode const&)
1620            {
1621                ledger.reset();
1622            }
1623        }
1624
1625        // Try to acquire the complete ledger
1626        if (!ledger)
1627        {
1628            if (auto const l = app_.getInboundLedgers().acquire(*refHash, refIndex, reason))
1629            {
1630                ledgerHash = hashOfSeq(*l, index, m_journal);
1631                XRPL_ASSERT(
1632                    ledgerHash,
1633                    "xrpl::LedgerMaster::walkHashBySeq : has complete "
1634                    "ledger");
1635            }
1636        }
1637    }
1638    return ledgerHash;
1639}
1640
// LedgerMaster::getLedgerBySeq (signature lines 1641-1642 lost in
// extraction). Lookup order: validated-chain (via the valid ledger's skip
// list), then the ledger history cache, then the last closed ledger; on
// total failure the sequence is cleared from the complete-ledgers set and
// an empty pointer is returned.
// NOTE(review): line 1657 -- the statement executed when `hash` resolves
// inside the try block (presumably a history lookup/return by hash) -- is
// missing from this extraction.
1643{
1644    if (index <= mValidLedgerSeq)
1645    {
1646        // Always prefer a validated ledger
1647        if (auto valid = mValidLedger.get())
1648        {
1649            if (valid->header().seq == index)
1650                return valid;
1651
1652            try
1653            {
1654                auto const hash = hashOfSeq(*valid, index, m_journal);
1655
1656                if (hash)
1658            }
1659            catch (std::exception const&)  // NOLINT(bugprone-empty-catch)
1660            {
1661                // Missing nodes are already handled
1662            }
1663        }
1664    }
1665
1666    if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1667        return ret;
1668
1669    auto ret = mClosedLedger.get();
1670    if (ret && (ret->header().seq == index))
1671        return ret;
1672
1673    clearLedger(index);
1674    return {};
1675}
1676
// LedgerMaster::getLedgerByHash (signature lines 1677-1678 lost in
// extraction). Checks the ledger history cache first, then the last
// closed ledger; empty pointer when neither matches.
1679{
1680    if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1681        return ret;
1682
1683    auto ret = mClosedLedger.get();
1684    if (ret && (ret->header().hash == hash))
1685        return ret;
1686
1687    return {};
1688}
1689
// NOTE(review): the following run of small members is heavily truncated by
// the extraction -- most signature lines and some bodies (1691-1695,
// 1698, 1700, 1705-1708, 1711, 1713, 1719-1722, 1725, 1730-1731) are
// absent. Names below are taken from the Doxygen member index.
// doValidatedLedger -- body entirely missing from this listing.
1690void
1696
// sweep -- expires stale fetch-pack cache entries; the member index also
// lists a LedgerHistory::sweep(), and the missing line 1700 presumably
// swept mLedgerHistory as well -- confirm upstream.
1697void
1699{
1701    fetch_packs_.sweep();
1702}
1703
// getCacheHitRate -- body missing from this listing.
1704float
1709
// clearPriorLedgers -- removes all sequences below `seq` from the
// complete-ledgers range set (the guard line 1713 is missing).
1710void
1712{
1714    if (seq > 0)
1715        mCompleteLedgers.erase(range(0u, seq - 1));
1716}
1717
// clearLedgerCachePrior -- body missing from this listing.
1718void
1723
// takeReplay -- stores the replay data handed in by the caller.
1724void
1726{
1727    replayData = std::move(replay);
1728}
1729
// releaseReplay -- transfers ownership of the stored replay data to the
// caller, leaving replayData empty.
1732{
1733    return std::move(replayData);
1734}
1735
// LedgerMaster::fetchForHistory (name line 1737 lost in extraction; grounded
// by the XRPL_ASSERT tags below). Attempts to obtain the `missing` historic
// ledger: resolve its hash from history, acquire it from the network,
// possibly request a fetch pack, and on success store it, record it as the
// history ledger, and kick off a jtADVANCE "TryFill" back-fill job when the
// SQL database already holds its parent. On acquire failure it prefetches a
// window of earlier ledgers (bounded by ledger_fetch_size_ and the node
// store's earliest sequence). If no hash can even be found the failure is
// logged at fatal severity and the sequence above `missing` is cleared.
// Runs with the caller's master lock temporarily released (scope_unlock).
// NOTE(review): lines 1741 (the unique_lock parameter), 1750 (the guard
// around the acquire branch, presumably an isFailure check), and 1786 (the
// addJob call head) are missing from this extraction.
1736void
1738    std::uint32_t missing,
1739    bool& progress,
1740    InboundLedger::Reason reason,
1742{
1743    scope_unlock const sul{sl};
1744    if (auto hash = getLedgerHashForHistory(missing, reason))
1745    {
1746        XRPL_ASSERT(hash->isNonZero(), "xrpl::LedgerMaster::fetchForHistory : found ledger");
1747        auto ledger = getLedgerByHash(*hash);
1748        if (!ledger)
1749        {
1751            {
1752                ledger = app_.getInboundLedgers().acquire(*hash, missing, reason);
1753                if (!ledger && missing != fetch_seq_ &&
1754                    missing > app_.getNodeStore().earliestLedgerSeq())
1755                {
1756                    JLOG(m_journal.trace()) << "fetchForHistory want fetch pack " << missing;
1757                    fetch_seq_ = missing;
1758                    getFetchPack(missing, reason);
1759                }
1760                else
1761                    JLOG(m_journal.trace()) << "fetchForHistory no fetch pack for " << missing;
1762            }
1763            else
1764                JLOG(m_journal.debug()) << "fetchForHistory found failed acquire";
1765        }
1766        if (ledger)
1767        {
1768            auto seq = ledger->header().seq;
1769            XRPL_ASSERT(seq == missing, "xrpl::LedgerMaster::fetchForHistory : sequence match");
1770            JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1771            setFullLedger(ledger, false, false);
1772            int fillInProgress = 0;
1773            {
1774                std::lock_guard const lock(m_mutex);
1775                mHistLedger = ledger;
1776                fillInProgress = mFillInProgress;
1777            }
1778            if (fillInProgress == 0 &&
1779                app_.getRelationalDatabase().getHashByIndex(seq - 1) == ledger->header().parentHash)
1780            {
1781                {
1782                    // Previous ledger is in DB
1783                    std::lock_guard const lock(m_mutex);
1784                    mFillInProgress = seq;
1785                }
1787                    jtADVANCE, "TryFill", [this, ledger]() { tryFill(ledger); });
1788            }
1789            progress = true;
1790        }
1791        else
1792        {
1793            std::uint32_t fetchSz = 0;
1794            // Do not fetch ledger sequences lower
1795            // than the earliest ledger sequence
1796            fetchSz = app_.getNodeStore().earliestLedgerSeq();
1797            fetchSz =
1798                missing >= fetchSz ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1) : 0;
1799            try
1800            {
1801                for (std::uint32_t i = 0; i < fetchSz; ++i)
1802                {
1803                    std::uint32_t const seq = missing - i;
1804                    if (auto h = getLedgerHashForHistory(seq, reason))
1805                    {
1806                        XRPL_ASSERT(
1807                            h->isNonZero(),
1808                            "xrpl::LedgerMaster::fetchForHistory : "
1809                            "prefetched ledger");
1810                        app_.getInboundLedgers().acquire(*h, seq, reason);
1811                    }
1812                }
1813            }
1814            catch (std::exception const& ex)
1815            {
1816                JLOG(m_journal.warn()) << "Threw while prefetching: " << ex.what();
1817            }
1818        }
1819    }
1820    else
1821    {
1822        JLOG(m_journal.fatal()) << "Can't find ledger following prevMissing " << missing;
1823        JLOG(m_journal.fatal()) << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1824        JLOG(m_journal.fatal()) << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1825        JLOG(m_journal.fatal()) << "Acquire reason: "
1826                                << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1827                                                                             : "NOT HISTORY");
1828        clearLedger(missing + 1);
1829        progress = true;
1830    }
1831}
1832
// Try to publish ledgers, acquire missing ledgers
// LedgerMaster::doAdvance (signature line 1835 lost in extraction). Main
// advance loop, run under the master lock passed in by tryAdvance's job:
//  - publish any newly-validated ledgers (findNewLedgersToPublish), briefly
//    unlocking to store and broadcast each one, then kick pathfinding;
//  - when nothing is publishable and history acquisition applies, locate
//    the previous missing sequence (prevMissing under mCompleteLock) and
//    fetch it via fetchForHistory();
//  - repeat while any pass made progress or new work was flagged.
// NOTE(review): many guard lines are missing from this extraction
// (1844-1849, 1852-1853, 1857, 1859, 1865-1868, 1882, 1915) -- including
// the declaration of `missing`, the conditions that decide whether history
// should be fetched at all, the shouldAcquire-style predicate, and the
// valid-ledger-changed check. The control flow shown here is incomplete.
1833// Try to publish ledgers, acquire missing ledgers
1834void
1836{
1837    do
1838    {
1839        mAdvanceWork = false;  // If there's work to do, we'll make progress
1840        bool progress = false;
1841
1842        auto const pubLedgers = findNewLedgersToPublish(sl);
1843        if (pubLedgers.empty())
1844        {
1850            {
1851                // We are in sync, so can acquire
1854                {
1855                    std::lock_guard const sll(mCompleteLock);
1856                    missing = prevMissing(
1858                        mPubLedger->header().seq,
1860                }
1861                if (missing)
1862                {
1863                    JLOG(m_journal.trace()) << "tryAdvance discovered missing " << *missing;
1864                    if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1869                            *missing,
1870                            m_journal))
1871                    {
1872                        JLOG(m_journal.trace()) << "advanceThread should acquire";
1873                    }
1874                    else
1875                    {
1876                        missing = std::nullopt;
1877                    }
1878                }
1879                if (missing)
1880                {
1881                    fetchForHistory(*missing, progress, reason, sl);
1883                    {
1884                        JLOG(m_journal.debug()) << "tryAdvance found last valid changed";
1885                        progress = true;
1886                    }
1887                }
1888            }
1889            else
1890            {
1891                mHistLedger.reset();
1892                JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1893            }
1894        }
1895        else
1896        {
1897            JLOG(m_journal.trace())
1898                << "tryAdvance found " << pubLedgers.size() << " ledgers to publish";
1899            for (auto const& ledger : pubLedgers)
1900            {
1901                {
1902                    scope_unlock const sul{sl};
1903                    JLOG(m_journal.debug()) << "tryAdvance publishing seq " << ledger->header().seq;
1904                    setFullLedger(ledger, true, true);
1905                }
1906
1907                setPubLedger(ledger);
1908
1909                {
1910                    scope_unlock const sul{sl};
1911                    app_.getOPs().pubLedger(ledger);
1912                }
1913            }
1914
1916            progress = newPFWork("PthFindNewLed", sl);
1917        }
1918        if (progress)
1919            mAdvanceWork = true;
1920    } while (mAdvanceWork);
1921}
1922
// LedgerMaster::addFetchPack (signature line 1924 lost in extraction).
// Inserts received fetch-pack data into the fetch_packs_ cache, replacing
// any existing entry for the same hash.
1923void
1925{
1926    fetch_packs_.canonicalize_replace_client(hash, data);
1927}
1928
// LedgerMaster::getFetchPack(hash) (signature lines 1929-1930 lost in
// extraction). Pops cached fetch-pack data for `hash`, returning it only
// if its sha512Half matches the requested hash (integrity check);
// std::nullopt otherwise. The entry is removed from the cache either way.
1931{
1932    Blob data;
1933    if (fetch_packs_.retrieve(hash, data))
1934    {
1935        fetch_packs_.del(hash, false);
1936        if (hash == sha512Half(makeSlice(data)))
1937            return data;
1938    }
1939    return std::nullopt;
1940}
1941
// gotFetchPack -- body (lines 1943-1952) entirely missing from this
// extraction; only the return type survives.
1942void
1953
// Helper for makeFetchPack (name line 1980 lost in extraction; "populate-
// FetchPack" grounded by the XRPL_ASSERT tag). Walks the nodes of `want`
// that are not present in `have` and appends up to `cnt` of them (skipping
// leaves when withLeaves is false) as TMIndexedObject entries tagged with
// ledger sequence `seq`.
// NOTE(review): line 1997 -- the serialization of node `n` into `s` before
// its bytes are copied into the protobuf object -- is missing from this
// extraction (the member index shows serializeWithPrefix(Serializer&)).
1979static void
1981    SHAMap const& want,
1982    SHAMap const* have,
1983    std::uint32_t cnt,
1984    protocol::TMGetObjectByHash* into,
1985    std::uint32_t seq,
1986    bool withLeaves = true)
1987{
1988    XRPL_ASSERT(cnt, "xrpl::populateFetchPack : nonzero count input");
1989
1990    Serializer s(1024);
1991
1992    want.visitDifferences(have, [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
1993        if (!withLeaves && n.isLeaf())
1994            return true;
1995
1996        s.erase();
1998
1999        auto const& hash = n.getHash().as_uint256();
2000
2001        protocol::TMIndexedObject* obj = into->add_objects();
2002        obj->set_ledgerseq(seq);
2003        obj->set_hash(hash.data(), hash.size());
2004        obj->set_data(s.getDataPtr(), s.getLength());
2005
2006        return --cnt != 0;
2007    });
2008}
2009
// LedgerMaster::makeFetchPack (name line 2011 lost in extraction; per the
// member index it takes the requesting peer, the TMGetObjectByHash request,
// the requestor's ledger hash, and the request's uptime stamp).
// Builds a fetch pack for a peer: validates the request (not stale, peer
// still connected, ledger known / closed / not too early, parent known --
// charging fees for malformed or unanswerable requests), then walks
// backwards from the parent of the requestor's ledger, adding each ledger's
// serialized header plus the state-map and tx-map node differences, until
// 512 objects are collected or the one-second budget expires.
// NOTE(review): lines 2013, 2015 (the request and uptime parameters), 2024
// (the load guard condition before "Too busy"), and 2097 are missing from
// this extraction.
2010void
2012    std::weak_ptr<Peer> const& wPeer,
2014    uint256 haveLedgerHash,
2016{
2017    using namespace std::chrono_literals;
2018    if (UptimeClock::now() > uptime + 1s)
2019    {
2020        JLOG(m_journal.info()) << "Fetch pack request got stale";
2021        return;
2022    }
2023
2025    {
2026        JLOG(m_journal.info()) << "Too busy to make fetch pack";
2027        return;
2028    }
2029
2030    auto peer = wPeer.lock();
2031
2032    if (!peer)
2033        return;
2034
2035    auto have = getLedgerByHash(haveLedgerHash);
2036
2037    if (!have)
2038    {
2039        JLOG(m_journal.info()) << "Peer requests fetch pack for ledger we don't have: " << have;
2040        peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2041        return;
2042    }
2043
2044    if (have->open())
2045    {
2046        JLOG(m_journal.warn()) << "Peer requests fetch pack from open ledger: " << have;
2047        peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2048        return;
2049    }
2050
2051    if (have->header().seq < getEarliestFetch())
2052    {
2053        JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2054        peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2055        return;
2056    }
2057
2058    auto want = getLedgerByHash(have->header().parentHash);
2059
2060    if (!want)
2061    {
2062        JLOG(m_journal.info()) << "Peer requests fetch pack for ledger whose predecessor we "
2063                               << "don't have: " << have;
2064        peer->charge(Resource::feeRequestNoReply, "get_object ledger no parent");
2065        return;
2066    }
2067
2068    try
2069    {
2070        Serializer hdr(128);
2071
2072        protocol::TMGetObjectByHash reply;
2073        reply.set_query(false);
2074
2075        if (request->has_seq())
2076            reply.set_seq(request->seq());
2077
2078        reply.set_ledgerhash(request->ledgerhash());
2079        reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2080
2081        // Building a fetch pack:
2082        //  1. Add the header for the requested ledger.
2083        //  2. Add the nodes for the AccountStateMap of that ledger.
2084        //  3. If there are transactions, add the nodes for the
2085        //     transactions of the ledger.
2086        //  4. If the FetchPack now contains at least 512 entries then stop.
2087        //  5. If not very much time has elapsed, then loop back and repeat
2088        //     the same process adding the previous ledger to the FetchPack.
2089        do
2090        {
2091            std::uint32_t const lSeq = want->header().seq;
2092
2093            {
2094                // Serialize the ledger header:
2095                hdr.erase();
2096
2098                addRaw(want->header(), hdr);
2099
2100                // Add the data
2101                protocol::TMIndexedObject* obj = reply.add_objects();
2102                obj->set_hash(want->header().hash.data(), want->header().hash.size());
2103                obj->set_data(hdr.getDataPtr(), hdr.getLength());
2104                obj->set_ledgerseq(lSeq);
2105            }
2106
2107            populateFetchPack(want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2108
2109            // We use nullptr here because transaction maps are per ledger
2110            // and so the requestor is unlikely to already have it.
2111            if (want->header().txHash.isNonZero())
2112                populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2113
2114            if (reply.objects().size() >= 512)
2115                break;
2116
2117            have = std::move(want);
2118            want = getLedgerByHash(have->header().parentHash);
2119        } while (want && UptimeClock::now() <= uptime + 1s);
2120
2121        auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2122
2123        JLOG(m_journal.info()) << "Built fetch pack with " << reply.objects().size() << " nodes ("
2124                               << msg->getBufferSize() << " bytes)";
2125
2126        peer->send(msg);
2127    }
2128    catch (std::exception const& ex)
2129    {
2130        JLOG(m_journal.warn()) << "Exception building fetch pack. Exception: " << ex.what();
2131    }
2132}
2133
// LedgerMaster::getFetchPackCacheSize (signature lines 2134-2135 lost in
// extraction). Reports the number of entries in the fetch-pack cache.
2136{
2137    return fetch_packs_.getCacheSize();
2138}
2139
// minSqlSeq -- body (lines 2141-2145) entirely missing from this
// extraction; only the descriptive comment survives.
2140// Returns the minimum ledger sequence in SQL database, if any.
2146
// LedgerMaster::txnIdFromIndex (signature lines 2147-2148 lost in
// extraction; per the member index: optional<uint256>
// txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)).
// Resolves a (ledger sequence, transaction index) pair to a transaction ID
// by scanning the validated ledger's transactions for a matching
// sfTransactionIndex metadata field. Returns empty when the ledger is
// outside the validated range, unavailable, or has no matching entry.
2149{
2150    uint32_t first = 0, last = 0;
2151
2152    if (!getValidatedRange(first, last) || last < ledgerSeq)
2153        return {};
2154
2155    auto const lgr = getLedgerBySeq(ledgerSeq);
2156    if (!lgr || lgr->txs.empty())
2157        return {};
2158
2159    for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2160    {
2161        if (it->first && it->second && it->second->isFieldPresent(sfTransactionIndex) &&
2162            it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2163            return it->first->getTransactionID();
2164    }
2165
2166    return {};
2167}
2168
2169} // namespace xrpl
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:130
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:180
A generic endpoint for log messages.
Definition Journal.h:40
Stream fatal() const
Definition Journal.h:325
Stream error() const
Definition Journal.h:319
Stream debug() const
Definition Journal.h:301
Stream info() const
Definition Journal.h:307
Stream trace() const
Severity stream access functions.
Definition Journal.h:295
Stream warn() const
Definition Journal.h:313
typename Clock::time_point time_point
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:261
bool LEDGER_REPLAY
Definition Config.h:208
virtual bool isFailure(uint256 const &h)=0
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:147
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:105
bool isStopping() const
Definition JobQueue.h:210
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
void sweep()
Remove stale cache entries.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
void set(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > get()
std::optional< LedgerIndex > minSqlSeq()
std::atomic< LedgerIndex > mValidLedgerSeq
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
bool haveLedger(std::uint32_t seq)
TaggedCache< uint256, Blob > fetch_packs_
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
void setValidLedger(std::shared_ptr< Ledger const > const &l)
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
std::recursive_mutex & peekMutex()
std::uint32_t fetch_seq_
std::chrono::seconds getValidatedLedgerAge()
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerIndex getCurrentLedgerIndex()
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void applyHeldTransactions()
Apply held transactions to the open ledger This is normally called as we close the ledger.
bool storeLedger(std::shared_ptr< Ledger const > ledger)
void gotFetchPack(bool progress, std::uint32_t seq)
void tryFill(std::shared_ptr< Ledger const > ledger)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
void clearPriorLedgers(LedgerIndex seq)
LedgerIndex const max_ledger_difference_
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void setBuildingLedger(LedgerIndex index)
std::pair< uint256, LedgerIndex > mLastValidLedger
bool isCaughtUp(std::string &reason)
std::size_t getFetchPackCacheSize() const
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data of the corresponding hash from peers.
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
std::atomic< std::uint32_t > mPubLedgerClose
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::uint32_t const fetch_depth_
std::atomic< LedgerIndex > mPubLedgerSeq
void clearLedger(std::uint32_t seq)
void clearLedgerCachePrior(LedgerIndex seq)
std::atomic_flag mGotFetchPackThread
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< Ledger const > getClosedLedger()
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::uint32_t const ledger_history_
bool isValidated(ReadView const &ledger)
void fixMismatch(ReadView const &ledger)
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
LedgerIndex getValidLedgerIndex()
std::shared_ptr< Ledger const > mPathLedger
bool const standalone_
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > mHistLedger
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
LedgerHistory mLedgerHistory
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
std::chrono::seconds getPublishedLedgerAge()
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
RangeSet< std::uint32_t > mCompleteLedgers
LedgerHolder mValidLedger
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
LedgerHolder mClosedLedger
void doAdvance(std::unique_lock< std::recursive_mutex > &)
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
std::shared_ptr< ReadView const > getCurrentLedger()
beast::Journal m_journal
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::unique_ptr< LedgerReplay > replayData
std::unique_ptr< LedgerReplay > releaseReplay()
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::shared_ptr< Ledger const > mPubLedger
void failedSave(std::uint32_t seq, uint256 const &hash)
Application & app_
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::recursive_mutex mCompleteLock
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual void setAmendmentBlocked()=0
virtual bool isBlocked()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual bool isAmendmentWarned()=0
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
virtual bool isNeedNetworkLedger()=0
virtual void clearNeedNetworkLedger()=0
virtual void setAmendmentWarned()=0
Persistency layer for NodeObject.
Definition Database.h:31
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:189
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:209
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
virtual void setup(std::shared_ptr< ReadView const > const &ledger)=0
Initialize or update the order book database with a new ledger.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:31
virtual LedgerHeader const & header() const =0
Returns information about the ledger.
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
getHashByIndex Returns the hash of the ledger with the given sequence.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
getMinLedgerSeq Returns the minimum ledger sequence in the Ledgers table.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
getHashesByIndex Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
Rules controlling protocol behavior.
Definition Rules.h:18
uint256 const & as_uint256() const
Definition SHAMapHash.h:24
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try and maintain in our database.
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:77
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
void const * getDataPtr() const
Definition Serializer.h:197
int getLength() const
Definition Serializer.h:207
virtual PendingSaves & getPendingSaves()=0
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual beast::Journal getJournal(std::string const &name)=0
virtual NetworkOPs & getOPs()=0
virtual SHAMapStore & getSHAMapStore()=0
virtual NodeStore::Database & getNodeStore()=0
virtual OpenLedger & getOpenLedger()=0
virtual ValidatorList & getValidators()=0
virtual PathRequestManager & getPathRequestManager()=0
virtual RCLValidations & getValidations()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual bool isStopping() const =0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual Overlay & getOverlay()=0
virtual TimeKeeper & getTimeKeeper()=0
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:44
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:56
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
bool isNonZero() const
Definition base_uint.h:518
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:202
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
STL namespace.
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:163
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
bool pendSaveValidated(ServiceRegistry &registry, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger.
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:204
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:94
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:34
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:602
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another number in percentage.
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
SizedItem
Definition Config.h:27
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:95
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:103
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:232
@ jtLEDGER_DATA
Definition Job.h:45
@ jtUPDATE_PF
Definition Job.h:35
@ jtPUBOLDLEDGER
Definition Job.h:23
@ jtADVANCE
Definition Job.h:46
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
static constexpr int MAX_LEDGER_GAP
@ ledgerMaster
ledger master data for signing
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:215
static constexpr int MAX_WRITE_LOAD_ACQUIRE
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)