rippled
LedgerMaster.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/Ledger.h>
22#include <xrpld/app/ledger/LedgerMaster.h>
23#include <xrpld/app/ledger/LedgerReplayer.h>
24#include <xrpld/app/ledger/OpenLedger.h>
25#include <xrpld/app/ledger/OrderBookDB.h>
26#include <xrpld/app/ledger/PendingSaves.h>
27#include <xrpld/app/main/Application.h>
28#include <xrpld/app/misc/AmendmentTable.h>
29#include <xrpld/app/misc/LoadFeeTrack.h>
30#include <xrpld/app/misc/NetworkOPs.h>
31#include <xrpld/app/misc/SHAMapStore.h>
32#include <xrpld/app/misc/Transaction.h>
33#include <xrpld/app/misc/TxQ.h>
34#include <xrpld/app/misc/ValidatorList.h>
35#include <xrpld/app/paths/PathRequests.h>
36#include <xrpld/app/rdb/RelationalDatabase.h>
37#include <xrpld/core/TimeKeeper.h>
38#include <xrpld/overlay/Overlay.h>
39#include <xrpld/overlay/Peer.h>
40
41#include <xrpl/basics/Log.h>
42#include <xrpl/basics/MathUtilities.h>
43#include <xrpl/basics/UptimeClock.h>
44#include <xrpl/basics/contract.h>
45#include <xrpl/basics/safe_cast.h>
46#include <xrpl/basics/scope.h>
47#include <xrpl/beast/utility/instrumentation.h>
48#include <xrpl/protocol/BuildInfo.h>
49#include <xrpl/protocol/HashPrefix.h>
50#include <xrpl/protocol/digest.h>
51#include <xrpl/resource/Fees.h>
52
53#include <algorithm>
54#include <chrono>
55#include <cstdlib>
56#include <memory>
57#include <vector>
58
59namespace ripple {
60
61// Don't catch up more than 100 ledgers (cannot exceed 256)
62static constexpr int MAX_LEDGER_GAP{100};
63
64// Don't acquire history if ledger is too old
65static constexpr std::chrono::seconds MAX_LEDGER_AGE_ACQUIRE{60};
66
67// Don't acquire history if write load is too high
68static constexpr int MAX_WRITE_LOAD_ACQUIRE{8192};
69
70// Helper function for LedgerMaster::doAdvance()
71// Return true if candidateLedger should be fetched from the network.
72static bool
73shouldAcquire(
74 std::uint32_t const currentLedger,
75 std::uint32_t const ledgerHistory,
76 std::optional<LedgerIndex> const minimumOnline,
77 std::uint32_t const candidateLedger,
78 beast::Journal j)
79{
80 bool const ret = [&]() {
81 // Fetch ledger if it may be the current ledger
82 if (candidateLedger >= currentLedger)
83 return true;
84
85 // Or if it is within our configured history range:
86 if (currentLedger - candidateLedger <= ledgerHistory)
87 return true;
88
89 // Or if greater than or equal to a specific minimum ledger.
90 // Do nothing if the minimum ledger to keep online is unknown.
91 return minimumOnline.has_value() && candidateLedger >= *minimumOnline;
92 }();
93
94 JLOG(j.trace()) << "Missing ledger " << candidateLedger
95 << (ret ? " should" : " should NOT") << " be acquired";
96 return ret;
97}
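// For example, with currentLedger 80'000'000, ledgerHistory 256 and no
// minimumOnline value, a missing ledger 79'999'900 would be acquired (it is
// within the configured history range) while 70'000'000 would not.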
98
99LedgerMaster::LedgerMaster(
100 Application& app,
101 Stopwatch& stopwatch,
102 beast::insight::Collector::ptr const& collector,
103 beast::Journal journal)
104 : app_(app)
105 , m_journal(journal)
106 , mLedgerHistory(collector, app)
107 , standalone_(app_.config().standalone())
108 , fetch_depth_(
109 app_.getSHAMapStore().clampFetchDepth(app_.config().FETCH_DEPTH))
110 , ledger_history_(app_.config().LEDGER_HISTORY)
111 , ledger_fetch_size_(app_.config().getValueFor(SizedItem::ledgerFetch))
112 , fetch_packs_(
113 "FetchPack",
114 65536,
115 std::chrono::seconds{45},
116 stopwatch,
117 app_.journal("TaggedCache"))
118 , m_stats(std::bind(&LedgerMaster::collect_metrics, this), collector)
119{
120}
121
122LedgerIndex
123LedgerMaster::getCurrentLedgerIndex()
124{
125 return app_.openLedger().current()->info().seq;
126}
127
127
128LedgerIndex
129LedgerMaster::getValidLedgerIndex()
130{
131 return mValidLedgerSeq;
132}
133
134bool
135LedgerMaster::isCompatible(
136 ReadView const& view,
137 beast::Journal::Stream s,
138 char const* reason)
139{
140 auto validLedger = getValidatedLedger();
141
142 if (validLedger && !areCompatible(*validLedger, view, s, reason))
143 {
144 return false;
145 }
146
147 {
148 std::lock_guard sl(m_mutex);
149
150 if ((mLastValidLedger.second != 0) &&
151 !areCompatible(
152 mLastValidLedger.first,
153 mLastValidLedger.second,
154 view,
155 s,
156 reason))
157 {
158 return false;
159 }
160 }
161
162 return true;
163}
164
165std::chrono::seconds
166LedgerMaster::getPublishedLedgerAge()
167{
168 using namespace std::chrono_literals;
169 std::chrono::seconds pubClose{mPubLedgerClose.load()};
170 if (pubClose == 0s)
171 {
172 JLOG(m_journal.debug()) << "No published ledger";
173 return weeks{2};
174 }
175
176 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
177 ret -= pubClose;
178 ret = (ret > 0s) ? ret : 0s;
179 static std::chrono::seconds lastRet = -1s;
180
181 if (ret != lastRet)
182 {
183 JLOG(m_journal.trace()) << "Published ledger age is " << ret.count();
184 lastRet = ret;
185 }
186 return ret;
187}
188
189std::chrono::seconds
190LedgerMaster::getValidatedLedgerAge()
191{
192 using namespace std::chrono_literals;
193
194 std::chrono::seconds valClose{mValidLedgerSign.load()};
195 if (valClose == 0s)
196 {
197 JLOG(m_journal.debug()) << "No validated ledger";
198 return weeks{2};
199 }
200
201 std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch();
202 ret -= valClose;
203 ret = (ret > 0s) ? ret : 0s;
204 static std::chrono::seconds lastRet = -1s;
205
206 if (ret != lastRet)
207 {
208 JLOG(m_journal.trace()) << "Validated ledger age is " << ret.count();
209 lastRet = ret;
210 }
211 return ret;
212}
213
214bool
215LedgerMaster::isCaughtUp(std::string& reason)
216{
217 using namespace std::chrono_literals;
218
219 if (getPublishedLedgerAge() > 3min)
220 {
221 reason = "No recently-published ledger";
222 return false;
223 }
224 std::uint32_t validClose = mValidLedgerSign.load();
225 std::uint32_t pubClose = mPubLedgerClose.load();
226 if (!validClose || !pubClose)
227 {
228 reason = "No published ledger";
229 return false;
230 }
231 if (validClose > (pubClose + 90))
232 {
233 reason = "Published ledger lags validated ledger";
234 return false;
235 }
236 return true;
237}
238
239void
240LedgerMaster::setValidLedger(std::shared_ptr<Ledger const> const& l)
241{
242 std::vector<NetClock::time_point> times;
243 std::optional<uint256> consensusHash;
244
245 if (!standalone_)
246 {
247 auto validations = app_.validators().negativeUNLFilter(
248 app_.getValidations().getTrustedForLedger(
249 l->info().hash, l->info().seq));
250 times.reserve(validations.size());
251 for (auto const& val : validations)
252 times.push_back(val->getSignTime());
253
254 if (!validations.empty())
255 consensusHash = validations.front()->getConsensusHash();
256 }
257
258 NetClock::time_point signTime;
259
260 if (!times.empty() && times.size() >= app_.validators().quorum())
261 {
262 // Calculate the sample median
263 std::sort(times.begin(), times.end());
264 auto const t0 = times[(times.size() - 1) / 2];
265 auto const t1 = times[times.size() / 2];
266 signTime = t0 + (t1 - t0) / 2;
267 }
268 else
269 {
270 signTime = l->info().closeTime;
271 }
272
273 mValidLedger.set(l);
274 mValidLedgerSign = signTime.time_since_epoch().count();
275 XRPL_ASSERT(
276 mValidLedgerSeq || !app_.getMaxDisallowedLedger() ||
277 l->info().seq + max_ledger_difference_ >
278 app_.getMaxDisallowedLedger(),
279 "ripple::LedgerMaster::setValidLedger : valid ledger sequence");
280
281 mValidLedgerSeq = l->info().seq;
282
283 app_.getOPs().updateLocalTx(*l);
284 app_.getSHAMapStore().onLedgerClosed(getValidatedLedger());
285 mLedgerHistory.validatedLedger(l, consensusHash);
286 app_.getAmendmentTable().doValidatedLedger(l);
287 if (!app_.getOPs().isBlocked())
288 {
289 if (app_.getAmendmentTable().hasUnsupportedEnabled())
290 {
291 JLOG(m_journal.error()) << "One or more unsupported amendments "
292 "activated: server blocked.";
293 app_.getOPs().setAmendmentBlocked();
294 }
295 else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger())
296 {
297 // Amendments can lose majority, so re-check periodically (every
298 // flag ledger), and clear the flag if appropriate. If an unknown
299 // amendment gains majority log a warning as soon as it's
300 // discovered, then again every flag ledger until the operator
301 // upgrades, the amendment loses majority, or the amendment goes
302 // live and the node gets blocked. Unlike being amendment blocked,
303 // this message may be logged more than once per session, because
304 // the node will otherwise function normally, and this gives
305 // operators an opportunity to see and resolve the warning.
306 if (auto const first =
307 app_.getAmendmentTable().firstUnsupportedExpected())
308 {
309 JLOG(m_journal.error()) << "One or more unsupported amendments "
310 "reached majority. Upgrade before "
311 << to_string(*first)
312 << " to prevent your server from "
313 "becoming amendment blocked.";
314 app_.getOPs().setAmendmentWarned();
315 }
316 else
317 app_.getOPs().clearAmendmentWarned();
318 }
319 }
320}
321
322void
323LedgerMaster::setPubLedger(std::shared_ptr<Ledger const> const& l)
324{
325 mPubLedger = l;
326 mPubLedgerClose = l->info().closeTime.time_since_epoch().count();
327 mPubLedgerSeq = l->info().seq;
328}
329
330void
331LedgerMaster::addHeldTransaction(
332 std::shared_ptr<Transaction> const& transaction)
333{
334 std::lock_guard ml(m_mutex);
335 mHeldTransactions.insert(transaction->getSTransaction());
336}
337
338// Validate a ledger's close time and sequence number if we're considering
339// jumping to that ledger. This helps defend against some rare hostile or
340// diverged majority scenarios.
341bool
342LedgerMaster::canBeCurrent(std::shared_ptr<Ledger const> const& ledger)
343{
344 XRPL_ASSERT(ledger, "ripple::LedgerMaster::canBeCurrent : non-null input");
345
346 // Never jump to a candidate ledger that precedes our
347 // last validated ledger
348
349 auto validLedger = getValidatedLedger();
350 if (validLedger && (ledger->info().seq < validLedger->info().seq))
351 {
352 JLOG(m_journal.trace())
353 << "Candidate for current ledger has low seq " << ledger->info().seq
354 << " < " << validLedger->info().seq;
355 return false;
356 }
357
358 // Ensure this ledger's parent close time is within five minutes of
359 // our current time. If we already have a known fully-valid ledger
360 // we perform this check. Otherwise, we only do it if we've built a
361 // few ledgers as our clock can be off when we first start up
362
363 auto closeTime = app_.timeKeeper().closeTime();
364 auto ledgerClose = ledger->info().parentCloseTime;
365
366 using namespace std::chrono_literals;
367 if ((validLedger || (ledger->info().seq > 10)) &&
368 ((std::max(closeTime, ledgerClose) - std::min(closeTime, ledgerClose)) >
369 5min))
370 {
371 JLOG(m_journal.warn())
372 << "Candidate for current ledger has close time "
373 << to_string(ledgerClose) << " at network time "
374 << to_string(closeTime) << " seq " << ledger->info().seq;
375 return false;
376 }
377
378 if (validLedger)
379 {
380 // Sequence number must not be too high. We allow ten ledgers
381 // for time inaccuracies plus a maximum run rate of one ledger
382 // every two seconds. The goal is to prevent a malicious ledger
383 // from increasing our sequence unreasonably high
384
385 LedgerIndex maxSeq = validLedger->info().seq + 10;
386
387 if (closeTime > validLedger->info().parentCloseTime)
388 maxSeq += std::chrono::duration_cast<std::chrono::seconds>(
389 closeTime - validLedger->info().parentCloseTime)
390 .count() /
391 2;
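// For example, if 60 seconds have passed since the last validated
// ledger's parent close time, maxSeq = validated seq + 10 + 60/2.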
392
393 if (ledger->info().seq > maxSeq)
394 {
395 JLOG(m_journal.warn())
396 << "Candidate for current ledger has high seq "
397 << ledger->info().seq << " > " << maxSeq;
398 return false;
399 }
400
401 JLOG(m_journal.trace())
402 << "Acceptable seq range: " << validLedger->info().seq
403 << " <= " << ledger->info().seq << " <= " << maxSeq;
404 }
405
406 return true;
407}
408
409void
410LedgerMaster::switchLCL(std::shared_ptr<Ledger const> const& lastClosed)
411{
412 XRPL_ASSERT(lastClosed, "ripple::LedgerMaster::switchLCL : non-null input");
413 if (!lastClosed->isImmutable())
414 LogicError("mutable ledger in switchLCL");
415
416 if (lastClosed->open())
417 LogicError("The new last closed ledger is open!");
418
419 {
420 std::lock_guard ml(m_mutex);
421 mClosedLedger.set(lastClosed);
422 }
423
424 if (standalone_)
425 {
426 setFullLedger(lastClosed, true, false);
427 tryAdvance();
428 }
429 else
430 {
431 checkAccept(lastClosed);
432 }
433}
434
435bool
436LedgerMaster::fixIndex(LedgerIndex ledgerIndex, LedgerHash const& ledgerHash)
437{
438 return mLedgerHistory.fixIndex(ledgerIndex, ledgerHash);
439}
440
441bool
442LedgerMaster::storeLedger(std::shared_ptr<Ledger const> ledger)
443{
444 bool validated = ledger->info().validated;
445 // Returns true if we already had the ledger
446 return mLedgerHistory.insert(std::move(ledger), validated);
447}
448
454void
455LedgerMaster::applyHeldTransactions()
456{
457 CanonicalTXSet const set = [this]() {
458 std::lock_guard sl(m_mutex);
459 // VFALCO NOTE The hash for an open ledger is undefined so we use
460 // something that is a reasonable substitute.
461 CanonicalTXSet set(app_.openLedger().current()->info().parentHash);
462 std::swap(mHeldTransactions, set);
463 return set;
464 }();
465
466 if (!set.empty())
467 app_.getOPs().processTransactionSet(set);
468}
469
477
478void
479LedgerMaster::setBuildingLedger(LedgerIndex index)
480{
481 mBuildingLedgerSeq.store(index);
482}
483
484bool
485LedgerMaster::haveLedger(std::uint32_t seq)
486{
487 std::lock_guard sl(mCompleteLock);
488 return boost::icl::contains(mCompleteLedgers, seq);
489}
490
491void
492LedgerMaster::clearLedger(std::uint32_t seq)
493{
494 std::lock_guard sl(mCompleteLock);
495 mCompleteLedgers.erase(seq);
496}
497
498bool
499LedgerMaster::isValidated(ReadView const& ledger)
500{
501 if (ledger.open())
502 return false;
503
504 if (ledger.info().validated)
505 return true;
506
507 auto const seq = ledger.info().seq;
508 try
509 {
510 // Use the skip list in the last validated ledger to see if ledger
511 // comes before the last validated ledger (and thus has been
512 // validated).
513 auto const hash = walkHashBySeq(seq, InboundLedger::Reason::GENERIC);
514
515 if (!hash || ledger.info().hash != *hash)
516 {
517 // This ledger's hash is not the hash of the validated ledger
518 if (hash)
519 {
520 XRPL_ASSERT(
521 hash->isNonZero(),
522 "ripple::LedgerMaster::isValidated : nonzero hash");
523 uint256 valHash =
524 app_.getRelationalDatabase().getHashByIndex(seq);
525 if (valHash == ledger.info().hash)
526 {
527 // SQL database doesn't match ledger chain
528 clearLedger(seq);
529 }
530 }
531 return false;
532 }
533 }
534 catch (SHAMapMissingNode const& mn)
535 {
536 JLOG(m_journal.warn()) << "Ledger #" << seq << ": " << mn.what();
537 return false;
538 }
539
540 // Mark ledger as validated to save time if we see it again.
541 ledger.info().validated = true;
542 return true;
543}
544
545// returns Ledgers we have all the nodes for
546bool
547LedgerMaster::getFullValidatedRange(
548 std::uint32_t& minVal,
549 std::uint32_t& maxVal)
550{
551 // Validated ledger is likely not stored in the DB yet so we use the
552 // published ledger which is.
553 maxVal = mPubLedgerSeq.load();
554
555 if (!maxVal)
556 return false;
557
558 std::optional<std::uint32_t> maybeMin;
559 {
560 std::lock_guard sl(mCompleteLock);
561 maybeMin = prevMissing(mCompleteLedgers, maxVal);
562 }
563
564 if (maybeMin == std::nullopt)
565 minVal = maxVal;
566 else
567 minVal = 1 + *maybeMin;
568
569 return true;
570}
571
572// Returns Ledgers we have all the nodes for and are indexed
573bool
574LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal)
575{
576 if (!getFullValidatedRange(minVal, maxVal))
577 return false;
578
579 // Remove from the validated range any ledger sequences that may not be
580 // fully updated in the database yet
581
582 auto const pendingSaves = app_.pendingSaves().getSnapshot();
583
584 if (!pendingSaves.empty() && ((minVal != 0) || (maxVal != 0)))
585 {
586 // Ensure we shrink the tips as much as possible. If we have 7-9 and
587 // 8,9 are invalid, we don't want to see the 8 and shrink to just 9
588 // because then we'll have nothing when we could have 7.
589 while (pendingSaves.count(maxVal) > 0)
590 --maxVal;
591 while (pendingSaves.count(minVal) > 0)
592 ++minVal;
593
594 // Best effort for remaining exclusions
595 for (auto v : pendingSaves)
596 {
597 if ((v.first >= minVal) && (v.first <= maxVal))
598 {
599 if (v.first > ((minVal + maxVal) / 2))
600 maxVal = v.first - 1;
601 else
602 minVal = v.first + 1;
603 }
604 }
605
606 if (minVal > maxVal)
607 minVal = maxVal = 0;
608 }
609
610 return true;
611}
612
613// Get the earliest ledger we will let peers fetch
614std::uint32_t
615LedgerMaster::getEarliestFetch()
616{
617 // The earliest ledger we will let people fetch is ledger zero,
618 // unless that creates a larger range than allowed
619 std::uint32_t e = getClosedLedger()->info().seq;
620
621 if (e > fetch_depth_)
622 e -= fetch_depth_;
623 else
624 e = 0;
625 return e;
626}
627
628void
629LedgerMaster::tryFill(std::shared_ptr<Ledger const> ledger)
630{
631 std::uint32_t seq = ledger->info().seq;
632 uint256 prevHash = ledger->info().parentHash;
633
634 std::map<std::uint32_t, LedgerHashPair> ledgerHashes;
635
636 std::uint32_t minHas = seq;
637 std::uint32_t maxHas = seq;
638
639 NodeStore::Database& nodeStore{app_.getNodeStore()};
640 while (!app_.getJobQueue().isStopping() && seq > 0)
641 {
642 {
643 std::lock_guard ml(mCompleteLock);
644 minHas = seq;
645 --seq;
646
647 if (haveLedger(seq))
648 break;
649 }
650
651 auto it(ledgerHashes.find(seq));
652
653 if (it == ledgerHashes.end())
654 {
655 if (app_.isStopping())
656 return;
657
658 {
659 std::lock_guard ml(mCompleteLock);
660 mCompleteLedgers.insert(range(minHas, maxHas));
661 }
662 maxHas = minHas;
663 ledgerHashes = app_.getRelationalDatabase().getHashesByIndex(
664 (seq < 500) ? 0 : (seq - 499), seq);
665 it = ledgerHashes.find(seq);
666
667 if (it == ledgerHashes.end())
668 break;
669
670 if (!nodeStore.fetchNodeObject(
671 ledgerHashes.begin()->second.ledgerHash,
672 ledgerHashes.begin()->first))
673 {
674 // The ledger is not backed by the node store
675 JLOG(m_journal.warn()) << "SQL DB ledger sequence " << seq
676 << " mismatches node store";
677 break;
678 }
679 }
680
681 if (it->second.ledgerHash != prevHash)
682 break;
683
684 prevHash = it->second.parentHash;
685 }
686
687 {
688 std::lock_guard ml(mCompleteLock);
689 mCompleteLedgers.insert(range(minHas, maxHas));
690 }
691 {
692 std::lock_guard ml(m_mutex);
693 mFillInProgress = 0;
694 tryAdvance();
695 }
696}
697
700void
701LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason)
702{
703 LedgerIndex const ledgerIndex = missing + 1;
704
705 auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)};
706 if (!haveHash || haveHash->isZero())
707 {
708 JLOG(m_journal.error())
709 << "No hash for fetch pack. Missing Index " << missing;
710 return;
711 }
712
713 // Select target Peer based on highest score. The score is randomized
714 // but biased in favor of Peers with low latency.
715 std::shared_ptr<Peer> target;
716 {
717 int maxScore = 0;
718 auto peerList = app_.overlay().getActivePeers();
719 for (auto const& peer : peerList)
720 {
721 if (peer->hasRange(missing, missing + 1))
722 {
723 int score = peer->getScore(true);
724 if (!target || (score > maxScore))
725 {
726 target = peer;
727 maxScore = score;
728 }
729 }
730 }
731 }
732
733 if (target)
734 {
735 protocol::TMGetObjectByHash tmBH;
736 tmBH.set_query(true);
737 tmBH.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
738 tmBH.set_ledgerhash(haveHash->begin(), 32);
739 auto packet = std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS);
740
741 target->send(packet);
742 JLOG(m_journal.trace()) << "Requested fetch pack for " << missing;
743 }
744 else
745 JLOG(m_journal.debug()) << "No peer for fetch pack";
746}
747
748void
749LedgerMaster::fixMismatch(ReadView const& ledger)
750{
751 int invalidate = 0;
752 std::optional<uint256> hash;
753
754 for (std::uint32_t lSeq = ledger.info().seq - 1; lSeq > 0; --lSeq)
755 {
756 if (haveLedger(lSeq))
757 {
758 try
759 {
760 hash = hashOfSeq(ledger, lSeq, m_journal);
761 }
762 catch (std::exception const& ex)
763 {
764 JLOG(m_journal.warn())
765 << "fixMismatch encounters partial ledger. Exception: "
766 << ex.what();
767 clearLedger(lSeq);
768 return;
769 }
770
771 if (hash)
772 {
773 // try to close the seam
774 auto otherLedger = getLedgerBySeq(lSeq);
775
776 if (otherLedger && (otherLedger->info().hash == *hash))
777 {
778 // we closed the seam
779 if (invalidate != 0)
780 {
781 JLOG(m_journal.warn())
782 << "Match at " << lSeq << ", " << invalidate
783 << " prior ledgers invalidated";
784 }
785
786 return;
787 }
788 }
789
790 clearLedger(lSeq);
791 ++invalidate;
792 }
793 }
794
795 // all prior ledgers invalidated
796 if (invalidate != 0)
797 {
798 JLOG(m_journal.warn())
799 << "All " << invalidate << " prior ledgers invalidated";
800 }
801}
802
803void
804LedgerMaster::setFullLedger(
805 std::shared_ptr<Ledger const> const& ledger,
806 bool isSynchronous,
807 bool isCurrent)
808{
809 // A new ledger has been accepted as part of the trusted chain
810 JLOG(m_journal.debug()) << "Ledger " << ledger->info().seq
811 << " accepted :" << ledger->info().hash;
812 XRPL_ASSERT(
813 ledger->stateMap().getHash().isNonZero(),
814 "ripple::LedgerMaster::setFullLedger : nonzero ledger state hash");
815
816 ledger->setValidated();
817 ledger->setFull();
818
819 if (isCurrent)
820 mLedgerHistory.insert(ledger, true);
821
822 {
823 // Check the SQL database's entry for the sequence before this
824 // ledger, if it's not this ledger's parent, invalidate it
825 uint256 prevHash =
826 app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1);
827 if (prevHash.isNonZero() && prevHash != ledger->info().parentHash)
828 clearLedger(ledger->info().seq - 1);
829 }
830
831 pendSaveValidated(app_, ledger, isSynchronous, isCurrent);
832
833 {
834 std::lock_guard ml(mCompleteLock);
835 mCompleteLedgers.insert(ledger->info().seq);
836 }
837
838 {
839 std::lock_guard ml(m_mutex);
840
841 if (ledger->info().seq > mValidLedgerSeq)
842 setValidLedger(ledger);
843 if (!mPubLedger)
844 {
845 setPubLedger(ledger);
846 app_.getOrderBookDB().setup(ledger);
847 }
848
849 if (ledger->info().seq != 0 && haveLedger(ledger->info().seq - 1))
850 {
851 // we think we have the previous ledger, double check
852 auto prevLedger = getLedgerBySeq(ledger->info().seq - 1);
853
854 if (!prevLedger ||
855 (prevLedger->info().hash != ledger->info().parentHash))
856 {
857 JLOG(m_journal.warn())
858 << "Acquired ledger invalidates previous ledger: "
859 << (prevLedger ? "hashMismatch" : "missingLedger");
860 fixMismatch(*ledger);
861 }
862 }
863 }
864}
865
866void
867LedgerMaster::failedSave(std::uint32_t seq, uint256 const& hash)
868{
869 clearLedger(seq);
870 app_.getInboundLedgers().acquire(hash, seq, InboundLedger::Reason::GENERIC);
871}
872
873// Check if the specified ledger can become the new last fully-validated
874// ledger.
875void
876LedgerMaster::checkAccept(uint256 const& hash, std::uint32_t seq)
877{
878 std::size_t valCount = 0;
879
880 if (seq != 0)
881 {
882 // Ledger is too old
883 if (seq < mValidLedgerSeq)
884 return;
885
886 auto validations = app_.validators().negativeUNLFilter(
887 app_.getValidations().getTrustedForLedger(hash, seq));
888 valCount = validations.size();
889 if (valCount >= app_.validators().quorum())
890 {
891 std::lock_guard ml(m_mutex);
892 if (seq > mLastValidLedger.second)
893 mLastValidLedger = std::make_pair(hash, seq);
894 }
895
896 if (seq == mValidLedgerSeq)
897 return;
898
899 // Ledger could match the ledger we're already building
900 if (seq == mBuildingLedgerSeq)
901 return;
902 }
903
904 auto ledger = mLedgerHistory.getLedgerByHash(hash);
905
906 if (!ledger)
907 {
908 if ((seq != 0) && (getValidLedgerIndex() == 0))
909 {
910 // Set peers converged early if we can
911 if (valCount >= app_.validators().quorum())
912 app_.overlay().checkTracking(seq);
913 }
914
915 // FIXME: We may not want to fetch a ledger with just one
916 // trusted validation
917 ledger = app_.getInboundLedgers().acquire(
918 hash, seq, InboundLedger::Reason::CONSENSUS);
919 }
920
921 if (ledger)
922 checkAccept(ledger);
923}
924
935
936void
937LedgerMaster::checkAccept(std::shared_ptr<Ledger const> const& ledger)
938{
939 // Can we accept this ledger as our new last fully-validated ledger
940
941 if (!canBeCurrent(ledger))
942 return;
943
944 // Can we advance the last fully-validated ledger? If so, can we
945 // publish?
946 std::lock_guard ml(m_mutex);
947
948 if (ledger->info().seq <= mValidLedgerSeq)
949 return;
950
951 auto const minVal = getNeededValidations();
952 auto validations = app_.validators().negativeUNLFilter(
953 app_.getValidations().getTrustedForLedger(
954 ledger->info().hash, ledger->info().seq));
955 auto const tvc = validations.size();
956 if (tvc < minVal) // nothing we can do
957 {
958 JLOG(m_journal.trace())
959 << "Only " << tvc << " validations for " << ledger->info().hash;
960 return;
961 }
962
963 JLOG(m_journal.info()) << "Advancing accepted ledger to "
964 << ledger->info().seq << " with >= " << minVal
965 << " validations";
966
967 ledger->setValidated();
968 ledger->setFull();
969 setValidLedger(ledger);
970 if (!mPubLedger)
971 {
972 pendSaveValidated(app_, ledger, true, true);
973 setPubLedger(ledger);
974 app_.getOrderBookDB().setup(ledger);
975 }
976
977 std::uint32_t const base = app_.getFeeTrack().getLoadBase();
978 auto fees = app_.getValidations().fees(ledger->info().hash, base);
979 {
980 auto fees2 =
981 app_.getValidations().fees(ledger->info().parentHash, base);
982 fees.reserve(fees.size() + fees2.size());
983 std::copy(fees2.begin(), fees2.end(), std::back_inserter(fees));
984 }
985 std::uint32_t fee;
986 if (!fees.empty())
987 {
988 std::sort(fees.begin(), fees.end());
989 if (auto stream = m_journal.debug())
990 {
991 std::stringstream s;
992 s << "Received fees from validations: (" << fees.size() << ") ";
993 for (auto const fee1 : fees)
994 {
995 s << " " << fee1;
996 }
997 stream << s.str();
998 }
999 fee = fees[fees.size() / 2]; // median
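// Note: with an even number of samples this picks the upper of the two
// middle values rather than averaging them.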
1000 }
1001 else
1002 {
1003 fee = base;
1004 }
1005
1006 app_.getFeeTrack().setRemoteFee(fee);
1007
1008 tryAdvance();
1009
1010 if (ledger->seq() % 256 == 0)
1011 {
1012 // Check if the majority of validators run a higher version rippled
1013 // software. If so print a warning.
1014 //
1015 // Once the HardenedValidations amendment is enabled, validators include
1016 // their rippled software version in the validation messages of every
1017 // (flag - 1) ledger. We wait for one ledger time before checking the
1018 // version information to accumulate more validation messages.
1019
1020 auto currentTime = app_.timeKeeper().now();
1021 bool needPrint = false;
1022
1023 // The variable upgradeWarningPrevTime_ will be set when and only when
1024 // the warning is printed.
1025 if (upgradeWarningPrevTime_ == TimeKeeper::time_point())
1026 {
1027 // Have not printed the warning before, check if need to print.
1028 auto const vals = app_.getValidations().getTrustedForLedger(
1029 ledger->info().parentHash, ledger->info().seq - 1);
1030 std::size_t higherVersionCount = 0;
1031 std::size_t rippledCount = 0;
1032 for (auto const& v : vals)
1033 {
1034 if (v->isFieldPresent(sfServerVersion))
1035 {
1036 auto version = v->getFieldU64(sfServerVersion);
1037 higherVersionCount +=
1038 BuildInfo::isNewerVersion(version) ? 1 : 0;
1039 rippledCount +=
1040 BuildInfo::isRippledVersion(version) ? 1 : 0;
1041 }
1042 }
1043 // We report only if (1) we have accumulated validation messages
1044 // from 90% validators from the UNL, (2) 60% of validators
1045 // running the rippled implementation have higher version numbers,
1046 // and (3) the calculation won't cause divide-by-zero.
1047 if (higherVersionCount > 0 && rippledCount > 0)
1048 {
1049 constexpr std::size_t reportingPercent = 90;
1050 constexpr std::size_t cutoffPercent = 60;
1051 auto const unlSize{
1052 app_.validators().getQuorumKeys().second.size()};
1053 needPrint = unlSize > 0 &&
1054 calculatePercent(vals.size(), unlSize) >=
1055 reportingPercent &&
1056 calculatePercent(higherVersionCount, rippledCount) >=
1057 cutoffPercent;
1058 }
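// For example, with a 35-entry UNL the warning requires version data in
// validations from roughly 90% of the UNL (about 32 validators) and a
// newer version reported by at least 60% of the rippled validators among
// them.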
1059 }
1060 // To throttle the warning messages, instead of printing a warning
1061 // every flag ledger, we print every week.
1062 else if (currentTime - upgradeWarningPrevTime_ >= weeks{1})
1063 {
1064 // Printed the warning before, and assuming most validators
1065 // do not downgrade, we keep printing the warning
1066 // until the local server is restarted.
1067 needPrint = true;
1068 }
1069
1070 if (needPrint)
1071 {
1072 upgradeWarningPrevTime_ = currentTime;
1073 auto const upgradeMsg =
1074 "Check for upgrade: "
1075 "A majority of trusted validators are "
1076 "running a newer version.";
1077 std::cerr << upgradeMsg << std::endl;
1078 JLOG(m_journal.error()) << upgradeMsg;
1079 }
1080 }
1081}
1082
1084void
1085LedgerMaster::consensusBuilt(
1086 std::shared_ptr<Ledger const> const& ledger,
1087 uint256 const& consensusHash,
1088 Json::Value consensus)
1089{
1090 // Because we just built a ledger, we are no longer building one
1091 setBuildingLedger(0);
1092
1093 // No need to process validations in standalone mode
1094 if (standalone_)
1095 return;
1096
1097 mLedgerHistory.builtLedger(ledger, consensusHash, std::move(consensus));
1098
1099 if (ledger->info().seq <= mValidLedgerSeq)
1100 {
1101 auto stream = app_.journal("LedgerConsensus").info();
1102 JLOG(stream) << "Consensus built old ledger: " << ledger->info().seq
1103 << " <= " << mValidLedgerSeq;
1104 return;
1105 }
1106
1107 // See if this ledger can be the new fully-validated ledger
1108 checkAccept(ledger);
1109
1110 if (ledger->info().seq <= mValidLedgerSeq)
1111 {
1112 auto stream = app_.journal("LedgerConsensus").debug();
1113 JLOG(stream) << "Consensus ledger fully validated";
1114 return;
1115 }
1116
1117 // This ledger cannot be the new fully-validated ledger, but
1118 // maybe we saved up validations for some other ledger that can be
1119
1120 auto validations = app_.validators().negativeUNLFilter(
1121 app_.getValidations().currentTrusted());
1122
1123 // Track validation counts with sequence numbers
1124 class valSeq
1125 {
1126 public:
1127 valSeq() : valCount_(0), ledgerSeq_(0)
1128 {
1129 ;
1130 }
1131
1132 void
1133 mergeValidation(LedgerIndex seq)
1134 {
1135 valCount_++;
1136
1137 // If we didn't already know the sequence, now we do
1138 if (ledgerSeq_ == 0)
1139 ledgerSeq_ = seq;
1140 }
1141
1142 std::size_t valCount_;
1143 LedgerIndex ledgerSeq_;
1144 };
1145
1146 // Count the number of current, trusted validations
1147 hash_map<uint256, valSeq> count;
1148 for (auto const& v : validations)
1149 {
1150 valSeq& vs = count[v->getLedgerHash()];
1151 vs.mergeValidation(v->getFieldU32(sfLedgerSequence));
1152 }
1153
1154 auto const neededValidations = getNeededValidations();
1155 auto maxSeq = mValidLedgerSeq.load();
1156 auto maxLedger = ledger->info().hash;
1157
1158 // Of the ledgers with sufficient validations,
1159 // find the one with the highest sequence
1160 for (auto& v : count)
1161 if (v.second.valCount_ > neededValidations)
1162 {
1163 // If we still don't know the sequence, get it
1164 if (v.second.ledgerSeq_ == 0)
1165 {
1166 if (auto l = getLedgerByHash(v.first))
1167 v.second.ledgerSeq_ = l->info().seq;
1168 }
1169
1170 if (v.second.ledgerSeq_ > maxSeq)
1171 {
1172 maxSeq = v.second.ledgerSeq_;
1173 maxLedger = v.first;
1174 }
1175 }
1176
1177 if (maxSeq > mValidLedgerSeq)
1178 {
1179 auto stream = app_.journal("LedgerConsensus").debug();
1180 JLOG(stream) << "Consensus triggered check of ledger";
1181 checkAccept(maxLedger, maxSeq);
1182 }
1183}
1184
1185std::optional<LedgerHash>
1186LedgerMaster::getLedgerHashForHistory(
1187 LedgerIndex index,
1188 InboundLedger::Reason reason)
1189{
1190 // Try to get the hash of a ledger we need to fetch for history
1191 std::optional<LedgerHash> ret;
1192 auto const& l{mHistLedger};
1193
1194 if (l && l->info().seq >= index)
1195 {
1196 ret = hashOfSeq(*l, index, m_journal);
1197 if (!ret)
1198 ret = walkHashBySeq(index, l, reason);
1199 }
1200
1201 if (!ret)
1202 ret = walkHashBySeq(index, reason);
1203
1204 return ret;
1205}
1206
1207std::vector<std::shared_ptr<Ledger const>>
1208LedgerMaster::findNewLedgersToPublish(
1209 std::unique_lock<std::recursive_mutex>& sl)
1210{
1211 std::vector<std::shared_ptr<Ledger const>> ret;
1212
1213 JLOG(m_journal.trace()) << "findNewLedgersToPublish<";
1214
1215 // No valid ledger, nothing to do
1216 if (mValidLedger.empty())
1217 {
1218 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1219 return {};
1220 }
1221
1222 if (!mPubLedger)
1223 {
1224 JLOG(m_journal.info())
1225 << "First published ledger will be " << mValidLedgerSeq;
1226 return {mValidLedger.get()};
1227 }
1228
1229 if (mValidLedgerSeq > (mPubLedgerSeq + MAX_LEDGER_GAP))
1230 {
1231 JLOG(m_journal.warn()) << "Gap in validated ledger stream "
1232 << mPubLedgerSeq << " - " << mValidLedgerSeq - 1;
1233
1234 auto valLedger = mValidLedger.get();
1235 ret.push_back(valLedger);
1236 setPubLedger(valLedger);
1237 app_.getOrderBookDB().setup(valLedger);
1238
1239 return {valLedger};
1240 }
1241
1242 if (mValidLedgerSeq == mPubLedgerSeq)
1243 {
1244 JLOG(m_journal.trace()) << "No valid journal, nothing to publish.";
1245 return {};
1246 }
1247
1248 int acqCount = 0;
1249
1250 auto pubSeq = mPubLedgerSeq + 1; // Next sequence to publish
1251 auto valLedger = mValidLedger.get();
1252 std::uint32_t valSeq = valLedger->info().seq;
1253
1254 scope_unlock sul{sl};
1255 try
1256 {
1257 for (std::uint32_t seq = pubSeq; seq <= valSeq; ++seq)
1258 {
1259 JLOG(m_journal.trace())
1260 << "Trying to fetch/publish valid ledger " << seq;
1261
1262 std::shared_ptr<Ledger const> ledger;
1263 // This can throw
1264 auto hash = hashOfSeq(*valLedger, seq, m_journal);
1265 // VFALCO TODO Restructure this code so that zero is not
1266 // used.
1267 if (!hash)
1268 hash = beast::zero; // kludge
1269 if (seq == valSeq)
1270 {
1271 // We need to publish the ledger we just fully validated
1272 ledger = valLedger;
1273 }
1274 else if (hash->isZero())
1275 {
1276 // LCOV_EXCL_START
1277 JLOG(m_journal.fatal()) << "Ledger: " << valSeq
1278 << " does not have hash for " << seq;
1279 UNREACHABLE(
1280 "ripple::LedgerMaster::findNewLedgersToPublish : ledger "
1281 "not found");
1282 // LCOV_EXCL_STOP
1283 }
1284 else
1285 {
1286 ledger = mLedgerHistory.getLedgerByHash(*hash);
1287 }
1288
1289 if (!app_.config().LEDGER_REPLAY)
1290 {
1291 // Can we try to acquire the ledger we need?
1292 if (!ledger && (++acqCount < ledger_fetch_size_))
1293 ledger = app_.getInboundLedgers().acquire(
1294 *hash, seq, InboundLedger::Reason::GENERIC);
1295 }
1296
1297 // Did we acquire the next ledger we need to publish?
1298 if (ledger && (ledger->info().seq == pubSeq))
1299 {
1300 ledger->setValidated();
1301 ret.push_back(ledger);
1302 ++pubSeq;
1303 }
1304 }
1305
1306 JLOG(m_journal.trace())
1307 << "ready to publish " << ret.size() << " ledgers.";
1308 }
1309 catch (std::exception const& ex)
1310 {
1311 JLOG(m_journal.error())
1312 << "Exception while trying to find ledgers to publish: "
1313 << ex.what();
1314 }
1315
1316 if (app_.config().LEDGER_REPLAY)
1317 {
1318 /* Narrow down the gap of ledgers, and try to replay them.
1319 * When replaying a ledger gap, if the local node has
1320 * the start ledger, it saves an expensive InboundLedger
1321 * acquire. If the local node has the finish ledger, it
1322 * saves a skip list acquire.
1323 */
1324 auto const& startLedger = ret.empty() ? mPubLedger : ret.back();
1325 auto finishLedger = valLedger;
1326 while (startLedger->seq() + 1 < finishLedger->seq())
1327 {
1328 if (auto const parent = mLedgerHistory.getLedgerByHash(
1329 finishLedger->info().parentHash);
1330 parent)
1331 {
1332 finishLedger = parent;
1333 }
1334 else
1335 {
1336 auto numberLedgers =
1337 finishLedger->seq() - startLedger->seq() + 1;
1338 JLOG(m_journal.debug())
1339 << "Publish LedgerReplays " << numberLedgers
1340 << " ledgers, from seq=" << startLedger->info().seq << ", "
1341 << startLedger->info().hash
1342 << " to seq=" << finishLedger->info().seq << ", "
1343 << finishLedger->info().hash;
1344 app_.getLedgerReplayer().replay(
1345 InboundLedger::Reason::GENERIC,
1346 finishLedger->info().hash,
1347 numberLedgers);
1348 break;
1349 }
1350 }
1351 }
1352
1353 return ret;
1354}
1355
1356void
1357LedgerMaster::tryAdvance()
1358{
1359 std::lock_guard ml(m_mutex);
1360
1361 // Can't advance without at least one fully-valid ledger
1362 mAdvanceWork = true;
1363 if (!mAdvanceThread && !mValidLedger.empty())
1364 {
1365 mAdvanceThread = true;
1366 app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() {
1367 std::unique_lock sl(m_mutex);
1368
1369 XRPL_ASSERT(
1371 "ripple::LedgerMaster::tryAdvance : has valid ledger");
1372
1373 JLOG(m_journal.trace()) << "advanceThread<";
1374
1375 try
1376 {
1377 doAdvance(sl);
1378 }
1379 catch (std::exception const& ex)
1380 {
1381 JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what();
1382 }
1383
1384 mAdvanceThread = false;
1385 JLOG(m_journal.trace()) << "advanceThread>";
1386 });
1387 }
1388}
1389
1390void
1391LedgerMaster::updatePaths()
1392{
1393 {
1394 std::lock_guard ml(m_mutex);
1395 if (app_.getOPs().isNeedNetworkLedger())
1396 {
1397 --mPathFindThread;
1398 mPathLedger.reset();
1399 JLOG(m_journal.debug()) << "Need network ledger for updating paths";
1400 return;
1401 }
1402 }
1403
1404 while (!app_.getJobQueue().isStopping())
1405 {
1406 JLOG(m_journal.debug()) << "updatePaths running";
1407 std::shared_ptr<ReadView const> lastLedger;
1408 {
1409 std::lock_guard ml(m_mutex);
1410
1411 if (!mValidLedger.empty() &&
1412 (!mPathLedger || (mPathLedger->info().seq != mValidLedgerSeq)))
1413 { // We have a new valid ledger since the last full pathfinding
1414 mPathLedger = mValidLedger.get();
1415 lastLedger = mPathLedger;
1416 }
1417 else if (mPathFindNewRequest)
1418 { // We have a new request but no new ledger
1419 lastLedger = app_.openLedger().current();
1420 }
1421 else
1422 { // Nothing to do
1423 --mPathFindThread;
1424 mPathLedger.reset();
1425 JLOG(m_journal.debug()) << "Nothing to do for updating paths";
1426 return;
1427 }
1428 }
1429
1430 if (!standalone_)
1431 { // don't pathfind with a ledger that's more than 60 seconds old
1432 using namespace std::chrono;
1433 auto age = time_point_cast<seconds>(app_.timeKeeper().closeTime()) -
1434 lastLedger->info().closeTime;
1435 if (age > 1min)
1436 {
1437 JLOG(m_journal.debug())
1438 << "Published ledger too old for updating paths";
1439 std::lock_guard ml(m_mutex);
1440 --mPathFindThread;
1441 mPathLedger.reset();
1442 return;
1443 }
1444 }
1445
1446 try
1447 {
1448 auto& pathRequests = app_.getPathRequests();
1449 {
1450 std::lock_guard ml(m_mutex);
1451 if (!pathRequests.requestsPending())
1452 {
1453 --mPathFindThread;
1454 mPathLedger.reset();
1455 JLOG(m_journal.debug())
1456 << "No path requests found. Nothing to do for updating "
1457 "paths. "
1458 << mPathFindThread << " jobs remaining";
1459 return;
1460 }
1461 }
1462 JLOG(m_journal.debug()) << "Updating paths";
1463 pathRequests.updateAll(lastLedger);
1464
1465 std::lock_guard ml(m_mutex);
1466 if (!pathRequests.requestsPending())
1467 {
1468 JLOG(m_journal.debug())
1469 << "No path requests left. No need for further updating "
1470 "paths";
1471 --mPathFindThread;
1472 mPathLedger.reset();
1473 return;
1474 }
1475 }
1476 catch (SHAMapMissingNode const& mn)
1477 {
1478 JLOG(m_journal.info()) << "During pathfinding: " << mn.what();
1479 if (lastLedger->open())
1480 {
1481 // our parent is the problem
1482 app_.getInboundLedgers().acquire(
1483 lastLedger->info().parentHash,
1484 lastLedger->info().seq - 1,
1485 InboundLedger::Reason::GENERIC);
1486 }
1487 else
1488 {
1489 // this ledger is the problem
1490 app_.getInboundLedgers().acquire(
1491 lastLedger->info().hash,
1492 lastLedger->info().seq,
1493 InboundLedger::Reason::GENERIC);
1494 }
1495 }
1496 }
1497}
1498
1499bool
1500LedgerMaster::newPathRequest()
1501{
1502 std::unique_lock ml(m_mutex);
1503 mPathFindNewRequest = newPFWork("pf:newRequest", ml);
1504 return mPathFindNewRequest;
1505}
1506
1507bool
1508LedgerMaster::isNewPathRequest()
1509{
1510 std::lock_guard ml(m_mutex);
1511 bool const ret = mPathFindNewRequest;
1512 mPathFindNewRequest = false;
1513 return ret;
1514}
1515
1516// If the order book is radically updated, we need to reprocess all
1517// pathfinding requests.
1518bool
1519LedgerMaster::newOrderBookDB()
1520{
1521 std::unique_lock ml(m_mutex);
1522 mPathLedger.reset();
1523
1524 return newPFWork("pf:newOBDB", ml);
1525}
1526
1529bool
1530LedgerMaster::newPFWork(
1531 char const* name,
1532 std::unique_lock<std::recursive_mutex>& ml)
1533{
1534 if (!app_.isStopping() && mPathFindThread < 2 &&
1536 {
1537 JLOG(m_journal.debug())
1538 << "newPFWork: Creating job. path find threads: "
1539 << mPathFindThread;
1540 if (app_.getJobQueue().addJob(
1541 jtUPDATE_PF, name, [this]() { updatePaths(); }))
1542 {
1543 ++mPathFindThread;
1544 }
1545 }
1546 // If we're stopping don't give callers the expectation that their
1547 // request will be fulfilled, even if it may be serviced.
1548 return mPathFindThread > 0 && !app_.isStopping();
1549}
1550
1551std::recursive_mutex&
1552LedgerMaster::peekMutex()
1553{
1554 return m_mutex;
1555}
1556
1557// The current ledger is the ledger we believe new transactions should go in
1558std::shared_ptr<ReadView const>
1559LedgerMaster::getCurrentLedger()
1560{
1561 return app_.openLedger().current();
1562}
1563
1564std::shared_ptr<Ledger const>
1565LedgerMaster::getValidatedLedger()
1566{
1567 return mValidLedger.get();
1568}
1569
1570Rules
1572{
1573 // Once we have a guarantee that there's always a last validated
1574 // ledger then we can dispense with the if.
1575
1576 // Return the Rules from the last validated ledger.
1577 if (auto const ledger = getValidatedLedger())
1578 return ledger->rules();
1579
1580 return Rules(app_.config().features);
1581}
1582
1583// This is the last ledger we published to clients and can lag the validated
1584// ledger.
1585std::shared_ptr<ReadView const>
1586LedgerMaster::getPublishedLedger()
1587{
1588 std::lock_guard lock(m_mutex);
1589 return mPubLedger;
1590}
1591
1592std::string
1593LedgerMaster::getCompleteLedgers()
1594{
1595 std::lock_guard sl(mCompleteLock);
1596 return to_string(mCompleteLedgers);
1597}
1598
1599std::optional<NetClock::time_point>
1600LedgerMaster::getCloseTimeBySeq(LedgerIndex ledgerIndex)
1601{
1602 uint256 hash = getHashBySeq(ledgerIndex);
1603 return hash.isNonZero() ? getCloseTimeByHash(hash, ledgerIndex)
1604 : std::nullopt;
1605}
1606
1607std::optional<NetClock::time_point>
1608LedgerMaster::getCloseTimeByHash(
1609 LedgerHash const& ledgerHash,
1610 std::uint32_t index)
1611{
1612 auto nodeObject = app_.getNodeStore().fetchNodeObject(ledgerHash, index);
1613 if (nodeObject && (nodeObject->getData().size() >= 120))
1614 {
1615 SerialIter it(
1616 nodeObject->getData().data(), nodeObject->getData().size());
1617 if (safe_cast<HashPrefix>(it.get32()) == HashPrefix::ledgerMaster)
1618 {
1619 it.skip(
1620 4 + 8 + 32 + // seq drops parentHash
1621 32 + 32 + 4); // txHash acctHash parentClose
1622 return NetClock::time_point{NetClock::duration{it.get32()}};
1623 }
1624 }
1625
1626 return std::nullopt;
1627}
1628
1629uint256
1630LedgerMaster::getHashBySeq(std::uint32_t index)
1631{
1632 uint256 hash = mLedgerHistory.getLedgerHash(index);
1633
1634 if (hash.isNonZero())
1635 return hash;
1636
1636
1637 return app_.getRelationalDatabase().getHashByIndex(index);
1638}
1639
1640std::optional<LedgerHash>
1641LedgerMaster::walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
1642{
1643 std::optional<LedgerHash> ledgerHash;
1644
1645 if (auto referenceLedger = mValidLedger.get())
1646 ledgerHash = walkHashBySeq(index, referenceLedger, reason);
1647
1648 return ledgerHash;
1649}
1650
1651std::optional<LedgerHash>
1652LedgerMaster::walkHashBySeq(
1653 std::uint32_t index,
1654 std::shared_ptr<ReadView const> const& referenceLedger,
1655 InboundLedger::Reason reason)
1656{
1657 if (!referenceLedger || (referenceLedger->info().seq < index))
1658 {
1659 // Nothing we can do. No validated ledger.
1660 return std::nullopt;
1661 }
1662
1663 // See if the hash for the ledger we need is in the reference ledger
1664 auto ledgerHash = hashOfSeq(*referenceLedger, index, m_journal);
1665 if (ledgerHash)
1666 return ledgerHash;
1667
1668 // The hash is not in the reference ledger. Get another ledger which can
1669 // be located easily and should contain the hash.
1670 LedgerIndex refIndex = getCandidateLedger(index);
1671 auto const refHash = hashOfSeq(*referenceLedger, refIndex, m_journal);
1672 XRPL_ASSERT(refHash, "ripple::LedgerMaster::walkHashBySeq : found ledger");
1673 if (refHash)
1674 {
1675 // Try the hash and sequence of a better reference ledger just found
1676 auto ledger = mLedgerHistory.getLedgerByHash(*refHash);
1677
1678 if (ledger)
1679 {
1680 try
1681 {
1682 ledgerHash = hashOfSeq(*ledger, index, m_journal);
1683 }
1684 catch (SHAMapMissingNode const&)
1685 {
1686 ledger.reset();
1687 }
1688 }
1689
1690 // Try to acquire the complete ledger
1691 if (!ledger)
1692 {
1693 if (auto const l = app_.getInboundLedgers().acquire(
1694 *refHash, refIndex, reason))
1695 {
1696 ledgerHash = hashOfSeq(*l, index, m_journal);
1697 XRPL_ASSERT(
1698 ledgerHash,
1699 "ripple::LedgerMaster::walkHashBySeq : has complete "
1700 "ledger");
1701 }
1702 }
1703 }
1704 return ledgerHash;
1705}
1706
1707std::shared_ptr<Ledger const>
1708LedgerMaster::getLedgerBySeq(std::uint32_t index)
1709{
1710 if (index <= mValidLedgerSeq)
1711 {
1712 // Always prefer a validated ledger
1713 if (auto valid = mValidLedger.get())
1714 {
1715 if (valid->info().seq == index)
1716 return valid;
1717
1718 try
1719 {
1720 auto const hash = hashOfSeq(*valid, index, m_journal);
1721
1722 if (hash)
1723 return mLedgerHistory.getLedgerByHash(*hash);
1724 }
1725 catch (std::exception const&)
1726 {
1727 // Missing nodes are already handled
1728 }
1729 }
1730 }
1731
1732 if (auto ret = mLedgerHistory.getLedgerBySeq(index))
1733 return ret;
1734
1735 auto ret = mClosedLedger.get();
1736 if (ret && (ret->info().seq == index))
1737 return ret;
1738
1739 clearLedger(index);
1740 return {};
1741}
1742
1743std::shared_ptr<Ledger const>
1744LedgerMaster::getLedgerByHash(uint256 const& hash)
1745{
1746 if (auto ret = mLedgerHistory.getLedgerByHash(hash))
1747 return ret;
1748
1749 auto ret = mClosedLedger.get();
1750 if (ret && (ret->info().hash == hash))
1751 return ret;
1752
1753 return {};
1754}
1755
1756void
1757LedgerMaster::setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
1758{
1759 std::lock_guard sl(mCompleteLock);
1760 mCompleteLedgers.insert(range(minV, maxV));
1761}
1762
1763void
1764LedgerMaster::sweep()
1765{
1766 mLedgerHistory.sweep();
1767 fetch_packs_.sweep();
1768}
1769
1770float
1771LedgerMaster::getCacheHitRate()
1772{
1773 return mLedgerHistory.getCacheHitRate();
1774}
1775
1776void
1777LedgerMaster::clearPriorLedgers(LedgerIndex seq)
1778{
1779 std::lock_guard sl(mCompleteLock);
1780 if (seq > 0)
1781 mCompleteLedgers.erase(range(0u, seq - 1));
1782}
1783
1784void
1785LedgerMaster::clearLedgerCachePrior(LedgerIndex seq)
1786{
1787 mLedgerHistory.clearLedgerCachePrior(seq);
1788}
1789
1790void
1791LedgerMaster::takeReplay(std::unique_ptr<LedgerReplay> replay)
1792{
1793 replayData = std::move(replay);
1794}
1795
1796std::unique_ptr<LedgerReplay>
1797LedgerMaster::releaseReplay()
1798{
1799 return std::move(replayData);
1800}
1801
1802void
1803LedgerMaster::fetchForHistory(
1804 std::uint32_t missing,
1805 bool& progress,
1806 InboundLedger::Reason reason,
1807 std::unique_lock<std::recursive_mutex>& sl)
1808{
1809 scope_unlock sul{sl};
1810 if (auto hash = getLedgerHashForHistory(missing, reason))
1811 {
1812 XRPL_ASSERT(
1813 hash->isNonZero(),
1814 "ripple::LedgerMaster::fetchForHistory : found ledger");
1815 auto ledger = getLedgerByHash(*hash);
1816 if (!ledger)
1817 {
1818 if (!app_.getInboundLedgers().isFailure(*hash))
1819 {
1820 ledger =
1821 app_.getInboundLedgers().acquire(*hash, missing, reason);
1822 if (!ledger && missing != fetch_seq_ &&
1823 missing > app_.getNodeStore().earliestLedgerSeq())
1824 {
1825 JLOG(m_journal.trace())
1826 << "fetchForHistory want fetch pack " << missing;
1827 fetch_seq_ = missing;
1828 getFetchPack(missing, reason);
1829 }
1830 else
1831 JLOG(m_journal.trace())
1832 << "fetchForHistory no fetch pack for " << missing;
1833 }
1834 else
1835 JLOG(m_journal.debug())
1836 << "fetchForHistory found failed acquire";
1837 }
1838 if (ledger)
1839 {
1840 auto seq = ledger->info().seq;
1841 XRPL_ASSERT(
1842 seq == missing,
1843 "ripple::LedgerMaster::fetchForHistory : sequence match");
1844 JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq;
1845 setFullLedger(ledger, false, false);
1846 int fillInProgress;
1847 {
1848 std::lock_guard lock(m_mutex);
1849 mHistLedger = ledger;
1850 fillInProgress = mFillInProgress;
1851 }
1852 if (fillInProgress == 0 &&
1853 app_.getRelationalDatabase().getHashByIndex(seq - 1) ==
1854 ledger->info().parentHash)
1855 {
1856 {
1857 // Previous ledger is in DB
1858 std::lock_guard lock(m_mutex);
1859 mFillInProgress = seq;
1860 }
1861 app_.getJobQueue().addJob(
1862 jtADVANCE, "tryFill", [this, ledger]() {
1863 tryFill(ledger);
1864 });
1865 }
1866 progress = true;
1867 }
1868 else
1869 {
1870 std::uint32_t fetchSz;
1871 // Do not fetch ledger sequences lower
1872 // than the earliest ledger sequence
1873 fetchSz = app_.getNodeStore().earliestLedgerSeq();
1874 fetchSz = missing >= fetchSz
1875 ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1)
1876 : 0;
1877 try
1878 {
1879 for (std::uint32_t i = 0; i < fetchSz; ++i)
1880 {
1881 std::uint32_t seq = missing - i;
1882 if (auto h = getLedgerHashForHistory(seq, reason))
1883 {
1884 XRPL_ASSERT(
1885 h->isNonZero(),
1886 "ripple::LedgerMaster::fetchForHistory : "
1887 "prefetched ledger");
1888 app_.getInboundLedgers().acquire(*h, seq, reason);
1889 }
1890 }
1891 }
1892 catch (std::exception const& ex)
1893 {
1894 JLOG(m_journal.warn())
1895 << "Threw while prefetching: " << ex.what();
1896 }
1897 }
1898 }
1899 else
1900 {
1901 JLOG(m_journal.fatal())
1902 << "Can't find ledger following prevMissing " << missing;
1903 JLOG(m_journal.fatal())
1904 << "Pub:" << mPubLedgerSeq << " Val:" << mValidLedgerSeq;
1905 JLOG(m_journal.fatal())
1906 << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers();
1907 JLOG(m_journal.fatal())
1908 << "Acquire reason: "
1909 << (reason == InboundLedger::Reason::HISTORY ? "HISTORY"
1910 : "NOT HISTORY");
1911 clearLedger(missing + 1);
1912 progress = true;
1913 }
1914}
1915
1916// Try to publish ledgers, acquire missing ledgers
1917void
1918LedgerMaster::doAdvance(std::unique_lock<std::recursive_mutex>& sl)
1919{
1920 do
1921 {
1922 mAdvanceWork = false; // If there's work to do, we'll make progress
1923 bool progress = false;
1924
1925 auto const pubLedgers = findNewLedgersToPublish(sl);
1926 if (pubLedgers.empty())
1927 {
1933 {
1934 // We are in sync, so can acquire
1935 InboundLedger::Reason reason = InboundLedger::Reason::HISTORY;
1936 std::optional<std::uint32_t> missing;
1937 {
1938 std::lock_guard sl(mCompleteLock);
1939 missing = prevMissing(
1940 mCompleteLedgers,
1941 mPubLedger->info().seq,
1942 app_.getNodeStore().earliestLedgerSeq());
1943 }
1944 if (missing)
1945 {
1946 JLOG(m_journal.trace())
1947 << "tryAdvance discovered missing " << *missing;
1948 if ((mFillInProgress == 0 || *missing > mFillInProgress) &&
1949 shouldAcquire(
1950 mValidLedgerSeq,
1951 ledger_history_,
1952 app_.getSHAMapStore().minimumOnline(),
1953 *missing,
1954 m_journal))
1955 {
1956 JLOG(m_journal.trace())
1957 << "advanceThread should acquire";
1958 }
1959 else
1960 missing = std::nullopt;
1961 }
1962 if (missing)
1963 {
1964 fetchForHistory(*missing, progress, reason, sl);
1965 if (mValidLedgerSeq != mPubLedgerSeq)
1966 {
1967 JLOG(m_journal.debug())
1968 << "tryAdvance found last valid changed";
1969 progress = true;
1970 }
1971 }
1972 }
1973 else
1974 {
1975 mHistLedger.reset();
1976 JLOG(m_journal.trace()) << "tryAdvance not fetching history";
1977 }
1978 }
1979 else
1980 {
1981 JLOG(m_journal.trace()) << "tryAdvance found " << pubLedgers.size()
1982 << " ledgers to publish";
1983 for (auto const& ledger : pubLedgers)
1984 {
1985 {
1986 scope_unlock sul{sl};
1987 JLOG(m_journal.debug())
1988 << "tryAdvance publishing seq " << ledger->info().seq;
1989 setFullLedger(ledger, true, true);
1990 }
1991
1992 setPubLedger(ledger);
1993
1994 {
1995 scope_unlock sul{sl};
1996 app_.getOPs().pubLedger(ledger);
1997 }
1998 }
1999
2000 app_.getOPs().clearNeedNetworkLedger();
2001 progress = newPFWork("pf:newLedger", sl);
2002 }
2003 if (progress)
2004 mAdvanceWork = true;
2005 } while (mAdvanceWork);
2006}
2007
2008void
2009LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr<Blob> data)
2010{
2011 fetch_packs_.canonicalize_replace_client(hash, data);
2012}
2013
2014std::optional<Blob>
2015LedgerMaster::getFetchPack(uint256 const& hash)
2016{
2017 Blob data;
2018 if (fetch_packs_.retrieve(hash, data))
2019 {
2020 fetch_packs_.del(hash, false);
2021 if (hash == sha512Half(makeSlice(data)))
2022 return data;
2023 }
2024 return std::nullopt;
2025}
2026
2027void
2028LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq)
2029{
2030 if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire))
2031 {
2032 app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() {
2033 app_.getInboundLedgers().gotFetchPack();
2034 mGotFetchPackThread.clear(std::memory_order_release);
2035 });
2036 }
2037}
2038
2064static void
2065populateFetchPack(
2066 SHAMap const& want,
2067 SHAMap const* have,
2068 std::uint32_t cnt,
2069 protocol::TMGetObjectByHash* into,
2070 std::uint32_t seq,
2071 bool withLeaves = true)
2072{
2073 XRPL_ASSERT(cnt, "ripple::populateFetchPack : nonzero count input");
2074
2075 Serializer s(1024);
2076
2077 want.visitDifferences(
2078 have,
2079 [&s, withLeaves, &cnt, into, seq](SHAMapTreeNode const& n) -> bool {
2080 if (!withLeaves && n.isLeaf())
2081 return true;
2082
2083 s.erase();
2085
2086 auto const& hash = n.getHash().as_uint256();
2087
2088 protocol::TMIndexedObject* obj = into->add_objects();
2089 obj->set_ledgerseq(seq);
2090 obj->set_hash(hash.data(), hash.size());
2091 obj->set_data(s.getDataPtr(), s.getLength());
2092
2093 return --cnt != 0;
2094 });
2095}
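// populateFetchPack visits only the nodes present in 'want' but absent from
// 'have' (or every node when 'have' is null), serializing each node with its
// hash and the requesting ledger sequence into the reply until 'cnt' nodes
// have been added.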
2096
2097void
2098LedgerMaster::makeFetchPack(
2099 std::weak_ptr<Peer> const& wPeer,
2100 std::shared_ptr<protocol::TMGetObjectByHash> const& request,
2101 uint256 haveLedgerHash,
2102 UptimeClock::time_point uptime)
2103{
2104 using namespace std::chrono_literals;
2105 if (UptimeClock::now() > uptime + 1s)
2106 {
2107 JLOG(m_journal.info()) << "Fetch pack request got stale";
2108 return;
2109 }
2110
2111 if (app_.getFeeTrack().isLoadedLocal())
2112 {
2113 JLOG(m_journal.info()) << "Too busy to make fetch pack";
2114 return;
2115 }
2116
2117 auto peer = wPeer.lock();
2118
2119 if (!peer)
2120 return;
2121
2122 auto have = getLedgerByHash(haveLedgerHash);
2123
2124 if (!have)
2125 {
2126 JLOG(m_journal.info())
2127 << "Peer requests fetch pack for ledger we don't have: " << have;
2128 peer->charge(Resource::feeRequestNoReply, "get_object ledger");
2129 return;
2130 }
2131
2132 if (have->open())
2133 {
2134 JLOG(m_journal.warn())
2135 << "Peer requests fetch pack from open ledger: " << have;
2136 peer->charge(Resource::feeMalformedRequest, "get_object ledger open");
2137 return;
2138 }
2139
2140 if (have->info().seq < getEarliestFetch())
2141 {
2142 JLOG(m_journal.debug()) << "Peer requests fetch pack that is too early";
2143 peer->charge(Resource::feeMalformedRequest, "get_object ledger early");
2144 return;
2145 }
2146
2147 auto want = getLedgerByHash(have->info().parentHash);
2148
2149 if (!want)
2150 {
2151 JLOG(m_journal.info())
2152 << "Peer requests fetch pack for ledger whose predecessor we "
2153 << "don't have: " << have;
2154 peer->charge(
2155 Resource::feeRequestNoReply, "get_object ledger no parent");
2156 return;
2157 }
2158
2159 try
2160 {
2161 Serializer hdr(128);
2162
2163 protocol::TMGetObjectByHash reply;
2164 reply.set_query(false);
2165
2166 if (request->has_seq())
2167 reply.set_seq(request->seq());
2168
2169 reply.set_ledgerhash(request->ledgerhash());
2170 reply.set_type(protocol::TMGetObjectByHash::otFETCH_PACK);
2171
2172 // Building a fetch pack:
2173 // 1. Add the header for the requested ledger.
2174 // 2. Add the nodes for the AccountStateMap of that ledger.
2175 // 3. If there are transactions, add the nodes for the
2176 // transactions of the ledger.
2177 // 4. If the FetchPack now contains at least 512 entries then stop.
2178 // 5. If not very much time has elapsed, then loop back and repeat
2179 // the same process adding the previous ledger to the FetchPack.
2180 do
2181 {
2182 std::uint32_t lSeq = want->info().seq;
2183
2184 {
2185 // Serialize the ledger header:
2186 hdr.erase();
2187
2188 hdr.add32(HashPrefix::ledgerMaster);
2189 addRaw(want->info(), hdr);
2190
2191 // Add the data
2192 protocol::TMIndexedObject* obj = reply.add_objects();
2193 obj->set_hash(
2194 want->info().hash.data(), want->info().hash.size());
2195 obj->set_data(hdr.getDataPtr(), hdr.getLength());
2196 obj->set_ledgerseq(lSeq);
2197 }
2198
2199 populateFetchPack(
2200 want->stateMap(), &have->stateMap(), 16384, &reply, lSeq);
2201
2202 // We use nullptr here because transaction maps are per ledger
2203 // and so the requestor is unlikely to already have it.
2204 if (want->info().txHash.isNonZero())
2205 populateFetchPack(want->txMap(), nullptr, 512, &reply, lSeq);
2206
2207 if (reply.objects().size() >= 512)
2208 break;
2209
2210 have = std::move(want);
2211 want = getLedgerByHash(have->info().parentHash);
2212 } while (want && UptimeClock::now() <= uptime + 1s);
2213
2214 auto msg = std::make_shared<Message>(reply, protocol::mtGET_OBJECTS);
2215
2216 JLOG(m_journal.info())
2217 << "Built fetch pack with " << reply.objects().size() << " nodes ("
2218 << msg->getBufferSize() << " bytes)";
2219
2220 peer->send(msg);
2221 }
2222 catch (std::exception const& ex)
2223 {
2224 JLOG(m_journal.warn())
2225 << "Exception building fetch pack. Exception: " << ex.what();
2226 }
2227}
2228
2229std::size_t
2230LedgerMaster::getFetchPackCacheSize() const
2231{
2232 return fetch_packs_.getCacheSize();
2233}
2234
2235// Returns the minimum ledger sequence in SQL database, if any.
2236std::optional<LedgerIndex>
2237LedgerMaster::minSqlSeq()
2238{
2239 return app_.getRelationalDatabase().getMinLedgerSeq();
2240}
2241
2242std::optional<uint256>
2243LedgerMaster::txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
2244{
2245 uint32_t first = 0, last = 0;
2246
2247 if (!getValidatedRange(first, last) || last < ledgerSeq)
2248 return {};
2249
2250 auto const lgr = getLedgerBySeq(ledgerSeq);
2251 if (!lgr || lgr->txs.empty())
2252 return {};
2253
2254 for (auto it = lgr->txs.begin(); it != lgr->txs.end(); ++it)
2255 if (it->first && it->second &&
2256 it->second->isFieldPresent(sfTransactionIndex) &&
2257 it->second->getFieldU32(sfTransactionIndex) == txnIndex)
2258 return it->first->getTransactionID();
2259
2260 return {};
2261}
2262
2263} // namespace ripple
T back(T... args)
T back_inserter(T... args)
T begin(T... args)
Represents a JSON value.
Definition json_value.h:149
Provide a light-weight way to check active() before string formatting.
Definition Journal.h:205
A generic endpoint for log messages.
Definition Journal.h:60
Stream fatal() const
Definition Journal.h:352
Stream error() const
Definition Journal.h:346
Stream debug() const
Definition Journal.h:328
Stream info() const
Definition Journal.h:334
Stream trace() const
Severity stream access functions.
Definition Journal.h:322
Stream warn() const
Definition Journal.h:340
typename Clock::time_point time_point
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual bool hasUnsupportedEnabled() const =0
returns true if one or more amendments on the network have been enabled that this server does not sup...
void doValidatedLedger(std::shared_ptr< ReadView const > const &lastValidatedLedger)
Called when a new fully-validated ledger is accepted.
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual SHAMapStore & getSHAMapStore()=0
virtual bool isStopping() const =0
virtual NodeStore::Database & getNodeStore()=0
virtual RCLValidations & getValidations()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual LedgerReplayer & getLedgerReplayer()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual PathRequests & getPathRequests()=0
virtual LedgerIndex getMaxDisallowedLedger()=0
Ensure that a newly-started validator does not sign proposals older than the last ledger it persisted...
virtual AmendmentTable & getAmendmentTable()=0
virtual PendingSaves & pendingSaves()=0
Holds transactions which were deferred to the next pass of consensus.
void insert(std::shared_ptr< STTx const > const &txn)
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
bool LEDGER_REPLAY
Definition Config.h:223
std::unordered_set< uint256, beast::uhash<> > features
Definition Config.h:276
virtual std::shared_ptr< Ledger const > acquire(uint256 const &hash, std::uint32_t seq, InboundLedger::Reason)=0
virtual bool isFailure(uint256 const &h)=0
bool isStopping() const
Definition JobQueue.h:232
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition JobQueue.cpp:142
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:168
float getCacheHitRate()
Get the ledgers_by_hash cache hit rate.
void builtLedger(std::shared_ptr< Ledger const > const &, uint256 const &consensusHash, Json::Value)
Report that we have locally built a particular ledger.
void sweep()
Remove stale cache entries.
LedgerHash getLedgerHash(LedgerIndex ledgerIndex)
Get a ledger's hash given its sequence number.
void clearLedgerCachePrior(LedgerIndex seq)
std::shared_ptr< Ledger const > getLedgerBySeq(LedgerIndex ledgerIndex)
Get a ledger given its sequence number.
bool insert(std::shared_ptr< Ledger const > const &ledger, bool validated)
Track a ledger.
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
Repair a hash to index mapping.
void validatedLedger(std::shared_ptr< Ledger const > const &, std::optional< uint256 > const &consensusHash)
Report that we have validated a particular ledger.
std::shared_ptr< Ledger const > getLedgerByHash(LedgerHash const &ledgerHash)
Retrieve a ledger given its hash.
std::shared_ptr< Ledger const > get()
void set(std::shared_ptr< Ledger const > ledger)
bool haveLedger(std::uint32_t seq)
std::shared_ptr< Ledger const > getValidatedLedger()
void clearLedgerCachePrior(LedgerIndex seq)
RangeSet< std::uint32_t > mCompleteLedgers
void setBuildingLedger(LedgerIndex index)
std::unique_ptr< LedgerReplay > releaseReplay()
void failedSave(std::uint32_t seq, uint256 const &hash)
void takeReplay(std::unique_ptr< LedgerReplay > replay)
std::uint32_t const ledger_history_
void addHeldTransaction(std::shared_ptr< Transaction > const &trans)
void checkAccept(std::shared_ptr< Ledger const > const &ledger)
std::optional< NetClock::time_point > getCloseTimeByHash(LedgerHash const &ledgerHash, LedgerIndex ledgerIndex)
std::size_t getNeededValidations()
Determines how many validations are needed to fully validate a ledger.
std::unique_ptr< LedgerReplay > replayData
void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV)
TimeKeeper::time_point upgradeWarningPrevTime_
LedgerHistory mLedgerHistory
std::optional< NetClock::time_point > getCloseTimeBySeq(LedgerIndex ledgerIndex)
void fixMismatch(ReadView const &ledger)
std::atomic< LedgerIndex > mPubLedgerSeq
void clearPriorLedgers(LedgerIndex seq)
std::shared_ptr< Ledger const > mPubLedger
void makeFetchPack(std::weak_ptr< Peer > const &wPeer, std::shared_ptr< protocol::TMGetObjectByHash > const &request, uint256 haveLedgerHash, UptimeClock::time_point uptime)
std::atomic< LedgerIndex > mBuildingLedgerSeq
std::shared_ptr< ReadView const > getCurrentLedger()
void tryFill(std::shared_ptr< Ledger const > ledger)
std::uint32_t const fetch_depth_
bool canBeCurrent(std::shared_ptr< Ledger const > const &ledger)
Check the sequence number and parent close time of a ledger against our clock and last validated ledg...
bool isValidated(ReadView const &ledger)
std::uint32_t getEarliestFetch()
std::recursive_mutex m_mutex
std::optional< LedgerHash > walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason)
Walk to a ledger's hash using the skip list.
uint256 getHashBySeq(std::uint32_t index)
Get a ledger's hash by sequence number using the cache.
std::shared_ptr< STTx const > popAcctTransaction(std::shared_ptr< STTx const > const &tx)
Get the next transaction held for a particular account if any.
LedgerIndex const max_ledger_difference_
bool fixIndex(LedgerIndex ledgerIndex, LedgerHash const &ledgerHash)
TaggedCache< uint256, Blob > fetch_packs_
bool newPFWork(char const *name, std::unique_lock< std::recursive_mutex > &)
A thread needs to be dispatched to handle pathfinding work of some kind.
bool isCaughtUp(std::string &reason)
void setPubLedger(std::shared_ptr< Ledger const > const &l)
std::optional< uint256 > txnIdFromIndex(uint32_t ledgerSeq, uint32_t txnIndex)
beast::Journal m_journal
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
void clearLedger(std::uint32_t seq)
std::pair< uint256, LedgerIndex > mLastValidLedger
std::shared_ptr< Ledger const > getClosedLedger()
std::optional< LedgerIndex > minSqlSeq()
void setFullLedger(std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
LedgerMaster(Application &app, Stopwatch &stopwatch, beast::insight::Collector::ptr const &collector, beast::Journal journal)
std::atomic< std::uint32_t > mValidLedgerSign
CanonicalTXSet mHeldTransactions
std::uint32_t const ledger_fetch_size_
void applyHeldTransactions()
Apply held transactions to the open ledger. This is normally called as we close the ledger.
std::chrono::seconds getPublishedLedgerAge()
std::shared_ptr< Ledger const > mHistLedger
std::recursive_mutex mCompleteLock
std::string getCompleteLedgers()
std::atomic< LedgerIndex > mValidLedgerSeq
std::size_t getFetchPackCacheSize() const
bool getFullValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::optional< Blob > getFetchPack(uint256 const &hash) override
Retrieves partial ledger data for the corresponding hash from peers.
void gotFetchPack(bool progress, std::uint32_t seq)
std::recursive_mutex & peekMutex()
void consensusBuilt(std::shared_ptr< Ledger const > const &ledger, uint256 const &consensusHash, Json::Value consensus)
Report that the consensus process built a particular ledger.
std::shared_ptr< Ledger const > mPathLedger
void setValidLedger(std::shared_ptr< Ledger const > const &l)
std::optional< LedgerHash > getLedgerHashForHistory(LedgerIndex index, InboundLedger::Reason reason)
void addFetchPack(uint256 const &hash, std::shared_ptr< Blob > data)
std::atomic< std::uint32_t > mPubLedgerClose
void switchLCL(std::shared_ptr< Ledger const > const &lastClosed)
LedgerHolder mValidLedger
std::shared_ptr< ReadView const > getPublishedLedger()
std::atomic_flag mGotFetchPackThread
void doAdvance(std::unique_lock< std::recursive_mutex > &)
LedgerHolder mClosedLedger
bool storeLedger(std::shared_ptr< Ledger const > ledger)
std::vector< std::shared_ptr< Ledger const > > findNewLedgersToPublish(std::unique_lock< std::recursive_mutex > &)
LedgerIndex getCurrentLedgerIndex()
bool isCompatible(ReadView const &, beast::Journal::Stream, char const *reason)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
void fetchForHistory(std::uint32_t missing, bool &progress, InboundLedger::Reason reason, std::unique_lock< std::recursive_mutex > &)
std::shared_ptr< Ledger const > getLedgerByHash(uint256 const &hash)
std::uint32_t fetch_seq_
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void replay(InboundLedger::Reason r, uint256 const &finishLedgerHash, std::uint32_t totalNumLedgers)
Replay a range of ledgers.
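A brief usage sketch of replay (the Reason value, hash variable, and ledger count are illustrative assumptions):

    // Hedged sketch: replay the chain of ledgers that ends at a known hash;
    // finishHash names the last ledger and 4 is the total number to replay.
    ledgerMaster.replay(InboundLedger::Reason::GENERIC, finishHash, 4);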
void setRemoteFee(std::uint32_t f)
bool isLoadedLocal() const
std::uint32_t getLoadBase() const
virtual bool isBlocked()=0
virtual void setAmendmentWarned()=0
virtual void setAmendmentBlocked()=0
virtual void clearNeedNetworkLedger()=0
virtual bool isAmendmentWarned()=0
virtual bool isNeedNetworkLedger()=0
virtual void updateLocalTx(ReadView const &newValidLedger)=0
virtual void clearAmendmentWarned()=0
virtual void processTransactionSet(CanonicalTXSet const &set)=0
Process a set of transactions synchronously, ensuring that they are processed in one batch.
virtual void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted)=0
Persistency layer for NodeObject.
Definition Database.h:51
std::shared_ptr< NodeObject > fetchNodeObject(uint256 const &hash, std::uint32_t ledgerSeq=0, FetchType fetchType=FetchType::synchronous, bool duplicate=false)
Fetch a node object.
Definition Database.cpp:240
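A brief usage sketch of fetchNodeObject (the node-store accessor, hash, sequence, and process() helper are illustrative assumptions; the remaining parameters keep their defaults):

    // Hedged sketch: synchronous fetch from the node store; a null result
    // means the object is not available locally.
    if (auto const obj = app.getNodeStore().fetchNodeObject(nodeHash, ledgerSeq))
        process(obj->getData());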
virtual std::int32_t getWriteLoad() const =0
Retrieve the estimated number of pending write operations.
std::uint32_t earliestLedgerSeq() const noexcept
Definition Database.h:221
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
void setup(std::shared_ptr< ReadView const > const &ledger)
virtual PeerSequence getActivePeers() const =0
Returns a sequence representing the current list of peers.
virtual void checkTracking(std::uint32_t index)=0
Calls the checkTracking function on each peer.
bool requestsPending() const
std::map< LedgerIndex, bool > getSnapshot() const
Get a snapshot of the pending saves.
A view into a ledger.
Definition ReadView.h:51
virtual bool open() const =0
Returns true if this reflects an open ledger.
virtual LedgerInfo const & info() const =0
Returns information about the ledger.
virtual std::optional< LedgerHashPair > getHashesByIndex(LedgerIndex ledgerIndex)=0
Returns the hashes of the ledger and its parent as specified by the ledgerIndex.
virtual std::optional< LedgerIndex > getMinLedgerSeq()=0
Returns the minimum ledger sequence in the Ledgers table.
virtual uint256 getHashByIndex(LedgerIndex ledgerIndex)=0
Returns the hash of the ledger with the given sequence.
Rules controlling protocol behavior.
Definition Rules.h:38
uint256 const & as_uint256() const
Definition SHAMapHash.h:44
virtual void onLedgerClosed(std::shared_ptr< Ledger const > const &ledger)=0
Called by LedgerMaster every time a ledger validates.
virtual std::optional< LedgerIndex > minimumOnline() const =0
The minimum ledger to try and maintain in our database.
virtual bool isLeaf() const =0
Determines if this is a leaf node.
SHAMapHash const & getHash() const
Return the hash of this node.
virtual void serializeWithPrefix(Serializer &) const =0
Serialize the node in a format appropriate for hashing.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition SHAMap.h:97
void visitDifferences(SHAMap const *have, std::function< bool(SHAMapTreeNode const &)> const &) const
Visit every node in this SHAMap that is not present in the specified SHAMap.
void skip(int num)
std::uint32_t get32()
int getLength() const
Definition Serializer.h:233
void const * getDataPtr() const
Definition Serializer.h:223
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:64
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:76
static time_point now()
std::vector< WrappedValidationType > getTrustedForLedger(ID const &ledgerID, Seq const &seq)
Get trusted full validations for a specific ledger.
std::vector< WrappedValidationType > currentTrusted()
Get the currently trusted full validations.
std::vector< std::uint32_t > fees(ID const &ledgerID, std::uint32_t baseFee)
Returns fees reported by trusted full validators in the given ledger.
std::vector< std::shared_ptr< STValidation > > negativeUNLFilter(std::vector< std::shared_ptr< STValidation > > &&validations) const
Remove validations that are from validators on the negative UNL.
std::size_t quorum() const
Get quorum value for current trusted key set.
QuorumKeys getQuorumKeys() const
Get the quorum and all of the trusted keys.
bool isNonZero() const
Definition base_uint.h:545
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:231
T clear(T... args)
T copy(T... args)
T count(T... args)
T empty(T... args)
T end(T... args)
T endl(T... args)
T find(T... args)
T is_same_v
T load(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
bool isNewerVersion(std::uint64_t version)
Check if the version is newer than the local node's rippled software version.
bool isRippledVersion(std::uint64_t version)
Check if the encoded software version is a rippled software version.
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeRequestNoReply
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:25
SizedItem
Definition Config.h:44
bool areCompatible(ReadView const &validLedger, ReadView const &testLedger, beast::Journal::Stream &s, char const *reason)
Return false if the test ledger is provably incompatible with the valid ledger, that is,...
Definition View.cpp:799
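A brief usage sketch of areCompatible (the ledger views and journal are illustrative assumptions):

    // Hedged sketch: check a candidate ledger against the last fully
    // validated ledger, logging the mismatch reason to the given stream.
    beast::Journal::Stream s = journal.warn();
    if (!areCompatible(*validLedger, *candidate, s, "candidate ledger"))
    {
        // the candidate provably conflicts with the validated chain
    }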
LedgerIndex getCandidateLedger(LedgerIndex requested)
Find a ledger index from which we could easily get the requested ledger.
Definition View.h:429
static bool shouldAcquire(std::uint32_t const currentLedger, std::uint32_t const ledgerHistory, std::optional< LedgerIndex > const minimumOnline, std::uint32_t const candidateLedger, beast::Journal j)
std::optional< T > prevMissing(RangeSet< T > const &rs, T t, T minVal=0)
Find the largest value not in the set that is less than a given value.
Definition RangeSet.h:183
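A brief worked sketch of prevMissing, using the range() helper listed further below (the values are illustrative):

    // Hedged sketch: with ledgers 100-110 and 112-120 marked present, the
    // largest value below 120 (and not below 100) missing from the set is 111.
    RangeSet<std::uint32_t> have;
    have.insert(range<std::uint32_t>(100, 110));
    have.insert(range<std::uint32_t>(112, 120));
    auto const missing =
        prevMissing(have, std::uint32_t{120}, std::uint32_t{100});
    // missing holds 111 here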
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
std::optional< uint256 > hashOfSeq(ReadView const &ledger, LedgerIndex seq, beast::Journal journal)
Return the hash of a ledger by sequence.
Definition View.cpp:961
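A brief usage sketch of hashOfSeq (the reference ledger, sequence, and journal are illustrative assumptions):

    // Hedged sketch: resolve an earlier ledger's hash through the skip list
    // of a reference ledger at or above that sequence; nullopt if unreachable.
    if (auto const hash = hashOfSeq(*referenceLedger, seq, journal))
        JLOG(journal.trace()) << "hash of " << seq << " is " << to_string(*hash);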
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T,...
static constexpr int MAX_LEDGER_GAP
constexpr std::size_t calculatePercent(std::size_t count, std::size_t total)
Calculate one number divided by another, expressed as a percentage.
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition Slice.h:244
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition chrono.h:119
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:630
ClosedInterval< T > range(T low, T high)
Create a closed range interval.
Definition RangeSet.h:54
static void populateFetchPack(SHAMap const &want, SHAMap const *have, std::uint32_t cnt, protocol::TMGetObjectByHash *into, std::uint32_t seq, bool withLeaves=true)
Populate a fetch pack with data from the map the recipient wants.
static constexpr std::chrono::minutes MAX_LEDGER_AGE_ACQUIRE
@ ledgerMaster
ledger master data for signing
static constexpr int MAX_WRITE_LOAD_ACQUIRE
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
@ jtLEDGER_DATA
Definition Job.h:66
@ jtUPDATE_PF
Definition Job.h:56
@ jtPUBOLDLEDGER
Definition Job.h:44
@ jtADVANCE
Definition Job.h:67
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition digest.h:224
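A brief usage sketch of sha512Half (the field names are illustrative assumptions; the prefix is the ledger-master signing prefix listed above):

    // Hedged sketch: hash a domain-separation prefix followed by the fields;
    // arguments are fed to the hasher in the order given.
    uint256 const signingHash =
        sha512Half(HashPrefix::ledgerMaster, ledgerSeq, ledgerHash);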
void LogicError(std::string const &how) noexcept
Called when faulty logic causes a broken invariant.
bool pendSaveValidated(Application &app, std::shared_ptr< Ledger const > const &ledger, bool isSynchronous, bool isCurrent)
Save, or arrange to save, a fully-validated ledger. Returns false on error.
Definition Ledger.cpp:997
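A brief usage sketch of pendSaveValidated (app, ledger, and journal are illustrative assumptions):

    // Hedged sketch: queue an asynchronous save of a fully-validated ledger;
    // a false return means the save could not be arranged.
    if (!pendSaveValidated(app, ledger, false /*isSynchronous*/, true /*isCurrent*/))
        JLOG(journal.warn()) << "failed to schedule validated-ledger save";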
STL namespace.
T has_value(T... args)
T push_back(T... args)
T reserve(T... args)
T size(T... args)
T sort(T... args)
T str(T... args)
T swap(T... args)
T test_and_set(T... args)
T time_since_epoch(T... args)
T what(T... args)