1#include <xrpld/app/consensus/RCLConsensus.h>
2#include <xrpld/app/consensus/RCLCxPeerPos.h>
3#include <xrpld/app/consensus/RCLValidations.h>
4#include <xrpld/app/ledger/AcceptedLedger.h>
5#include <xrpld/app/ledger/InboundLedgers.h>
6#include <xrpld/app/ledger/LedgerMaster.h>
7#include <xrpld/app/ledger/LedgerToJson.h>
8#include <xrpld/app/ledger/LocalTxs.h>
9#include <xrpld/app/ledger/OpenLedger.h>
10#include <xrpld/app/ledger/TransactionMaster.h>
11#include <xrpld/app/main/LoadManager.h>
12#include <xrpld/app/main/Tuning.h>
13#include <xrpld/app/misc/DeliverMax.h>
14#include <xrpld/app/misc/Transaction.h>
15#include <xrpld/app/misc/TxQ.h>
16#include <xrpld/app/misc/ValidatorKeys.h>
17#include <xrpld/app/misc/ValidatorList.h>
18#include <xrpld/app/misc/detail/AccountTxPaging.h>
19#include <xrpld/app/misc/make_NetworkOPs.h>
20#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
21#include <xrpld/consensus/Consensus.h>
22#include <xrpld/consensus/ConsensusParms.h>
23#include <xrpld/core/ConfigSections.h>
24#include <xrpld/overlay/Cluster.h>
25#include <xrpld/overlay/Overlay.h>
26#include <xrpld/overlay/predicates.h>
27#include <xrpld/rpc/BookChanges.h>
28#include <xrpld/rpc/CTID.h>
29#include <xrpld/rpc/DeliveredAmount.h>
30#include <xrpld/rpc/MPTokenIssuanceID.h>
31#include <xrpld/rpc/ServerHandler.h>
33#include <xrpl/basics/UptimeClock.h>
34#include <xrpl/basics/mulDiv.h>
35#include <xrpl/basics/safe_cast.h>
36#include <xrpl/basics/scope.h>
37#include <xrpl/beast/utility/rngfill.h>
38#include <xrpl/core/HashRouter.h>
39#include <xrpl/core/NetworkIDService.h>
40#include <xrpl/core/PerfLog.h>
41#include <xrpl/crypto/RFC1751.h>
42#include <xrpl/crypto/csprng.h>
43#include <xrpl/git/Git.h>
44#include <xrpl/ledger/AmendmentTable.h>
45#include <xrpl/ledger/OrderBookDB.h>
46#include <xrpl/ledger/helpers/AccountRootHelpers.h>
47#include <xrpl/ledger/helpers/DirectoryHelpers.h>
48#include <xrpl/ledger/helpers/TokenHelpers.h>
49#include <xrpl/protocol/BuildInfo.h>
50#include <xrpl/protocol/Feature.h>
51#include <xrpl/protocol/MultiApiJson.h>
52#include <xrpl/protocol/NFTSyntheticSerializer.h>
53#include <xrpl/protocol/RPCErr.h>
54#include <xrpl/protocol/Rate.h>
55#include <xrpl/protocol/TxFlags.h>
56#include <xrpl/protocol/jss.h>
57#include <xrpl/resource/Fees.h>
58#include <xrpl/resource/ResourceManager.h>
59#include <xrpl/server/LoadFeeTrack.h>
60#include <xrpl/tx/apply.h>
62#include <boost/asio/ip/host_name.hpp>
63#include <boost/asio/steady_timer.hpp>
98 "xrpl::NetworkOPsImp::TransactionStatus::TransactionStatus : "
200 return !(*
this != b);
219 boost::asio::io_context& ioCtx,
236 registry.getInboundTransactions(),
237 beast::get_abstract_clock<
std::chrono::steady_clock>(),
241 validatorKeys.keys ? validatorKeys.keys->publicKey : decltype(
validatorPK_){})
243 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
430 getServerInfo(
bool human,
bool admin,
bool counters)
override;
456 TER result)
override;
556 catch (boost::system::system_error
const& e)
558 JLOG(
m_journal.
error()) <<
"NetworkOPs: heartbeatTimer cancel error: " << e.what();
565 catch (boost::system::system_error
const& e)
567 JLOG(
m_journal.
error()) <<
"NetworkOPs: clusterTimer cancel error: " << e.what();
574 catch (boost::system::system_error
const& e)
577 <<
"NetworkOPs: accountHistoryTxTimer cancel error: " << e.what();
581 using namespace std::chrono_literals;
591 boost::asio::steady_timer& timer,
767 template <
class Handler>
769 :
hook(collector->make_hook(handler))
771 collector->make_gauge(
"State_Accounting",
"Disconnected_duration"))
773 ,
syncing_duration(collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
774 ,
tracking_duration(collector->make_gauge(
"State_Accounting",
"Tracking_duration"))
775 ,
full_duration(collector->make_gauge(
"State_Accounting",
"Full_duration"))
777 collector->make_gauge(
"State_Accounting",
"Disconnected_transitions"))
779 collector->make_gauge(
"State_Accounting",
"Connected_transitions"))
782 collector->make_gauge(
"State_Accounting",
"Tracking_transitions"))
783 ,
full_transitions(collector->make_gauge(
"State_Accounting",
"Full_transitions"))
812 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
872 static std::string const hostname = boost::asio::ip::host_name();
879 static std::string const shroudedHostId = [
this]() {
880 auto const&
id =
registry_.get().getApp().nodeIdentity();
885 return shroudedHostId;
894 if (
registry_.get().getCluster().size() != 0)
900 boost::asio::steady_timer& timer,
906 if (
auto optionalCountedHandler =
908 if ((e.value() == boost::system::errc::success) && (!m_job_queue.isStopped()))
913 if (e.value() != boost::system::errc::success &&
914 e.value() != boost::asio::error::operation_aborted)
917 JLOG(m_journal.error())
918 <<
"Timer got error '" << e.message() <<
"'. Restarting timer.";
923 timer.expires_after(expiry_time);
924 timer.async_wait(std::move(*optionalCountedHandler));
929NetworkOPsImp::setHeartbeatTimer()
933 mConsensus.parms().ledgerGRANULARITY,
935 m_job_queue.addJob(jtNETOP_TIMER,
"NetHeart", [this]() { processHeartbeatTimer(); });
937 [
this]() { setHeartbeatTimer(); });
941NetworkOPsImp::setClusterTimer()
943 using namespace std::chrono_literals;
949 m_job_queue.addJob(
jtNETOP_CLUSTER,
"NetCluster", [
this]() { processClusterTimer(); });
951 [
this]() { setClusterTimer(); });
957 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
959 using namespace std::chrono_literals;
961 accountHistoryTxTimer_,
963 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
964 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
968NetworkOPsImp::processHeartbeatTimer()
975 LoadManager& mgr(registry_.get().getLoadManager());
978 std::size_t const numPeers = registry_.get().getOverlay().size();
981 if (numPeers < minPeerCount_)
983 if (mMode != OperatingMode::DISCONNECTED)
985 setMode(OperatingMode::DISCONNECTED);
987 ss <<
"Node count (" << numPeers <<
") has fallen "
988 <<
"below required minimum (" << minPeerCount_ <<
").";
989 JLOG(m_journal.warn()) << ss.
str();
990 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
994 CLOG(clog.
ss()) <<
"already DISCONNECTED. too few peers (" << numPeers
995 <<
"), need at least " << minPeerCount_;
1002 setHeartbeatTimer();
1007 if (mMode == OperatingMode::DISCONNECTED)
1009 setMode(OperatingMode::CONNECTED);
1010 JLOG(m_journal.info()) <<
"Node count (" << numPeers <<
") is sufficient.";
1011 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers <<
" peers. ";
1016 auto origMode = mMode.load();
1017 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1018 if (mMode == OperatingMode::SYNCING)
1020 setMode(OperatingMode::SYNCING);
1022 else if (mMode == OperatingMode::CONNECTED)
1024 setMode(OperatingMode::CONNECTED);
1026 auto newMode = mMode.load();
1027 if (origMode != newMode)
1029 CLOG(clog.
ss()) <<
", changing to " << strOperatingMode(newMode,
true);
1031 CLOG(clog.
ss()) <<
". ";
1034 mConsensus.timerEntry(registry_.get().getTimeKeeper().closeTime(), clog.
ss());
1036 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1038 if (mLastConsensusPhase != currPhase)
1040 reportConsensusStateChange(currPhase);
1041 mLastConsensusPhase = currPhase;
1042 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1044 CLOG(clog.
ss()) <<
". ";
1046 setHeartbeatTimer();
1050NetworkOPsImp::processClusterTimer()
1052 if (registry_.get().getCluster().size() == 0)
1055 using namespace std::chrono_literals;
1057 bool const update = registry_.get().getCluster().update(
1058 registry_.get().getApp().nodeIdentity().first,
1060 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1061 ? registry_.get().getFeeTrack().getLocalFee()
1063 registry_.get().getTimeKeeper().now());
1067 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1072 protocol::TMCluster cluster;
1073 registry_.get().getCluster().for_each([&cluster](
ClusterNode const& node) {
1074 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1079 n.set_nodename(node.
name());
1082 Resource::Gossip const gossip = registry_.get().getResourceManager().exportConsumers();
1083 for (
auto& item : gossip.
items)
1085 protocol::TMLoadSource& node = *cluster.add_loadsources();
1086 node.set_name(to_string(item.address));
1087 node.set_cost(item.balance);
1089 registry_.get().getOverlay().foreach(
1097NetworkOPsImp::strOperatingMode(
OperatingMode const mode,
bool const admin)
const
1099 if (mode == OperatingMode::FULL && admin)
1101 auto const consensusMode = mConsensus.mode();
1102 if (consensusMode != ConsensusMode::wrongLedger)
1104 if (consensusMode == ConsensusMode::proposing)
1107 if (mConsensus.validating())
1108 return "validating";
1118 if (isNeedNetworkLedger())
1125 if (iTrans->isFlag(
tfInnerBatchTxn) && m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1127 JLOG(m_journal.error()) <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1134 auto const txid = trans->getTransactionID();
1135 auto const flags = registry_.get().getHashRouter().getFlags(txid);
1137 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1139 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1146 registry_.get().getHashRouter(), *trans, m_ledgerMaster.getValidatedRules());
1148 if (validity != Validity::Valid)
1150 JLOG(m_journal.warn()) <<
"Submitted transaction invalid: " << reason;
1156 JLOG(m_journal.warn()) <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1165 m_job_queue.addJob(
jtTRANSACTION,
"SubmitTxn", [
this, tx]() {
1167 processTransaction(t,
false,
false, FailHard::no);
1174 auto const newFlags = registry_.get().getHashRouter().getFlags(transaction->getID());
1176 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1179 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1180 transaction->setStatus(
INVALID);
1185 auto const view = m_ledgerMaster.getCurrentLedger();
1190 auto const sttx = *transaction->getSTransaction();
1191 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1193 transaction->setStatus(
INVALID);
1195 registry_.
get().getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1202 auto const [validity, reason] =
1203 checkValidity(registry_.get().getHashRouter(), sttx, view->rules());
1205 validity == Validity::Valid,
"xrpl::NetworkOPsImp::processTransaction : valid validity");
1208 if (validity == Validity::SigBad)
1210 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1211 transaction->setStatus(
INVALID);
1213 registry_.
get().getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1218 registry_.get().getMasterTransaction().canonicalize(&transaction);
1224NetworkOPsImp::processTransaction(
1230 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1233 if (!preProcessTransaction(transaction))
1238 doTransactionSync(transaction, bUnlimited, failType);
1242 doTransactionAsync(transaction, bUnlimited, failType);
1247NetworkOPsImp::doTransactionAsync(
1254 if (transaction->getApplying())
1257 mTransactions.push_back(
TransactionStatus(transaction, bUnlimited,
false, failType));
1258 transaction->setApplying();
1260 if (mDispatchState == DispatchState::none)
1262 if (m_job_queue.addJob(
jtBATCH,
"TxBatchAsync", [
this]() { transactionBatch(); }))
1264 mDispatchState = DispatchState::scheduled;
1270NetworkOPsImp::doTransactionSync(
1277 if (!transaction->getApplying())
1279 mTransactions.push_back(
TransactionStatus(transaction, bUnlimited,
true, failType));
1280 transaction->setApplying();
1284 return transaction->getApplying();
1289NetworkOPsImp::doTransactionSyncBatch(
1295 if (mDispatchState == DispatchState::running)
1304 if (!mTransactions.empty())
1307 if (m_job_queue.addJob(
jtBATCH,
"TxBatchSync", [
this]() { transactionBatch(); }))
1309 mDispatchState = DispatchState::scheduled;
1313 }
while (retryCallback(lock));
1319 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1322 for (
auto const& [_, tx] :
set)
1327 if (transaction->getStatus() ==
INVALID)
1329 if (!reason.
empty())
1331 JLOG(m_journal.trace()) <<
"Exception checking transaction: " << reason;
1333 registry_.get().getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);
1338 if (!preProcessTransaction(transaction))
1349 for (
auto& transaction : candidates)
1351 if (!transaction->getApplying())
1353 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1354 transaction->setApplying();
1358 if (mTransactions.empty())
1360 mTransactions.swap(transactions);
1364 mTransactions.reserve(mTransactions.size() + transactions.
size());
1365 for (
auto& t : transactions)
1366 mTransactions.push_back(std::move(t));
1368 if (mTransactions.empty())
1370 JLOG(m_journal.debug()) <<
"No transaction to process!";
1375 XRPL_ASSERT(lock.owns_lock(),
"xrpl::NetworkOPsImp::processTransactionSet has lock");
1376 return std::any_of(mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1377 return t.transaction->getApplying();
1383NetworkOPsImp::transactionBatch()
1387 if (mDispatchState == DispatchState::running)
1390 while (!mTransactions.empty())
1401 mTransactions.
swap(transactions);
1402 XRPL_ASSERT(!transactions.
empty(),
"xrpl::NetworkOPsImp::apply : non-empty transactions");
1404 mDispatchState != DispatchState::running,
"xrpl::NetworkOPsImp::apply : is not running");
1406 mDispatchState = DispatchState::running;
1412 bool changed =
false;
1424 if (e.failType == FailHard::yes)
1427 auto const result = registry_.get().getTxQ().apply(
1428 registry_.get().getApp(), view, e.transaction->getSTransaction(), flags, j);
1429 e.result = result.ter;
1430 e.applied = result.applied;
1431 changed = changed || result.applied;
1440 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1441 validatedLedgerIndex = l->header().seq;
1443 auto newOL = registry_.get().getOpenLedger().current();
1446 e.transaction->clearSubmitResult();
1450 pubProposedTransaction(newOL, e.transaction->getSTransaction(), e.result);
1451 e.transaction->setApplied();
1454 e.transaction->setResult(e.result);
1458 registry_.get().getHashRouter().setFlags(
1459 e.transaction->getID(), HashRouterFlags::BAD);
1469 JLOG(m_journal.info()) <<
"TransactionResult: " << token <<
": " << human;
1474 bool const addLocal = e.local;
1478 JLOG(m_journal.debug()) <<
"Transaction is now included in open ledger";
1479 e.transaction->setStatus(
INCLUDED);
1484 auto const& txCur = e.transaction->getSTransaction();
1487 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1489 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1496 if (t->getApplying())
1498 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1507 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1508 e.transaction->setStatus(
OBSOLETE);
1512 JLOG(m_journal.debug()) <<
"Transaction is likely to claim a"
1513 <<
" fee, but is queued until fee drops";
1515 e.transaction->setStatus(
HELD);
1519 m_ledgerMaster.addHeldTransaction(e.transaction);
1520 e.transaction->setQueued();
1521 e.transaction->setKept();
1525 if (e.failType != FailHard::yes)
1527 auto const lastLedgerSeq =
1528 e.transaction->getSTransaction()->at(~sfLastLedgerSequence);
1529 auto const ledgersLeft = lastLedgerSeq
1530 ? *lastLedgerSeq - m_ledgerMaster.getCurrentLedgerIndex()
1548 if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1549 registry_.get().getHashRouter().setFlags(
1550 e.transaction->getID(), HashRouterFlags::HELD))
1553 JLOG(m_journal.debug()) <<
"Transaction should be held: " << e.result;
1554 e.transaction->setStatus(
HELD);
1555 m_ledgerMaster.addHeldTransaction(e.transaction);
1556 e.transaction->setKept();
1559 JLOG(m_journal.debug())
1560 <<
"Not holding transaction " << e.transaction->getID() <<
": "
1561 << (e.local ?
"local" :
"network") <<
", "
1562 <<
"result: " << e.result <<
" ledgers left: "
1563 << (ledgersLeft ? to_string(*ledgersLeft) :
"unspecified");
1568 JLOG(m_journal.debug()) <<
"Status other than success " << e.result;
1569 e.transaction->setStatus(
INVALID);
1572 auto const enforceFailHard = e.failType == FailHard::yes && !
isTesSuccess(e.result);
1574 if (addLocal && !enforceFailHard)
1576 m_localTX->push_back(
1577 m_ledgerMaster.getCurrentLedgerIndex(), e.transaction->getSTransaction());
1578 e.transaction->setKept();
1582 ((mMode != OperatingMode::FULL) && (e.failType != FailHard::yes) && e.local) ||
1587 registry_.get().getHashRouter().shouldRelay(e.transaction->getID());
1588 if (
auto const sttx = *(e.transaction->getSTransaction()); toSkip &&
1595 protocol::TMTransaction tx;
1599 tx.set_rawtransaction(s.
data(), s.
size());
1600 tx.set_status(protocol::tsCURRENT);
1601 tx.set_receivetimestamp(
1602 registry_.get().getTimeKeeper().now().time_since_epoch().count());
1605 registry_.get().getOverlay().relay(e.transaction->getID(), tx, *toSkip);
1606 e.transaction->setBroadcast();
1610 if (validatedLedgerIndex)
1612 auto [fee, accountSeq, availableSeq] =
1613 registry_.get().getTxQ().getTxRequiredFeeAndSeq(
1614 *newOL, e.transaction->getSTransaction());
1615 e.transaction->setCurrentLedgerState(
1616 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1624 e.transaction->clearApplying();
1626 if (!submit_held.
empty())
1628 if (mTransactions.empty())
1630 mTransactions.swap(submit_held);
1634 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1635 for (
auto& e : submit_held)
1636 mTransactions.push_back(std::move(e));
1642 mDispatchState = DispatchState::none;
1653 auto root = keylet::ownerDir(account);
1654 auto sleNode = lpLedger->read(keylet::page(
root));
1661 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1663 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1664 XRPL_ASSERT(sleCur,
"xrpl::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1666 switch (sleCur->getType())
1669 if (!jvObjects.
isMember(jss::offers))
1672 jvObjects[jss::offers].
append(sleCur->getJson(JsonOptions::none));
1675 case ltRIPPLE_STATE:
1676 if (!jvObjects.
isMember(jss::ripple_lines))
1681 jvObjects[jss::ripple_lines].
append(sleCur->getJson(JsonOptions::none));
1684 case ltACCOUNT_ROOT:
1689 "xrpl::NetworkOPsImp::getOwnerInfo : invalid "
1696 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1700 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1701 XRPL_ASSERT(sleNode,
"xrpl::NetworkOPsImp::getOwnerInfo : read next page");
1703 }
while (uNodeDir != 0u);
1714NetworkOPsImp::isBlocked()
1716 return isAmendmentBlocked() || isUNLBlocked();
1720NetworkOPsImp::isAmendmentBlocked()
1722 return amendmentBlocked_;
1726NetworkOPsImp::setAmendmentBlocked()
1728 amendmentBlocked_ =
true;
1729 setMode(OperatingMode::CONNECTED);
1733NetworkOPsImp::isAmendmentWarned()
1735 return !amendmentBlocked_ && amendmentWarned_;
1739NetworkOPsImp::setAmendmentWarned()
1741 amendmentWarned_ =
true;
1745NetworkOPsImp::clearAmendmentWarned()
1747 amendmentWarned_ =
false;
1751NetworkOPsImp::isUNLBlocked()
1757NetworkOPsImp::setUNLBlocked()
1760 setMode(OperatingMode::CONNECTED);
1764NetworkOPsImp::clearUNLBlocked()
1766 unlBlocked_ =
false;
1777 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1779 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1784 uint256 closedLedger = ourClosed->header().hash;
1785 uint256 const prevClosedLedger = ourClosed->header().parentHash;
1786 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1787 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1792 auto& validations = registry_.get().getValidations();
1793 JLOG(m_journal.debug()) <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1797 peerCounts[closedLedger] = 0;
1798 if (mMode >= OperatingMode::TRACKING)
1799 peerCounts[closedLedger]++;
1801 for (
auto& peer : peerList)
1803 uint256 const peerLedger = peer->getClosedLedgerHash();
1806 ++peerCounts[peerLedger];
1809 for (
auto const& it : peerCounts)
1810 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1812 uint256 const preferredLCL = validations.getPreferredLCL(
1814 m_ledgerMaster.getValidLedgerIndex(),
1817 bool switchLedgers = preferredLCL != closedLedger;
1819 closedLedger = preferredLCL;
1821 if (switchLedgers && (closedLedger == prevClosedLedger))
1824 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1825 networkClosed = ourClosed->header().hash;
1826 switchLedgers =
false;
1830 networkClosed = closedLedger;
1836 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1840 consensus = registry_.get().getInboundLedgers().acquire(
1841 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1845 (!m_ledgerMaster.canBeCurrent(consensus) ||
1846 !m_ledgerMaster.isCompatible(*consensus, m_journal.debug(),
"Not switching")))
1850 networkClosed = ourClosed->header().hash;
1854 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1855 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->header().hash <<
getJson({*ourClosed, {}});
1856 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1858 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1860 setMode(OperatingMode::CONNECTED);
1868 switchLastClosedLedger(consensus);
1878 JLOG(m_journal.error()) <<
"JUMP last closed ledger to " << newLCL->header().hash;
1880 clearNeedNetworkLedger();
1883 registry_.get().getTxQ().processClosedLedger(registry_.get().getApp(), *newLCL,
true);
1890 auto retries = m_localTX->getTxSet();
1891 auto const lastVal = registry_.get().getLedgerMaster().getValidatedLedger();
1899 rules.
emplace(registry_.get().getApp().config().features);
1901 registry_.get().getOpenLedger().accept(
1902 registry_.get().getApp(),
1912 return registry_.get().getTxQ().accept(registry_.get().getApp(), view);
1916 m_ledgerMaster.switchLCL(newLCL);
1918 protocol::TMStatusChange s;
1919 s.set_newevent(protocol::neSWITCHED_LEDGER);
1920 s.set_ledgerseq(newLCL->header().seq);
1921 s.set_networktime(registry_.get().getTimeKeeper().now().time_since_epoch().count());
1922 s.set_ledgerhashprevious(
1923 newLCL->header().parentHash.begin(), newLCL->header().parentHash.size());
1924 s.set_ledgerhash(newLCL->header().hash.begin(), newLCL->header().hash.size());
1925 registry_.get().getOverlay().foreach(
1930NetworkOPsImp::beginConsensus(
1934 XRPL_ASSERT(networkClosed.
isNonZero(),
"xrpl::NetworkOPsImp::beginConsensus : nonzero input");
1936 auto closingInfo = m_ledgerMaster.getCurrentLedger()->header();
1938 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq <<
" with LCL "
1939 << closingInfo.parentHash;
1941 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1946 if (mMode == OperatingMode::FULL)
1948 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
1949 setMode(OperatingMode::TRACKING);
1950 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
1953 CLOG(clog) <<
"beginConsensus no previous ledger. ";
1958 prevLedger->header().hash == closingInfo.parentHash,
1959 "xrpl::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1962 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->header().hash,
1963 "xrpl::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1966 registry_.get().getValidators().setNegativeUNL(prevLedger->negativeUNL());
1967 TrustChanges const changes = registry_.get().getValidators().updateTrusted(
1968 registry_.get().getValidations().getCurrentNodeIDs(),
1969 closingInfo.parentCloseTime,
1971 registry_.get().getOverlay(),
1972 registry_.get().getHashRouter());
1974 if (!changes.
added.empty() || !changes.
removed.empty())
1976 registry_.get().getValidations().trustChanged(changes.
added, changes.
removed);
1978 registry_.get().getAmendmentTable().trustChanged(
1979 registry_.get().getValidators().getQuorumKeys().second);
1982 mConsensus.startRound(
1983 registry_.get().getTimeKeeper().closeTime(),
1991 if (mLastConsensusPhase != currPhase)
1993 reportConsensusStateChange(currPhase);
1994 mLastConsensusPhase = currPhase;
1997 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
2004 auto const& peerKey = peerPos.
publicKey();
2005 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2016 JLOG(m_journal.error()) <<
"Received a proposal signed by MY KEY from a peer. This may "
2017 "indicate a misconfiguration where another node has the same "
2018 "validator key, or may be caused by unusual message routing and "
2023 return mConsensus.peerProposal(registry_.get().getTimeKeeper().closeTime(), peerPos);
2031 protocol::TMHaveTransactionSet msg;
2032 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2033 msg.set_status(protocol::tsHAVE);
2034 registry_.get().getOverlay().foreach(
2039 mConsensus.gotTxSet(registry_.get().getTimeKeeper().closeTime(),
RCLTxSet{map});
2045 uint256 const deadLedger = m_ledgerMaster.getClosedLedger()->header().parentHash;
2046 for (
auto const& it : registry_.get().getOverlay().getActivePeers())
2048 if (it && (it->getClosedLedgerHash() == deadLedger))
2050 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2056 bool const ledgerChange =
2057 checkLastClosedLedger(registry_.get().getOverlay().getActivePeers(), networkClosed);
2059 if (networkClosed.
isZero())
2061 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2071 if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::SYNCING)) && !ledgerChange)
2076 if (!needNetworkLedger_)
2077 setMode(OperatingMode::TRACKING);
2080 if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) &&
2086 auto current = m_ledgerMaster.getCurrentLedger();
2087 if (registry_.get().getTimeKeeper().now() <
2088 (
current->header().parentCloseTime + 2 *
current->header().closeTimeResolution))
2090 setMode(OperatingMode::FULL);
2094 beginConsensus(networkClosed, clog);
2098NetworkOPsImp::consensusViewChange()
2100 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2102 setMode(OperatingMode::CONNECTED);
2112 if (!mStreamMaps[sManifests].empty())
2116 jvObj[jss::type] =
"manifestReceived";
2122 jvObj[jss::signature] =
strHex(*sig);
2125 jvObj[jss::domain] = mo.
domain;
2128 for (
auto i = mStreamMaps[sManifests].begin(); i != mStreamMaps[sManifests].end();)
2130 if (
auto p = i->second.lock())
2132 p->send(jvObj,
true);
2137 i = mStreamMaps[sManifests].erase(i);
2143NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2147 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2148 , loadBaseServer{loadFeeTrack.getLoadBase()}
2150 , em{escalationMetrics}
2158 baseFee != b.
baseFee || em.has_value() != b.
em.has_value())
2164 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2165 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2166 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2195 registry_.get().getOpenLedger().current()->fees().base,
2199 jvObj[jss::type] =
"serverStatus";
2201 jvObj[jss::load_base] = f.loadBaseServer;
2202 jvObj[jss::load_factor_server] = f.loadFactorServer;
2203 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2208 safe_cast<std::uint64_t>(f.loadFactorServer),
2209 mulDiv(f.em->openLedgerFeeLevel, f.loadBaseServer, f.em->referenceFeeLevel)
2212 jvObj[jss::load_factor] =
trunc32(loadFactor);
2213 jvObj[jss::load_factor_fee_escalation] = f.em->openLedgerFeeLevel.jsonClipped();
2214 jvObj[jss::load_factor_fee_queue] = f.em->minProcessingFeeLevel.jsonClipped();
2215 jvObj[jss::load_factor_fee_reference] = f.em->referenceFeeLevel.jsonClipped();
2219 jvObj[jss::load_factor] = f.loadFactorServer;
2233 p->send(jvObj,
true);
2250 if (!streamMap.empty())
2253 jvObj[jss::type] =
"consensusPhase";
2254 jvObj[jss::consensus] =
to_string(phase);
2256 for (
auto i = streamMap.begin(); i != streamMap.end();)
2258 if (
auto p = i->second.lock())
2260 p->send(jvObj,
true);
2265 i = streamMap.erase(i);
2281 auto const signerPublic = val->getSignerPublic();
2283 jvObj[jss::type] =
"validationReceived";
2285 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2286 jvObj[jss::signature] =
strHex(val->getSignature());
2287 jvObj[jss::full] = val->isFull();
2288 jvObj[jss::flags] = val->getFlags();
2289 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2290 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2291 jvObj[jss::network_id] =
registry_.
get().getNetworkIDService().getNetworkID();
2293 if (
auto version = (*val)[~sfServerVersion])
2296 if (
auto cookie = (*val)[~sfCookie])
2299 if (
auto hash = (*val)[~sfValidatedHash])
2300 jvObj[jss::validated_hash] =
strHex(*hash);
2302 auto const masterKey =
registry_.get().getValidatorManifests().getMasterKey(signerPublic);
2304 if (masterKey != signerPublic)
2309 if (
auto const seq = (*val)[~sfLedgerSequence])
2310 jvObj[jss::ledger_index] = *seq;
2312 if (val->isFieldPresent(sfAmendments))
2315 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2316 jvObj[jss::amendments].append(
to_string(amendment));
2319 if (
auto const closeTime = (*val)[~sfCloseTime])
2320 jvObj[jss::close_time] = *closeTime;
2322 if (
auto const loadFee = (*val)[~sfLoadFee])
2323 jvObj[jss::load_fee] = *loadFee;
2325 if (
auto const baseFee = val->at(~sfBaseFee))
2326 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2328 if (
auto const reserveBase = val->at(~sfReserveBase))
2329 jvObj[jss::reserve_base] = *reserveBase;
2331 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2332 jvObj[jss::reserve_inc] = *reserveInc;
2336 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); baseFeeXRP && baseFeeXRP->native())
2337 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2339 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2340 reserveBaseXRP && reserveBaseXRP->native())
2341 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2343 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2344 reserveIncXRP && reserveIncXRP->native())
2345 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2354 if (jvTx.
isMember(jss::ledger_index))
2356 jvTx[jss::ledger_index] =
std::to_string(jvTx[jss::ledger_index].asUInt());
2362 if (
auto p = i->second.lock())
2366 [&](
Json::Value const& jv) { p->send(jv,
true); });
2386 jvObj[jss::type] =
"peerStatusChange";
2394 p->send(jvObj,
true);
2408 using namespace std::chrono_literals;
2411 if (
registry_.get().getLedgerMaster().getValidatedLedgerAge() < 1min)
2416 if (
registry_.get().getLedgerMaster().getValidatedLedgerAge() >= 1min)
2437 JLOG(
m_journal.
trace()) <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2456 JLOG(
m_journal.
warn()) <<
"Exception thrown for handling new validation "
2457 << val->getLedgerHash() <<
": " << e.
what();
2461 JLOG(
m_journal.
warn()) <<
"Unknown exception thrown for handling new validation "
2462 << val->getLedgerHash();
2474 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2475 auto master =
registry_.
get().getValidators().getTrustedKey(val->getSignerPublic());
2489 return registry_.get().getApp().config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2511 "This server is amendment blocked, and must be updated to be "
2512 "able to stay in sync with the network.";
2519 "This server has an expired validator list. validators.txt "
2520 "may be incorrectly configured or some [validator_list_sites] "
2521 "may be unreachable.";
2528 "One or more unsupported amendments have reached majority. "
2529 "Upgrade to the latest version before they are activated "
2530 "to avoid being amendment blocked.";
2531 if (
auto const expected =
2532 registry_.get().getAmendmentTable().firstUnsupportedExpected())
2535 d[jss::expected_date] = expected->time_since_epoch().count();
2536 d[jss::expected_date_UTC] =
to_string(*expected);
2540 if (warnings.size() != 0u)
2541 info[jss::warnings] = std::move(warnings);
2549 if (!
registry_.get().getApp().config().SERVER_DOMAIN.empty())
2550 info[jss::server_domain] =
registry_.
get().getApp().config().SERVER_DOMAIN;
2560 info[jss::network_ledger] =
"waiting";
2562 info[jss::validation_quorum] =
2570 switch (
registry_.get().getApp().config().NODE_SIZE)
2573 info[jss::node_size] =
"tiny";
2576 info[jss::node_size] =
"small";
2579 info[jss::node_size] =
"medium";
2582 info[jss::node_size] =
"large";
2585 info[jss::node_size] =
"huge";
2595 info[jss::validator_list_expires] =
2596 safe_cast<Json::UInt>(when->time_since_epoch().count());
2600 info[jss::validator_list_expires] = 0;
2611 if (*when == TimeKeeper::time_point::max())
2613 x[jss::expiration] =
"never";
2614 x[jss::status] =
"active";
2620 if (*when >
registry_.get().getTimeKeeper().now())
2622 x[jss::status] =
"active";
2626 x[jss::status] =
"expired";
2632 x[jss::status] =
"unknown";
2633 x[jss::expiration] =
"unknown";
2646 info[jss::io_latency_ms] =
2651 if (
auto const localPubKey =
registry_.get().getValidators().localPublicKey();
2652 localPubKey &&
registry_.get().getApp().getValidationPublicKey())
2658 info[jss::pubkey_validator] =
"none";
2664 info[jss::counters] =
registry_.
get().getPerfLog().countersJson();
2667 registry_.get().getNodeStore().getCountsJson(nodestore);
2668 info[jss::counters][jss::nodestore] = nodestore;
2669 info[jss::current_activities] =
registry_.
get().getPerfLog().currentJson();
2672 info[jss::pubkey_node] =
2675 info[jss::complete_ledgers] =
registry_.
get().getLedgerMaster().getCompleteLedgers();
2678 info[jss::amendment_blocked] =
true;
2692 lastClose[jss::converge_time_s] =
2700 info[jss::last_close] = lastClose;
2707 if (
auto const netid =
registry_.get().getOverlay().networkID())
2708 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2710 auto const escalationMetrics =
2713 auto const loadFactorServer =
registry_.
get().getFeeTrack().getLoadFactor();
2714 auto const loadBaseServer =
registry_.
get().getFeeTrack().getLoadBase();
2718 auto const loadFactorFeeEscalation =
mulDiv(
2719 escalationMetrics.openLedgerFeeLevel,
2721 escalationMetrics.referenceFeeLevel)
2724 auto const loadFactor =
2725 std::max(safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2729 info[jss::load_base] = loadBaseServer;
2730 info[jss::load_factor] =
trunc32(loadFactor);
2731 info[jss::load_factor_server] = loadFactorServer;
2738 info[jss::load_factor_fee_escalation] = escalationMetrics.openLedgerFeeLevel.jsonClipped();
2739 info[jss::load_factor_fee_queue] = escalationMetrics.minProcessingFeeLevel.jsonClipped();
2740 info[jss::load_factor_fee_reference] = escalationMetrics.referenceFeeLevel.jsonClipped();
2744 info[jss::load_factor] =
static_cast<double>(loadFactor) / loadBaseServer;
2746 if (loadFactorServer != loadFactor)
2747 info[jss::load_factor_server] =
static_cast<double>(loadFactorServer) / loadBaseServer;
2752 if (fee != loadBaseServer)
2753 info[jss::load_factor_local] =
static_cast<double>(fee) / loadBaseServer;
2754 fee =
registry_.get().getFeeTrack().getRemoteFee();
2755 if (fee != loadBaseServer)
2756 info[jss::load_factor_net] =
static_cast<double>(fee) / loadBaseServer;
2757 fee =
registry_.get().getFeeTrack().getClusterFee();
2758 if (fee != loadBaseServer)
2759 info[jss::load_factor_cluster] =
static_cast<double>(fee) / loadBaseServer;
2761 if (escalationMetrics.openLedgerFeeLevel != escalationMetrics.referenceFeeLevel &&
2762 (admin || loadFactorFeeEscalation != loadFactor))
2764 info[jss::load_factor_fee_escalation] =
2765 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2766 escalationMetrics.referenceFeeLevel);
2768 if (escalationMetrics.minProcessingFeeLevel != escalationMetrics.referenceFeeLevel)
2770 info[jss::load_factor_fee_queue] =
2771 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2772 escalationMetrics.referenceFeeLevel);
2790 XRPAmount const baseFee = lpClosed->fees().base;
2792 l[jss::seq] =
Json::UInt(lpClosed->header().seq);
2793 l[jss::hash] =
to_string(lpClosed->header().hash);
2798 l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
2799 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2800 l[jss::close_time] =
2806 l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
2807 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2809 if (
auto const closeOffset =
registry_.get().getTimeKeeper().closeOffset();
2810 std::abs(closeOffset.count()) >= 60)
2811 l[jss::close_time_offset] =
static_cast<std::uint32_t>(closeOffset.count());
2817 l[jss::age] =
Json::UInt(age < highAgeThreshold ? age.count() : 0);
2821 auto lCloseTime = lpClosed->header().closeTime;
2822 auto closeTime =
registry_.
get().getTimeKeeper().closeTime();
2823 if (lCloseTime <= closeTime)
2825 using namespace std::chrono_literals;
2826 auto age = closeTime - lCloseTime;
2827 l[jss::age] =
Json::UInt(age < highAgeThreshold ? age.count() : 0);
2834 info[jss::validated_ledger] = l;
2838 info[jss::closed_ledger] = l;
2844 info[jss::published_ledger] =
"none";
2846 else if (lpPublished->header().seq != lpClosed->header().seq)
2848 info[jss::published_ledger] = lpPublished->header().seq;
2854 info[jss::jq_trans_overflow] =
2857 info[jss::peer_disconnects_resources] =
2862 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2866 for (
auto const& port :
registry_.get().getServerHandler().setup().ports)
2870 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2871 port.admin_user.empty() && port.admin_password.empty()))
2885 for (
auto const& p : proto)
2886 jv[jss::protocol].append(p);
2890 if (
registry_.get().getApp().config().exists(SECTION_PORT_GRPC))
2892 auto const& grpcSection =
registry_.
get().getApp().config().section(SECTION_PORT_GRPC);
2893 auto const optPort = grpcSection.
get(
"port");
2894 if (optPort && grpcSection.get(
"ip"))
2897 jv[jss::port] = *optPort;
2899 jv[jss::protocol].
append(
"grpc");
2902 info[jss::ports] = std::move(ports);
2947 [&](
Json::Value const& jv) { p->send(jv, true); });
2967 registry_.
get().getAcceptedLedgerCache().fetch(lpAccepted->header().hash);
2971 registry_.get().getAcceptedLedgerCache().canonicalize_replace_client(
2972 lpAccepted->header().hash, alpAccepted);
2976 alpAccepted->getLedger().
get() == lpAccepted.
get(),
2977 "xrpl::NetworkOPsImp::pubLedger : accepted input");
2980 JLOG(
m_journal.
debug()) <<
"Publishing ledger " << lpAccepted->header().seq <<
" "
2981 << lpAccepted->header().hash;
2989 jvObj[jss::type] =
"ledgerClosed";
2990 jvObj[jss::ledger_index] = lpAccepted->header().seq;
2991 jvObj[jss::ledger_hash] =
to_string(lpAccepted->header().hash);
2992 jvObj[jss::ledger_time] =
2995 jvObj[jss::network_id] =
registry_.
get().getNetworkIDService().getNetworkID();
2997 if (!lpAccepted->rules().enabled(featureXRPFees))
2999 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3000 jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
3001 jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped();
3003 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
3007 jvObj[jss::validated_ledgers] =
3008 registry_.
get().getLedgerMaster().getCompleteLedgers();
3017 p->send(jvObj,
true);
3037 p->send(jvObj,
true);
3048 static bool firstTime =
true;
3055 for (
auto& inner : outer.second)
3057 auto& subInfo = inner.second;
3058 if (subInfo.index_->separationLedgerSeq_ == 0)
3069 for (
auto const& accTx : *alpAccepted)
3080 registry_.get().getOpenLedger().current()->fees().base,
3124 jvObj[jss::type] =
"transaction";
3139 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3140 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3142 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3143 uint32_t netID =
registry_.get().getNetworkIDService().getNetworkID();
3144 if (transaction->isFieldPresent(sfNetworkID))
3145 netID = transaction->getFieldU32(sfNetworkID);
3149 jvObj[jss::ctid] = *ctid;
3151 if (!ledger->open())
3152 jvObj[jss::ledger_hash] =
to_string(ledger->header().hash);
3156 jvObj[jss::ledger_index] = ledger->header().seq;
3157 jvObj[jss::transaction][jss::date] = ledger->header().closeTime.time_since_epoch().count();
3158 jvObj[jss::validated] =
true;
3159 jvObj[jss::close_time_iso] =
to_string_iso(ledger->header().closeTime);
3165 jvObj[jss::validated] =
false;
3166 jvObj[jss::ledger_current_index] = ledger->header().seq;
3169 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3170 jvObj[jss::engine_result] = sToken;
3171 jvObj[jss::engine_result_code] = result;
3172 jvObj[jss::engine_result_message] = sHuman;
3174 if (transaction->getTxnType() == ttOFFER_CREATE)
3176 auto const account = transaction->getAccountID(sfAccount);
3177 auto const amount = transaction->getFieldAmount(sfTakerGets);
3180 if (account != amount.issue().account)
3184 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3193 RPC::insertDeliverMax(jvTx[jss::transaction], transaction->getTxnType(), Version);
3195 if constexpr (Version > 1)
3197 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3198 jvTx[jss::hash] = hash;
3202 jvTx[jss::transaction][jss::hash] = hash;
3215 auto const& stTxn = transaction.
getTxn();
3219 auto const trResult = transaction.
getResult();
3234 [&](
Json::Value const& jv) { p->send(jv, true); });
3253 [&](
Json::Value const& jv) { p->send(jv, true); });
3264 registry_.get().getOrderBookDB().processTxn(ledger, transaction, jvObj);
3280 auto const currLedgerSeq = ledger->seq();
3286 for (
auto const& affectedAccount : transaction.
getAffected())
3291 auto it = simiIt->second.begin();
3293 while (it != simiIt->second.end())
3305 it = simiIt->second.erase(it);
3312 auto it = simiIt->second.begin();
3313 while (it != simiIt->second.end())
3325 it = simiIt->second.erase(it);
3333 auto& subs = historyIt->second;
3334 auto it = subs.begin();
3335 while (it != subs.end())
3338 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3352 it = subs.erase(it);
3363 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3365 if (!notify.
empty() || !accountHistoryNotify.
empty())
3367 auto const& stTxn = transaction.
getTxn();
3371 auto const trResult = transaction.
getResult();
3377 isrListener->getApiVersion(),
3378 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3382 jvObj.
set(jss::account_history_boundary,
true);
3386 "xrpl::NetworkOPsImp::pubAccountTransaction : "
3387 "account_history_tx_stream not set");
3388 for (
auto& info : accountHistoryNotify)
3390 auto& index = info.index_;
3391 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3392 jvObj.
set(jss::account_history_tx_first,
true);
3394 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3397 info.sink_->getApiVersion(),
3398 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3422 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3427 auto it = simiIt->second.begin();
3429 while (it != simiIt->second.end())
3441 it = simiIt->second.erase(it);
3449 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3451 if (!notify.
empty() || !accountHistoryNotify.
empty())
3459 isrListener->getApiVersion(),
3460 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3465 "xrpl::NetworkOPs::pubProposedAccountTransaction : "
3466 "account_history_tx_stream not set");
3467 for (
auto& info : accountHistoryNotify)
3469 auto& index = info.index_;
3470 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3471 jvObj.
set(jss::account_history_tx_first,
true);
3472 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3474 info.sink_->getApiVersion(),
3475 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3492 for (
auto const& naAccountID : vnaAccountIDs)
3496 isrListener->insertSubAccountInfo(naAccountID, rt);
3501 for (
auto const& naAccountID : vnaAccountIDs)
3503 auto simIterator = subMap.
find(naAccountID);
3504 if (simIterator == subMap.
end())
3508 usisElement[isrListener->getSeq()] = isrListener;
3510 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3515 simIterator->second[isrListener->getSeq()] = isrListener;
3526 for (
auto const& naAccountID : vnaAccountIDs)
3529 isrListener->deleteSubAccountInfo(naAccountID, rt);
3546 for (
auto const& naAccountID : vnaAccountIDs)
3548 auto simIterator = subMap.
find(naAccountID);
3550 if (simIterator != subMap.
end())
3553 simIterator->second.erase(uSeq);
3555 if (simIterator->second.empty())
3558 subMap.
erase(simIterator);
3567 enum DatabaseType { Sqlite,
None };
3568 static auto const databaseType = [&]() -> DatabaseType {
3573 return DatabaseType::Sqlite;
3575 return DatabaseType::None;
3578 if (databaseType == DatabaseType::None)
3581 UNREACHABLE(
"xrpl::NetworkOPsImp::addAccountHistoryJob : no database");
3595 auto const& accountId = subInfo.
index_->accountId_;
3596 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3597 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3600 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3610 auto stx = tx->getSTransaction();
3611 if (stx->getAccountID(sfAccount) == accountId && stx->getSeqValue() == 1)
3615 for (
auto& node : meta->getNodes())
3617 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3620 if (node.isFieldPresent(sfNewFields))
3623 dynamic_cast<STObject const*
>(node.peekAtPField(sfNewFields));
3626 if (inner->isFieldPresent(sfAccount) &&
3627 inner->getAccountID(sfAccount) == accountId)
3638 auto send = [&](
Json::Value const& jvObj,
bool unsubscribe) ->
bool {
3641 sptr->send(jvObj,
true);
3650 auto sendMultiApiJson = [&](
MultiApiJson const& jvObj,
bool unsubscribe) ->
bool {
3654 sptr->getApiVersion(),
3655 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3674 auto& db =
registry_.get().getRelationalDatabase();
3676 accountId, {minLedger, maxLedger}, marker, 0,
true};
3677 return db.newestAccountTxPage(options);
3682 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3683 "getMoreTxns : invalid database type");
3693 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3695 int feeChargeCount = 0;
3704 <<
"AccountHistory job for account " <<
toBase58(accountId)
3705 <<
" no InfoSub. Fee charged " << feeChargeCount <<
" times.";
3710 auto startLedgerSeq = (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3712 <<
", working on ledger range [" << startLedgerSeq <<
","
3713 << lastLedgerSeq <<
"]";
3715 auto haveRange = [&]() ->
bool {
3718 auto haveSomeValidatedLedgers =
3719 registry_.get().getLedgerMaster().getValidatedRange(
3720 validatedMin, validatedMax);
3722 return haveSomeValidatedLedgers && validatedMin <= startLedgerSeq &&
3723 lastLedgerSeq <= validatedMax;
3728 JLOG(
m_journal.
debug()) <<
"AccountHistory reschedule job for account "
3729 <<
toBase58(accountId) <<
", incomplete ledger range ["
3730 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3736 while (!subInfo.
index_->stopHistorical_)
3738 auto dbResult = getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3743 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3744 "getMoreTxns failed");
3746 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3752 auto const& txns = dbResult->first;
3753 marker = dbResult->second;
3754 size_t const num_txns = txns.size();
3755 for (
size_t i = 0; i < num_txns; ++i)
3757 auto const& [tx, meta] = txns[i];
3762 <<
toBase58(accountId) <<
" empty tx or meta.";
3767 registry_.get().getLedgerMaster().getLedgerBySeq(tx->getLedger());
3772 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3773 "getLedgerBySeq failed");
3775 <<
toBase58(accountId) <<
" no ledger.";
3785 "NetworkOPsImp::addAccountHistoryJob : "
3786 "getSTransaction failed");
3788 <<
"AccountHistory job for account " <<
toBase58(accountId)
3789 <<
" getSTransaction failed.";
3796 auto const trR = meta->getResultTER();
3799 jvTx.
set(jss::account_history_tx_index, txHistoryIndex--);
3800 if (i + 1 == num_txns || txns[i + 1].first->getLedger() != tx->getLedger())
3801 jvTx.
set(jss::account_history_boundary,
true);
3803 if (isFirstTx(tx, meta))
3805 jvTx.
set(jss::account_history_tx_first,
true);
3806 sendMultiApiJson(jvTx,
false);
3809 <<
"AccountHistory job for account " <<
toBase58(accountId)
3810 <<
" done, found last tx.";
3814 sendMultiApiJson(jvTx,
false);
3820 <<
"AccountHistory job for account " <<
toBase58(accountId)
3821 <<
" paging, marker=" << marker->ledgerSeq <<
":" << marker->txnSeq;
3829 if (!subInfo.
index_->stopHistorical_)
3831 lastLedgerSeq = startLedgerSeq - 1;
3832 if (lastLedgerSeq <= 1)
3835 <<
"AccountHistory job for account " <<
toBase58(accountId)
3836 <<
" done, reached genesis ledger.";
3849 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3850 auto const& accountId = subInfo.
index_->accountId_;
3852 if (!ledger->exists(accountKeylet))
3855 <<
", no need to add AccountHistory job.";
3860 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3862 if (sleAcct->getFieldU32(sfSequence) == 1)
3865 <<
"subAccountHistoryStart, genesis account " <<
toBase58(accountId)
3866 <<
" does not have tx, no need to add AccountHistory job.";
3874 "xrpl::NetworkOPsImp::subAccountHistoryStart : failed to "
3875 "access genesis account");
3880 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
3881 subInfo.
index_->haveHistorical_ =
true;
3883 JLOG(
m_journal.
debug()) <<
"subAccountHistoryStart, add AccountHistory job: accountId="
3884 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
3892 if (!isrListener->insertSubAccountHistory(accountId))
3894 JLOG(
m_journal.
debug()) <<
"subAccountHistory, already subscribed to account "
3905 inner.
emplace(isrListener->getSeq(), ahi);
3910 simIterator->second.emplace(isrListener->getSeq(), ahi);
3913 auto const ledger =
registry_.get().getLedgerMaster().getValidatedLedger();
3923 JLOG(
m_journal.
debug()) <<
"subAccountHistory, no validated ledger yet, delay start";
3936 isrListener->deleteSubAccountHistory(account);
3950 auto& subInfoMap = simIterator->second;
3951 auto subInfoIter = subInfoMap.find(seq);
3952 if (subInfoIter != subInfoMap.end())
3954 subInfoIter->second.index_->stopHistorical_ =
true;
3959 simIterator->second.erase(seq);
3960 if (simIterator->second.empty())
3966 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
3973 if (
auto listeners =
registry_.get().getOrderBookDB().makeBookListeners(book))
3975 listeners->addSubscriber(isrListener);
3980 UNREACHABLE(
"xrpl::NetworkOPsImp::subBook : null book listeners");
3989 if (
auto listeners =
registry_.get().getOrderBookDB().getBookListeners(book))
3990 listeners->removeSubscriber(uSeq);
4000 XRPL_ASSERT(
m_standalone,
"xrpl::NetworkOPsImp::acceptLedger : is standalone");
4003 Throw<std::runtime_error>(
"Operation only possible in STANDALONE mode.");
4018 jvResult[jss::ledger_index] = lpClosed->header().seq;
4019 jvResult[jss::ledger_hash] =
to_string(lpClosed->header().hash);
4020 jvResult[jss::ledger_time] =
4022 if (!lpClosed->rules().enabled(featureXRPFees))
4024 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4025 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
4026 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4027 jvResult[jss::network_id] =
registry_.
get().getNetworkIDService().getNetworkID();
4032 jvResult[jss::validated_ledgers] =
registry_.
get().getLedgerMaster().getCompleteLedgers();
4091 auto const& feeTrack =
registry_.get().getFeeTrack();
4092 jvResult[jss::random] =
to_string(uRandom);
4094 jvResult[jss::load_base] = feeTrack.getLoadBase();
4095 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4096 jvResult[jss::hostid] =
getHostId(admin);
4097 jvResult[jss::pubkey_node] =
4234 if (map.contains(pInfo->getSeq()))
4241#ifndef USE_NEW_BOOK_PAGE
4252 unsigned int iLimit,
4261 uint256 uTipIndex = uBookBase;
4265 stream <<
"getBookPage:" << book;
4266 stream <<
"getBookPage: uBookBase=" << uBookBase;
4267 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4268 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4273 bool const bGlobalFreeze =
4277 bool bDirectAdvance =
true;
4281 unsigned int uBookEntry = 0;
4285 auto viewJ =
registry_.get().getJournal(
"View");
4287 while (!bDone && iLimit-- > 0)
4291 bDirectAdvance =
false;
4295 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4302 sleOfferDir.
reset();
4312 uTipIndex = sleOfferDir->key();
4315 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4317 JLOG(
m_journal.
trace()) <<
"getBookPage: uTipIndex=" << uTipIndex;
4318 JLOG(
m_journal.
trace()) <<
"getBookPage: offerIndex=" << offerIndex;
4328 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4329 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4330 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4332 bool firstOwnerOffer(
true);
4338 saOwnerFunds = saTakerGets;
4340 else if (bGlobalFreeze)
4348 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4349 if (umBalanceEntry != umBalance.
end())
4353 saOwnerFunds = umBalanceEntry->second;
4354 firstOwnerOffer =
false;
4368 if (saOwnerFunds < beast::zero)
4372 saOwnerFunds.
clear();
4380 STAmount saOwnerFundsLimit = saOwnerFunds;
4392 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4395 if (saOwnerFundsLimit >= saTakerGets)
4398 saTakerGetsFunded = saTakerGets;
4404 saTakerGetsFunded = saOwnerFundsLimit;
4406 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4408 saTakerPays,
multiply(saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4409 .setJson(jvOffer[jss::taker_pays_funded]);
4416 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4420 jvOf[jss::quality] = saDirRate.
getText();
4422 if (firstOwnerOffer)
4423 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4430 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4432 bDirectAdvance =
true;
4436 JLOG(
m_journal.
trace()) <<
"getBookPage: offerIndex=" << offerIndex;
4456 unsigned int iLimit,
4464 MetaView lesActive(lpLedger,
tapNONE,
true);
4465 OrderBookIterator obIterator(lesActive, book);
4469 bool const bGlobalFreeze =
4472 while (iLimit-- > 0 && obIterator.nextOffer())
4477 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4478 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4479 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4480 STAmount saDirRate = obIterator.getCurrentRate();
4486 saOwnerFunds = saTakerGets;
4488 else if (bGlobalFreeze)
4496 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4498 if (umBalanceEntry != umBalance.
end())
4502 saOwnerFunds = umBalanceEntry->second;
4508 saOwnerFunds = lesActive.accountHolds(
4511 if (saOwnerFunds.isNegative())
4515 saOwnerFunds.zero();
4522 STAmount saTakerGetsFunded;
4523 STAmount saOwnerFundsLimit = saOwnerFunds;
4535 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4538 if (saOwnerFundsLimit >= saTakerGets)
4541 saTakerGetsFunded = saTakerGets;
4546 saTakerGetsFunded = saOwnerFundsLimit;
4548 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4552 std::min(saTakerPays,
multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4553 .setJson(jvOffer[jss::taker_pays_funded]);
4556 STAmount saOwnerPays = (
parityRate == offerRate)
4558 :
std::
min(saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4560 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4562 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4566 jvOf[jss::quality] = saDirRate.
getText();
4581 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4614 ++counters_[
static_cast<std::size_t>(om)].transitions;
4618 std::chrono::duration_cast<std::chrono::microseconds>(now - processStart_).count();
4621 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4630 auto [counters, mode, start, initialSync] = getCounterData();
4631 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4641 auto& state = obj[jss::state_accounting][
states_[i]];
4642 state[jss::transitions] =
std::to_string(counters[i].transitions);
4643 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4646 if (initialSync != 0u)
4647 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4662 boost::asio::io_context& ioCtx,
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
const_iterator begin() const
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
TxMeta const & getMeta() const
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::string const & name() const
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Json::Value getJson(int c=0)
std::chrono::seconds getValidatedLedgerAge()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::size_t getFetchPackCacheSize() const
std::shared_ptr< Ledger const > getClosedLedger()
std::shared_ptr< Ledger const > getValidatedLedger()
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< ReadView const > getCurrentLedger()
Manages the current fee schedule.
void heartbeat()
Reset the stall detection timer.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void json(Json::Value &obj) const
Output state counters in JSON format.
std::chrono::steady_clock::time_point const processStart_
static std::array< Json::StaticString const, 5 > const states_
CounterData getCounterData() const
std::uint64_t initialSyncUs_
std::array< Counters, 5 > counters_
void mode(OperatingMode om)
Record state transition.
std::chrono::steady_clock::time_point start_
Transaction with input flags and results to be applied in batches.
std::shared_ptr< Transaction > const transaction
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::string getHostId(bool forAdmin)
void reportConsensusStateChange(ConsensusPhase phase)
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void clearNeedNetworkLedger() override
ServerFeeSummary mLastFeeSummary
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
DispatchState mDispatchState
std::size_t const minPeerCount_
static std::array< char const *, 5 > const states_
std::set< uint256 > pendingValidations_
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
bool unsubManifests(std::uint64_t uListener) override
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subManifests(InfoSub::ref ispListener) override
void stateAccounting(Json::Value &obj) override
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
SubInfoMapType mSubRTAccount
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
void transactionBatch()
Apply transactions in batches.
bool unsubRTTransactions(std::uint64_t uListener) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
bool processTrustedProposal(RCLCxPeerPos proposal) override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
void pubValidation(std::shared_ptr< STValidation > const &val) override
bool subBook(InfoSub::ref ispListener, Book const &) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
std::atomic< OperatingMode > mMode
void setMode(OperatingMode om) override
void setAmendmentBlocked() override
void pubConsensus(ConsensusPhase phase)
std::recursive_mutex mSubLock
bool isNeedNetworkLedger() override
DispatchState
Synchronization states for transaction batches.
std::atomic< bool > needNetworkLedger_
boost::asio::steady_timer heartbeatTimer_
bool subConsensus(InfoSub::ref ispListener) override
std::reference_wrapper< ServiceRegistry > registry_
bool unsubBook(std::uint64_t uListener, Book const &) override
bool unsubLedger(std::uint64_t uListener) override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
std::optional< PublicKey > const validatorPK_
std::atomic< bool > amendmentBlocked_
void clearAmendmentWarned() override
void updateLocalTx(ReadView const &view) override
void clearLedgerFetch() override
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool isAmendmentBlocked() override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::unique_ptr< LocalTxs > m_localTX
NetworkOPsImp(ServiceRegistry ®istry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &ioCtx, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void setStandAlone() override
void setNeedNetworkLedger() override
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
bool unsubServer(std::uint64_t uListener) override
SubAccountHistoryMapType mSubAccountHistory
void processClusterTimer()
bool unsubConsensus(std::uint64_t uListener) override
std::condition_variable mCond
void pubManifest(Manifest const &) override
void consensusViewChange() override
boost::asio::steady_timer accountHistoryTxTimer_
Json::Value getConsensusInfo() override
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void setUNLBlocked() override
bool unsubValidations(std::uint64_t uListener) override
bool subPeerStatus(InfoSub::ref ispListener) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
ConsensusPhase mLastConsensusPhase
OperatingMode getOperatingMode() const override
std::optional< PublicKey > const validatorMasterPK_
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
std::vector< TransactionStatus > mTransactions
bool tryRemoveRpcSub(std::string const &strUrl) override
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void processHeartbeatTimer()
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transaction to complete.
void submitTransaction(std::shared_ptr< STTx const > const &) override
void setAmendmentWarned() override
LedgerMaster & m_ledgerMaster
Json::Value getServerInfo(bool human, bool admin, bool counters) override
StateAccounting accounting_
bool subValidations(InfoSub::ref ispListener) override
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool subRTTransactions(InfoSub::ref ispListener) override
std::atomic< bool > unlBlocked_
bool unsubBookChanges(std::uint64_t uListener) override
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
void setStateTimer() override
Called to initially start our timers.
std::size_t getLocalTxCount() override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
bool unsubTransactions(std::uint64_t uListener) override
bool isAmendmentWarned() override
bool subTransactions(InfoSub::ref ispListener) override
std::mutex validationsMutex_
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
SubInfoMapType mSubAccount
void clearUNLBlocked() override
bool isUNLBlocked() override
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
std::atomic< bool > amendmentWarned_
boost::asio::steady_timer clusterTimer_
bool unsubPeerStatus(std::uint64_t uListener) override
void reportFeeChange() override
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isBlocked() override
~NetworkOPsImp() override
Json::Value getLedgerFetchInfo() override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subBookChanges(InfoSub::ref ispListener) override
Provides server functionality for clients.
Writable ledger view that accumulates state and tx changes.
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
Json::Value getJson(bool full) const
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
Issue const & issue() const
std::string getText() const override
void setJson(Json::Value &) const
std::size_t size() const noexcept
void const * data() const noexcept
Service registry for dependency injection.
Validator keys and manifest as set in configuration file.
Json::Value jsonClipped() const
constexpr double decimalXRP() const
static constexpr std::size_t size()
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
std::string const & getCommitHash()
std::string const & getBuildBranch()
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
STAmount divide(STAmount const &amount, Rate const &rate)
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T, the target is unchanged.
constexpr FlagValue tfInnerBatchTxn
bool isTerRetry(TER x) noexcept
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
std::string to_string(base_uint< Bits, Tag > const &a)
T get(Section const &section, std::string const &name, T const &defaultValue=T{})
Retrieve a key/value pair from a section.
std::string strHex(FwdIt begin, FwdIt end)
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules)
Checks transaction signature and local checks.
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::uint64_t getQuality(uint256 const &uBase)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
FeeSetup setup_FeeVote(Section const &section)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j, SpendableHandling includeFullBalance=shSIMPLE_BALANCE)
Number root(Number f, unsigned d)
bool transResultInfo(TER code, std::string &token, std::string &text)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
STAmount multiply(STAmount const &amount, Rate const &rate)
static auto const genesisAccountId
void forAllApiVersions(Fn const &fn, Args &&... args)
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
@ current
This was a new validation and was added.
constexpr std::size_t maxPoppedTransactions
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Check if the issuer has the global freeze flag set.
STAmount amountFromQuality(std::uint64_t rate)
bool isTefFailure(TER x) noexcept
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
auto constexpr muldiv_max
uint256 getQualityNext(uint256 const &uBase)
ConsensusPhase
Phases of consensus for a single ledger round.
@ open
We haven't closed our ledger yet, but others might have.
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
constexpr std::uint32_t FEE_UNITS_DEPRECATED
AccountID calcAccountID(PublicKey const &pk)
uint256 getBookBase(Book const &book)
Json::Value rpcError(error_code_i iError)
std::string to_string_iso(date::sys_time< Duration > tp)
std::unique_ptr< LocalTxs > make_LocalTxs()
bool isTelLocal(TER x) noexcept
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool isTesSuccess(TER x) noexcept
static std::uint32_t trunc32(std::uint64_t v)
static std::array< char const *, 5 > const stateNames
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
bool isTemMalformed(TER x) noexcept
std::unique_ptr< NetworkOPs > make_NetworkOPs(ServiceRegistry &registry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startValid, JobQueue &jobQueue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &ioCtx, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
@ warnRPC_AMENDMENT_BLOCKED
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_EXPIRED_VALIDATOR_LIST
T set_intersection(T... args)
PublicKey masterKey
The master key associated with this manifest.
std::string serialized
The manifest in serialized form.
Blob getMasterSignature() const
Returns manifest master key signature.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
std::uint32_t sequence
The sequence number of this manifest.
Server fees published on server subscription.
std::optional< TxQ::Metrics > em
bool operator!=(ServerFeeSummary const &b) const
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
ServerFeeSummary()=default
std::uint32_t loadFactorServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::chrono::microseconds dur
std::uint64_t transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Gauge connected_transitions
beast::insight::Gauge full_transitions
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge full_duration
beast::insight::Hook hook
std::int32_t historyTxIndex_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::uint32_t separationLedgerSeq_
std::uint32_t historyLastLedgerSeq_
SubAccountHistoryIndex(AccountID const &accountId)
std::atomic< bool > stopHistorical_
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
void set(char const *key, auto const &v)
IsMemberResult isMember(char const *key) const
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)