Clio develop
The XRP Ledger API server.
CassandraBackend.hpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of clio: https://github.com/XRPLF/clio
4 Copyright (c) 2023, the clio developers.
5
6 Permission to use, copy, modify, and distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#pragma once
21
22#include "data/BackendInterface.hpp"
23#include "data/DBHelpers.hpp"
24#include "data/LedgerCacheInterface.hpp"
25#include "data/LedgerHeaderCache.hpp"
26#include "data/Types.hpp"
27#include "data/cassandra/Concepts.hpp"
28#include "data/cassandra/Handle.hpp"
29#include "data/cassandra/Schema.hpp"
30#include "data/cassandra/SettingsProvider.hpp"
31#include "data/cassandra/Types.hpp"
32#include "data/cassandra/impl/ExecutionStrategy.hpp"
33#include "util/Assert.hpp"
34#include "util/LedgerUtils.hpp"
35#include "util/Profiler.hpp"
36#include "util/log/Logger.hpp"
37
38#include <boost/asio/spawn.hpp>
39#include <boost/json/object.hpp>
40#include <boost/uuid/string_generator.hpp>
41#include <boost/uuid/uuid.hpp>
42#include <cassandra.h>
43#include <fmt/format.h>
44#include <xrpl/basics/Blob.h>
45#include <xrpl/basics/base_uint.h>
46#include <xrpl/basics/strHex.h>
47#include <xrpl/protocol/AccountID.h>
48#include <xrpl/protocol/Indexes.h>
49#include <xrpl/protocol/LedgerHeader.h>
50#include <xrpl/protocol/nft.h>
51
52#include <algorithm>
53#include <atomic>
54#include <chrono>
55#include <cstddef>
56#include <cstdint>
57#include <iterator>
58#include <limits>
59#include <optional>
60#include <stdexcept>
61#include <string>
62#include <tuple>
63#include <utility>
64#include <vector>
65
66class CacheBackendCassandraTest;
67
68namespace data::cassandra {
69
79template <
80 SomeSettingsProvider SettingsProviderType,
81 SomeExecutionStrategy ExecutionStrategyType,
82 typename FetchLedgerCacheType = FetchLedgerCache>
83class BasicCassandraBackend : public BackendInterface {
84 util::Logger log_{"Backend"};
85
86 SettingsProviderType settingsProvider_;
87 Schema<SettingsProviderType> schema_;
88 std::atomic_uint32_t ledgerSequence_ = 0u;
89 friend class ::CacheBackendCassandraTest;
90
91protected:
92 Handle handle_;
93
94 // these have to be mutable because of BackendInterface constness :(
95 mutable ExecutionStrategyType executor_;
96 // TODO: move to interface level
97 mutable FetchLedgerCacheType ledgerCache_{};
98
99public:
107 BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
108 : BackendInterface(cache)
109 , settingsProvider_{std::move(settingsProvider)}
110 , schema_{settingsProvider_}
111 , handle_{settingsProvider_.getSettings()}
112 , executor_{settingsProvider_.getSettings(), handle_}
113 {
114 if (auto const res = handle_.connect(); not res)
115 throw std::runtime_error("Could not connect to database: " + res.error());
116
117 if (not readOnly) {
118 if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
119 // On DataStax, keyspace creation can be configured to be allowed only through the admin
120 // interface. This does not mean that the keyspace does not already exist, though.
121 if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
122 throw std::runtime_error("Could not create keyspace: " + res.error());
123 }
124
125 if (auto const res = handle_.executeEach(schema_.createSchema); not res)
126 throw std::runtime_error("Could not create schema: " + res.error());
127 }
128
129 try {
130 schema_.prepareStatements(handle_);
131 } catch (std::runtime_error const& ex) {
132 auto const error = fmt::format(
133 "Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
134 "node with write access to DB should be started first.",
135 ex.what(),
136 readOnly
137 );
138 LOG(log_.error()) << error;
139 throw std::runtime_error(error);
140 }
141 LOG(log_.info()) << "Created (revamped) CassandraBackend";
142 }
143
144 /*
145 * @brief Move constructor is deleted because handle_ is shared by reference with executor
146 */
147 BasicCassandraBackend(BasicCassandraBackend&&) = delete;
148
149 TransactionsAndCursor
150 fetchAccountTransactions(
151 ripple::AccountID const& account,
152 std::uint32_t const limit,
153 bool forward,
154 std::optional<TransactionsCursor> const& cursorIn,
155 boost::asio::yield_context yield
156 ) const override
157 {
158 auto rng = fetchLedgerRange();
159 if (!rng)
160 return {.txns = {}, .cursor = {}};
161
162 Statement const statement = [this, forward, &account]() {
163 if (forward)
164 return schema_->selectAccountTxForward.bind(account);
165
166 return schema_->selectAccountTx.bind(account);
167 }();
168
169 auto cursor = cursorIn;
170 if (cursor) {
171 statement.bindAt(1, cursor->asTuple());
172 LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
173 << cursor->transactionIndex;
174 } else {
175 auto const seq = forward ? rng->minSequence : rng->maxSequence;
176 auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
177
178 statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
179 LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
180 << " tuple = " << placeHolder;
181 }
182
183 // FIXME: Limit is a hack to support uint32_t properly for the time
184 // being. Should be removed later and schema updated to use proper
185 // types.
186 statement.bindAt(2, Limit{limit});
187 auto const res = executor_.read(yield, statement);
188 auto const& results = res.value();
189 if (not results.hasRows()) {
190 LOG(log_.debug()) << "No rows returned";
191 return {};
192 }
193
194 std::vector<ripple::uint256> hashes = {};
195 auto numRows = results.numRows();
196 LOG(log_.info()) << "num_rows = " << numRows;
197
198 for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
199 hashes.push_back(hash);
200 if (--numRows == 0) {
201 LOG(log_.debug()) << "Setting cursor";
202 cursor = data;
203 }
204 }
205
206 auto const txns = fetchTransactions(hashes, yield);
207 LOG(log_.debug()) << "Txns = " << txns.size();
208
209 if (txns.size() == limit) {
210 LOG(log_.debug()) << "Returning cursor";
211 return {txns, cursor};
212 }
213
214 return {txns, {}};
215 }
216
217 void
218 waitForWritesToFinish() override
219 {
220 executor_.sync();
221 }
222
223 bool
224 doFinishWrites() override
225 {
226 waitForWritesToFinish();
227
228 if (!range_) {
229 executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
230 }
231
232 if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
233 LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
234 return false;
235 }
236
237 LOG(log_.info()) << "Committed ledger " << ledgerSequence_;
238 return true;
239 }
240
241 void
242 writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
243 {
244 executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));
245
246 executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);
247
248 ledgerSequence_ = ledgerHeader.seq;
249 }
250
251 std::optional<std::uint32_t>
252 fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
253 {
254 if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
255 if (auto const& result = res.value(); result) {
256 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
257 return maybeValue;
258
259 LOG(log_.error()) << "Could not fetch latest ledger - no rows";
260 return std::nullopt;
261 }
262
263 LOG(log_.error()) << "Could not fetch latest ledger - no result";
264 } else {
265 LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
266 }
267
268 return std::nullopt;
269 }
270
271 std::optional<ripple::LedgerHeader>
272 fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
273 {
274 if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
275 return lock->ledger;
276
277 auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
278 if (res) {
279 if (auto const& result = res.value(); result) {
280 if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
281 auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
282 ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
283 return header;
284 }
285
286 LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
287 return std::nullopt;
288 }
289
290 LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
291 } else {
292 LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
293 }
294
295 return std::nullopt;
296 }
297
298 std::optional<ripple::LedgerHeader>
299 fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
300 {
301 if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
302 if (auto const& result = res.value(); result) {
303 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
304 return fetchLedgerBySequence(*maybeValue, yield);
305
306 LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
307 return std::nullopt;
308 }
309
310 LOG(log_.error()) << "Could not fetch ledger by hash - no result";
311 } else {
312 LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
313 }
314
315 return std::nullopt;
316 }
317
318 std::optional<LedgerRange>
319 hardFetchLedgerRange(boost::asio::yield_context yield) const override
320 {
321 auto const res = executor_.read(yield, schema_->selectLedgerRange);
322 if (res) {
323 auto const& results = res.value();
324 if (not results.hasRows()) {
325 LOG(log_.debug()) << "Could not fetch ledger range - no rows";
326 return std::nullopt;
327 }
328
329 // TODO: this is probably a good place to use a user-defined type in
330 // Cassandra instead of having two rows with a bool flag, or maybe at
331 // least use a tuple<int, int>?
332 LedgerRange range;
333 std::size_t idx = 0;
334 for (auto [seq] : extract<uint32_t>(results)) {
335 if (idx == 0) {
336 range.maxSequence = range.minSequence = seq;
337 } else if (idx == 1) {
338 range.maxSequence = seq;
339 }
340
341 ++idx;
342 }
343
344 if (range.minSequence > range.maxSequence)
345 std::swap(range.minSequence, range.maxSequence);
346
347 LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
348 << range.maxSequence;
349 return range;
350 }
351 LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
352
353 return std::nullopt;
354 }
355
356 std::vector<TransactionAndMetadata>
357 fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
358 {
359 auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
360 return fetchTransactions(hashes, yield);
361 }
362
363 std::vector<ripple::uint256>
364 fetchAllTransactionHashesInLedger(
365 std::uint32_t const ledgerSequence,
366 boost::asio::yield_context yield
367 ) const override
368 {
369 auto start = std::chrono::system_clock::now();
370 auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
371
372 if (not res) {
373 LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
374 return {};
375 }
376
377 auto const& result = res.value();
378 if (not result.hasRows()) {
379 LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
380 << std::to_string(ledgerSequence);
381 return {};
382 }
383
384 std::vector<ripple::uint256> hashes;
385 for (auto [hash] : extract<ripple::uint256>(result))
386 hashes.push_back(std::move(hash));
387
388 auto end = std::chrono::system_clock::now();
389 LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
390 << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
391 << " milliseconds";
392
393 return hashes;
394 }
395
396 std::optional<NFT>
397 fetchNFT(
398 ripple::uint256 const& tokenID,
399 std::uint32_t const ledgerSequence,
400 boost::asio::yield_context yield
401 ) const override
402 {
403 auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
404 if (not res)
405 return std::nullopt;
406
407 if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
408 auto [seq, owner, isBurned] = *maybeRow;
409 auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
410
411 // now fetch URI. Usually we will have the URI even for burned NFTs,
412 // but if the first ledger on this clio included NFTokenBurn
413 // transactions we will not have the URIs for any of those tokens.
414 // In any other case not having the URI indicates something went
415 // wrong with our data.
416 //
417 // TODO: in the future it would be great if any handlers that use
418 // this could inject a warning in this case (the case of not having
419 // a URI because it was burned in the first ledger) to indicate that
420 // even though we are returning a blank URI, the NFT might have had
421 // one.
422 auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
423 if (uriRes) {
424 if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
425 result->uri = *maybeUri;
426 }
427
428 return result;
429 }
430
431 LOG(log_.error()) << "Could not fetch NFT - no rows";
432 return std::nullopt;
433 }
434
435 TransactionsAndCursor
436 fetchNFTTransactions(
437 ripple::uint256 const& tokenID,
438 std::uint32_t const limit,
439 bool const forward,
440 std::optional<TransactionsCursor> const& cursorIn,
441 boost::asio::yield_context yield
442 ) const override
443 {
444 auto rng = fetchLedgerRange();
445 if (!rng)
446 return {.txns = {}, .cursor = {}};
447
448 Statement const statement = [this, forward, &tokenID]() {
449 if (forward)
450 return schema_->selectNFTTxForward.bind(tokenID);
451
452 return schema_->selectNFTTx.bind(tokenID);
453 }();
454
455 auto cursor = cursorIn;
456 if (cursor) {
457 statement.bindAt(1, cursor->asTuple());
458 LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
459 << cursor->transactionIndex;
460 } else {
461 auto const seq = forward ? rng->minSequence : rng->maxSequence;
462 auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
463
464 statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
465 LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
466 << " tuple = " << placeHolder;
467 }
468
469 statement.bindAt(2, Limit{limit});
470
471 auto const res = executor_.read(yield, statement);
472 auto const& results = res.value();
473 if (not results.hasRows()) {
474 LOG(log_.debug()) << "No rows returned";
475 return {};
476 }
477
478 std::vector<ripple::uint256> hashes = {};
479 auto numRows = results.numRows();
480 LOG(log_.info()) << "num_rows = " << numRows;
481
482 for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
483 hashes.push_back(hash);
484 if (--numRows == 0) {
485 LOG(log_.debug()) << "Setting cursor";
486 cursor = data;
487
488 // forward queries filter by ledger/tx sequence using `>=`,
489 // so we have to advance the index by one
490 if (forward)
491 ++cursor->transactionIndex;
492 }
493 }
494
495 auto const txns = fetchTransactions(hashes, yield);
496 LOG(log_.debug()) << "NFT Txns = " << txns.size();
497
498 if (txns.size() == limit) {
499 LOG(log_.debug()) << "Returning cursor";
500 return {txns, cursor};
501 }
502
503 return {txns, {}};
504 }
505
506 NFTsAndCursor
507 fetchNFTsByIssuer(
508 ripple::AccountID const& issuer,
509 std::optional<std::uint32_t> const& taxon,
510 std::uint32_t const ledgerSequence,
511 std::uint32_t const limit,
512 std::optional<ripple::uint256> const& cursorIn,
513 boost::asio::yield_context yield
514 ) const override
515 {
516 NFTsAndCursor ret;
517
518 Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
519 if (taxon.has_value()) {
520 auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
521 r.bindAt(1, *taxon);
522 r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
523 r.bindAt(3, Limit{limit});
524 return r;
525 }
526
527 auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
528 r.bindAt(
529 1,
530 std::make_tuple(
531 cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
532 cursorIn.value_or(ripple::uint256(0))
533 )
534 );
535 r.bindAt(2, Limit{limit});
536 return r;
537 }();
538
539 // Query for all the NFTs issued by the account, potentially filtered by the taxon
540 auto const res = executor_.read(yield, idQueryStatement);
541
542 auto const& idQueryResults = res.value();
543 if (not idQueryResults.hasRows()) {
544 LOG(log_.debug()) << "No rows returned";
545 return {};
546 }
547
548 std::vector<ripple::uint256> nftIDs;
549 for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
550 nftIDs.push_back(nftID);
551
552 if (nftIDs.empty())
553 return ret;
554
555 if (nftIDs.size() == limit)
556 ret.cursor = nftIDs.back();
557
558 std::vector<Statement> selectNFTStatements;
559 selectNFTStatements.reserve(nftIDs.size());
560
561 std::transform(
562 std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTStatements), [&](auto const& nftID) {
563 return schema_->selectNFT.bind(nftID, ledgerSequence);
564 }
565 );
566
567 auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
568
569 std::vector<Statement> selectNFTURIStatements;
570 selectNFTURIStatements.reserve(nftIDs.size());
571
572 std::transform(
573 std::cbegin(nftIDs), std::cend(nftIDs), std::back_inserter(selectNFTURIStatements), [&](auto const& nftID) {
574 return schema_->selectNFTURI.bind(nftID, ledgerSequence);
575 }
576 );
577
578 auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
579
580 for (auto i = 0u; i < nftIDs.size(); i++) {
581 if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
582 auto [seq, owner, isBurned] = *maybeRow;
583 NFT nft(nftIDs[i], seq, owner, isBurned);
584 if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
585 nft.uri = *maybeUri;
586 ret.nfts.push_back(nft);
587 }
588 }
589 return ret;
590 }
591
592 MPTHoldersAndCursor
593 fetchMPTHolders(
594 ripple::uint192 const& mptID,
595 std::uint32_t const limit,
596 std::optional<ripple::AccountID> const& cursorIn,
597 std::uint32_t const ledgerSequence,
598 boost::asio::yield_context yield
599 ) const override
600 {
601 auto const holderEntries = executor_.read(
602 yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
603 );
604
605 auto const& holderResults = holderEntries.value();
606 if (not holderResults.hasRows()) {
607 LOG(log_.debug()) << "No rows returned";
608 return {};
609 }
610
611 std::vector<ripple::uint256> mptKeys;
612 std::optional<ripple::AccountID> cursor;
613 for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
614 mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
615 cursor = holder;
616 }
617
618 auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
619
620 auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
621
622 mptObjects.erase(it, mptObjects.end());
623
624 ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
625 if (mptKeys.size() == limit)
626 return {mptObjects, cursor};
627
628 return {mptObjects, {}};
629 }
630
631 std::optional<Blob>
632 doFetchLedgerObject(
633 ripple::uint256 const& key,
634 std::uint32_t const sequence,
635 boost::asio::yield_context yield
636 ) const override
637 {
638 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
639 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
640 if (auto const result = res->template get<Blob>(); result) {
641 if (result->size())
642 return result;
643 } else {
644 LOG(log_.debug()) << "Could not fetch ledger object - no rows";
645 }
646 } else {
647 LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
648 }
649
650 return std::nullopt;
651 }
652
653 std::optional<std::uint32_t>
654 doFetchLedgerObjectSeq(
655 ripple::uint256 const& key,
656 std::uint32_t const sequence,
657 boost::asio::yield_context yield
658 ) const override
659 {
660 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
661 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
662 if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
663 auto [_, seq] = result.value();
664 return seq;
665 }
666 LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
667 } else {
668 LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
669 }
670
671 return std::nullopt;
672 }
673
674 std::optional<TransactionAndMetadata>
675 fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
676 {
677 if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
678 if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
679 auto [transaction, meta, seq, date] = *maybeValue;
680 return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
681 }
682
683 LOG(log_.debug()) << "Could not fetch transaction - no rows";
684 } else {
685 LOG(log_.error()) << "Could not fetch transaction: " << res.error();
686 }
687
688 return std::nullopt;
689 }
690
691 std::optional<ripple::uint256>
692 doFetchSuccessorKey(
693 ripple::uint256 key,
694 std::uint32_t const ledgerSequence,
695 boost::asio::yield_context yield
696 ) const override
697 {
698 if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
699 if (auto const result = res->template get<ripple::uint256>(); result) {
700 if (*result == kLAST_KEY)
701 return std::nullopt;
702 return result;
703 }
704
705 LOG(log_.debug()) << "Could not fetch successor - no rows";
706 } else {
707 LOG(log_.error()) << "Could not fetch successor: " << res.error();
708 }
709
710 return std::nullopt;
711 }
712
713 std::vector<TransactionAndMetadata>
714 fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
715 {
716 if (hashes.empty())
717 return {};
718
719 auto const numHashes = hashes.size();
720 std::vector<TransactionAndMetadata> results;
721 results.reserve(numHashes);
722
723 std::vector<Statement> statements;
724 statements.reserve(numHashes);
725
726 auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
727 // TODO: seems like a job for "hash IN (list of hashes)" instead?
728 std::transform(
729 std::cbegin(hashes), std::cend(hashes), std::back_inserter(statements), [this](auto const& hash) {
730 return schema_->selectTransaction.bind(hash);
731 }
732 );
733
734 auto const entries = executor_.readEach(yield, statements);
735 std::transform(
736 std::cbegin(entries),
737 std::cend(entries),
738 std::back_inserter(results),
739 [](auto const& res) -> TransactionAndMetadata {
740 if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
741 return *maybeRow;
742
743 return {};
744 }
745 );
746 });
747
748 ASSERT(numHashes == results.size(), "Number of hashes and results must match");
749 LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
750 << " milliseconds";
751 return results;
752 }
753
754 std::vector<Blob>
755 doFetchLedgerObjects(
756 std::vector<ripple::uint256> const& keys,
757 std::uint32_t const sequence,
758 boost::asio::yield_context yield
759 ) const override
760 {
761 if (keys.empty())
762 return {};
763
764 auto const numKeys = keys.size();
765 LOG(log_.trace()) << "Fetching " << numKeys << " objects";
766
767 std::vector<Blob> results;
768 results.reserve(numKeys);
769
770 std::vector<Statement> statements;
771 statements.reserve(numKeys);
772
773 // TODO: seems like a job for "key IN (list of keys)" instead?
774 std::transform(
775 std::cbegin(keys), std::cend(keys), std::back_inserter(statements), [this, &sequence](auto const& key) {
776 return schema_->selectObject.bind(key, sequence);
777 }
778 );
779
780 auto const entries = executor_.readEach(yield, statements);
781 std::transform(
782 std::cbegin(entries), std::cend(entries), std::back_inserter(results), [](auto const& res) -> Blob {
783 if (auto const maybeValue = res.template get<Blob>(); maybeValue)
784 return *maybeValue;
785
786 return {};
787 }
788 );
789
790 LOG(log_.trace()) << "Fetched " << numKeys << " objects";
791 return results;
792 }
793
794 std::vector<ripple::uint256>
795 fetchAccountRoots(
796 std::uint32_t number,
797 std::uint32_t pageSize,
798 std::uint32_t seq,
799 boost::asio::yield_context yield
800 ) const override
801 {
802 std::vector<ripple::uint256> liveAccounts;
803 std::optional<ripple::AccountID> lastItem;
804
805 while (liveAccounts.size() < number) {
806 Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
807 : schema_->selectAccountFromBeginning.bind(Limit{pageSize});
808
809 auto const res = executor_.read(yield, statement);
810 if (res) {
811 auto const& results = res.value();
812 if (not results.hasRows()) {
813 LOG(log_.debug()) << "No rows returned";
814 break;
815 }
816 // The results should not contain duplicates; we just filter out deleted accounts
817 std::vector<ripple::uint256> fullAccounts;
818 for (auto [account] : extract<ripple::AccountID>(results)) {
819 fullAccounts.push_back(ripple::keylet::account(account).key);
820 lastItem = account;
821 }
822 auto const objs = doFetchLedgerObjects(fullAccounts, seq, yield);
823
824 for (auto i = 0u; i < fullAccounts.size(); i++) {
825 if (not objs[i].empty()) {
826 if (liveAccounts.size() < number) {
827 liveAccounts.push_back(fullAccounts[i]);
828 } else {
829 break;
830 }
831 }
832 }
833 } else {
834 LOG(log_.error()) << "Could not fetch account from account_tx: " << res.error();
835 break;
836 }
837 }
838
839 return liveAccounts;
840 }
841
842 std::vector<LedgerObject>
843 fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
844 {
845 auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
846 auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
847 if (not res) {
848 LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
849 return {};
850 }
851
852 auto const& results = res.value();
853 if (not results) {
854 LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
855 return {};
856 }
857
858 std::vector<ripple::uint256> resultKeys;
859 for (auto [key] : extract<ripple::uint256>(results))
860 resultKeys.push_back(key);
861
862 return resultKeys;
863 });
864
865 // one of the above errors must have happened
866 if (keys.empty())
867 return {};
868
869 LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
870 << " milliseconds";
871
872 auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
873 std::vector<LedgerObject> results;
874 results.reserve(keys.size());
875
876 std::transform(
877 std::cbegin(keys),
878 std::cend(keys),
879 std::cbegin(objs),
880 std::back_inserter(results),
881 [](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
882 );
883
884 return results;
885 }
886
887 std::optional<std::string>
888 fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
889 {
890 auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
891 if (not res) {
892 LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
893 return {};
894 }
895
896 auto const& results = res.value();
897 if (not results) {
898 return {};
899 }
900
901 for (auto [statusString] : extract<std::string>(results))
902 return statusString;
903
904 return {};
905 }
906
907 std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
908 fetchClioNodesData(boost::asio::yield_context yield) const override
909 {
910 auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
911 if (not readResult)
912 return std::unexpected{readResult.error().message()};
913
914 std::vector<std::pair<boost::uuids::uuid, std::string>> result;
915
916 for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
917 result.emplace_back(uuid, std::move(message));
918 }
919
920 return result;
921 }
922
923 void
924 doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
925 {
926 LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
927
928 if (range_)
929 executor_.write(schema_->insertDiff, seq, key);
930
931 executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
932 }
933
934 void
935 writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
936 {
937 LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
938 << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
939 ASSERT(!key.empty(), "Key must not be empty");
940 ASSERT(!successor.empty(), "Successor must not be empty");
941
942 executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
943 }
944
945 void
946 writeAccountTransactions(std::vector<AccountTransactionsData> data) override
947 {
948 std::vector<Statement> statements;
949 statements.reserve(data.size() * 10); // assume 10 transactions avg
950
951 for (auto& record : data) {
952 std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
953 return schema_->insertAccountTx.bind(
954 std::forward<decltype(account)>(account),
955 std::make_tuple(record.ledgerSequence, record.transactionIndex),
956 record.txHash
957 );
958 });
959 }
960
961 executor_.write(std::move(statements));
962 }
963
964 void
965 writeAccountTransaction(AccountTransactionsData record) override
966 {
967 std::vector<Statement> statements;
968 statements.reserve(record.accounts.size());
969
970 std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
971 return schema_->insertAccountTx.bind(
972 std::forward<decltype(account)>(account),
973 std::make_tuple(record.ledgerSequence, record.transactionIndex),
974 record.txHash
975 );
976 });
977
978 executor_.write(std::move(statements));
979 }
980
981 void
982 writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
983 {
984 std::vector<Statement> statements;
985 statements.reserve(data.size());
986
987 std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
988 return schema_->insertNFTTx.bind(
989 record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
990 );
991 });
992
993 executor_.write(std::move(statements));
994 }
995
996 void
997 writeTransaction(
998 std::string&& hash,
999 std::uint32_t const seq,
1000 std::uint32_t const date,
1001 std::string&& transaction,
1002 std::string&& metadata
1003 ) override
1004 {
1005 LOG(log_.trace()) << "Writing txn to database";
1006
1007 executor_.write(schema_->insertLedgerTransaction, seq, hash);
1008 executor_.write(
1009 schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
1010 );
1011 }
1012
1013 void
1014 writeNFTs(std::vector<NFTsData> const& data) override
1015 {
1016 std::vector<Statement> statements;
1017 statements.reserve(data.size() * 3);
1018
1019 for (NFTsData const& record : data) {
1020 if (!record.onlyUriChanged) {
1021 statements.push_back(
1022 schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
1023 );
1024
1025 // If `uri` is set (and it can be set to an empty uri), we know this
1026 // is a net-new NFT. That is, this NFT has not been seen before by
1027 // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
1028 // the same NFT ID as an already-burned token. In this case, we need
1029 // to record the URI and link to the issuer_nf_tokens table.
1030 if (record.uri) {
1031 statements.push_back(schema_->insertIssuerNFT.bind(
1032 ripple::nft::getIssuer(record.tokenID),
1033 static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
1034 record.tokenID
1035 ));
1036 statements.push_back(
1037 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1038 );
1039 }
1040 } else {
1041 // only the URI changed, so we update only the URI table
1042 statements.push_back(
1043 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1044 );
1045 }
1046 }
1047
1048 executor_.writeEach(std::move(statements));
1049 }
1050
1051 void
1052 writeMPTHolders(std::vector<MPTHolderData> const& data) override
1053 {
1054 std::vector<Statement> statements;
1055 statements.reserve(data.size());
1056 for (auto [mptId, holder] : data)
1057 statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
1058
1059 executor_.write(std::move(statements));
1060 }
1061
1062 void
1063 startWrites() const override
1064 {
1065 // Note: this was a no-op in the original implementation too.
1066 // It was probably used in PG to start a transaction or something similar.
1067 }
1068
1069 void
1070 writeMigratorStatus(std::string const& migratorName, std::string const& status) override
1071 {
1072 executor_.writeSync(
1073 schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
1074 );
1075 }
1076
1077 void
1078 writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
1079 {
1080 executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
1081 }
1082
1083 bool
1084 isTooBusy() const override
1085 {
1086 return executor_.isTooBusy();
1087 }
1088
1089 boost::json::object
1090 stats() const override
1091 {
1092 return executor_.stats();
1093 }
1094
1095private:
1096 bool
1097 executeSyncUpdate(Statement statement)
1098 {
1099 auto const res = executor_.writeSync(statement);
1100 auto maybeSuccess = res->template get<bool>();
1101 if (not maybeSuccess) {
1102 LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
1103 return false;
1104 }
1105
1106 if (not maybeSuccess.value()) {
1107 LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
1108
1109 // error may indicate that another writer wrote something.
1110 // in this case let's just compare the current state of things
1111 // against what we were trying to write in the first place and
1112 // use that as the source of truth for the result.
1113 auto rng = hardFetchLedgerRangeNoThrow();
1114 return rng && rng->maxSequence == ledgerSequence_;
1115 }
1116
1117 return true;
1118 }
1119};
1120
1121using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;
1122
1123} // namespace data::cassandra
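
All of the fetch methods above take a boost::asio::yield_context, so callers run inside a coroutine spawned on an io_context, and each database read suspends that coroutine until the driver responds. Below is a minimal usage sketch, not part of the header: makeSettingsProvider() and the cache argument are hypothetical placeholders for however an application builds its SettingsProvider and its LedgerCacheInterface implementation, and error handling is omitted. The same spawn/yield pattern applies to every read method in this class.

#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>

// Hypothetical, application-specific factory for the driver settings.
data::cassandra::SettingsProvider makeSettingsProvider();

void
runExample(data::LedgerCacheInterface& cache)
{
    boost::asio::io_context ioc;

    // readOnly = true skips keyspace/schema creation; the constructor still
    // connects to the cluster and prepares the statements.
    data::cassandra::CassandraBackend backend{makeSettingsProvider(), cache, /* readOnly = */ true};

    // Newer Boost versions may require a completion token here (e.g. boost::asio::detached).
    boost::asio::spawn(ioc, [&backend](boost::asio::yield_context yield) {
        if (auto const seq = backend.fetchLatestLedgerSequence(yield); seq) {
            if (auto const header = backend.fetchLedgerBySequence(*seq, yield); header)
                std::cout << "latest ledger: " << header->seq << "\n";
        }
    });

    ioc.run();
}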
The interface to the database used by Clio.
Definition BackendInterface.hpp:139
std::vector< Blob > fetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t sequence, boost::asio::yield_context yield) const
Fetches all ledger objects by their keys.
Definition BackendInterface.cpp:119
std::optional< LedgerRange > hardFetchLedgerRangeNoThrow() const
Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
Definition BackendInterface.cpp:77
std::optional< LedgerRange > fetchLedgerRange() const
Fetch the current ledger range.
Definition BackendInterface.cpp:267
LedgerCacheInterface const & cache() const
Definition BackendInterface.hpp:164
Cache for an entire ledger.
Definition LedgerCacheInterface.hpp:38
Implements BackendInterface for Cassandra/ScyllaDB.
Definition CassandraBackend.hpp:83
bool isTooBusy() const override
Definition CassandraBackend.hpp:1084
std::optional< std::uint32_t > fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
Fetches the latest ledger sequence.
Definition CassandraBackend.hpp:252
MPTHoldersAndCursor fetchMPTHolders(ripple::uint192 const &mptID, std::uint32_t const limit, std::optional< ripple::AccountID > const &cursorIn, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all holders' balances for a MPTIssuanceID.
Definition CassandraBackend.hpp:593
std::vector< ripple::uint256 > fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transaction hashes from a specific ledger.
Definition CassandraBackend.hpp:364
void writeMPTHolders(std::vector< MPTHolderData > const &data) override
Write accounts that started holding onto a MPT.
Definition CassandraBackend.hpp:1052
std::optional< Blob > doFetchLedgerObject(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object.
Definition CassandraBackend.hpp:632
std::optional< LedgerRange > hardFetchLedgerRange(boost::asio::yield_context yield) const override
Fetches the ledger range from DB.
Definition CassandraBackend.hpp:319
void writeSuccessor(std::string &&key, std::uint32_t const seq, std::string &&successor) override
Write a new successor.
Definition CassandraBackend.hpp:935
void waitForWritesToFinish() override
Wait for all pending writes to finish.
Definition CassandraBackend.hpp:218
std::optional< ripple::LedgerHeader > fetchLedgerByHash(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific ledger by hash.
Definition CassandraBackend.hpp:299
std::vector< TransactionAndMetadata > fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transactions from a specific ledger.
Definition CassandraBackend.hpp:357
std::optional< ripple::LedgerHeader > fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
Fetches a specific ledger by sequence number.
Definition CassandraBackend.hpp:272
std::vector< Blob > doFetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching ledger objects.
Definition CassandraBackend.hpp:755
TransactionsAndCursor fetchAccountTransactions(ripple::AccountID const &account, std::uint32_t const limit, bool forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific account.
Definition CassandraBackend.hpp:150
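
fetchAccountTransactions returns at most `limit` transactions together with a cursor when another page may exist; feeding that cursor back in retrieves the next page. A short sketch of that loop under the same assumptions as the example after the listing; fetchFullHistory is a hypothetical helper name, and the yield_context is assumed to come from a surrounding coroutine.

// Page through an account's transaction history, newest first (sketch).
std::vector<data::TransactionAndMetadata>
fetchFullHistory(
    data::cassandra::CassandraBackend const& backend,
    ripple::AccountID const& account,
    boost::asio::yield_context yield
)
{
    std::vector<data::TransactionAndMetadata> all;
    std::optional<data::TransactionsCursor> cursor;

    do {
        auto page = backend.fetchAccountTransactions(account, /* limit = */ 200, /* forward = */ false, cursor, yield);
        all.insert(all.end(), page.txns.begin(), page.txns.end());
        cursor = page.cursor;  // empty once the last page has been returned
    } while (cursor.has_value());

    return all;
}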
void writeAccountTransaction(AccountTransactionsData record) override
Write a new account transaction.
Definition CassandraBackend.hpp:965
void writeAccountTransactions(std::vector< AccountTransactionsData > data) override
Write a new set of account transactions.
Definition CassandraBackend.hpp:946
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface &cache, bool readOnly)
Create a new cassandra/scylla backend instance.
Definition CassandraBackend.hpp:107
std::optional< NFT > fetchNFT(ripple::uint256 const &tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches a specific NFT.
Definition CassandraBackend.hpp:397
std::vector< ripple::uint256 > fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield) const override
Fetch the specified number of account root object indexes by page; the accounts need to exist for seq...
Definition CassandraBackend.hpp:795
void writeNFTs(std::vector< NFTsData > const &data) override
Writes NFTs to the database.
Definition CassandraBackend.hpp:1014
std::vector< LedgerObject > fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Returns the difference between ledgers.
Definition CassandraBackend.hpp:843
void writeLedger(ripple::LedgerHeader const &ledgerHeader, std::string &&blob) override
Writes to a specific ledger.
Definition CassandraBackend.hpp:242
std::optional< std::uint32_t > doFetchLedgerObjectSeq(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object sequence.
Definition CassandraBackend.hpp:654
void writeNodeMessage(boost::uuids::uuid const &uuid, std::string message) override
Write a node message. Used by ClusterCommunicationService.
Definition CassandraBackend.hpp:1078
std::optional< TransactionAndMetadata > fetchTransaction(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific transaction.
Definition CassandraBackend.hpp:675
std::vector< TransactionAndMetadata > fetchTransactions(std::vector< ripple::uint256 > const &hashes, boost::asio::yield_context yield) const override
Fetches multiple transactions.
Definition CassandraBackend.hpp:714
std::optional< std::string > fetchMigratorStatus(std::string const &migratorName, boost::asio::yield_context yield) const override
Fetches the status of migrator by name.
Definition CassandraBackend.hpp:888
void startWrites() const override
Starts a write transaction with the DB. No-op for cassandra.
Definition CassandraBackend.hpp:1063
void doWriteLedgerObject(std::string &&key, std::uint32_t const seq, std::string &&blob) override
Writes a ledger object to the database.
Definition CassandraBackend.hpp:924
TransactionsAndCursor fetchNFTTransactions(ripple::uint256 const &tokenID, std::uint32_t const limit, bool const forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific NFT.
Definition CassandraBackend.hpp:436
boost::json::object stats() const override
Definition CassandraBackend.hpp:1090
std::expected< std::vector< std::pair< boost::uuids::uuid, std::string > >, std::string > fetchClioNodesData(boost::asio::yield_context yield) const override
Fetches the data of all nodes in the cluster.
Definition CassandraBackend.hpp:908
NFTsAndCursor fetchNFTsByIssuer(ripple::AccountID const &issuer, std::optional< std::uint32_t > const &taxon, std::uint32_t const ledgerSequence, std::uint32_t const limit, std::optional< ripple::uint256 > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all NFTs issued by a given address.
Definition CassandraBackend.hpp:507
void writeNFTTransactions(std::vector< NFTTransactionsData > const &data) override
Write NFTs transactions.
Definition CassandraBackend.hpp:982
bool doFinishWrites() override
The implementation should wait for all pending writes to finish.
Definition CassandraBackend.hpp:224
std::optional< ripple::uint256 > doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Database-specific implementation of fetching the successor key.
Definition CassandraBackend.hpp:692
void writeTransaction(std::string &&hash, std::uint32_t const seq, std::uint32_t const date, std::string &&transaction, std::string &&metadata) override
Writes a new transaction.
Definition CassandraBackend.hpp:997
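
On the write side, the ingestion pipeline stages one ledger at a time: startWrites first, then the ledger header, transactions, and account_tx rows, and finally a commit through the interface-level finish call, which ends up in doFinishWrites above (it waits for the queued asynchronous writes and advances the ledger range). A hedged sketch of that ordering only; header, the serialized blobs, txHash, txDate, and accountTxData are placeholders, and the surrounding ETL machinery is omitted.

// Per-ledger write sequence (sketch; all values are placeholders).
backend.startWrites();  // no-op for Cassandra, kept for interface symmetry

backend.writeLedger(header, std::move(headerBlob));  // ripple::LedgerHeader plus its serialized blob
backend.writeTransaction(
    std::move(txHash), header.seq, txDate, std::move(txBlob), std::move(metaBlob)
);
backend.writeAccountTransactions(std::move(accountTxData));  // account_tx rows for this ledger

// The commit itself goes through the BackendInterface-level finish call, which
// invokes doFinishWrites(): it waits for pending writes and updates ledger_range.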
void writeMigratorStatus(std::string const &migratorName, std::string const &status) override
Mark the migration status of a migrator as Migrated in the database.
Definition CassandraBackend.hpp:1070
Represents a handle to the cassandra database cluster.
Definition Handle.hpp:46
MaybeErrorType connect() const
Synchronous version of the above.
Definition Handle.cpp:55
MaybeErrorType executeEach(std::vector< StatementType > const &statements) const
Synchronous version of the above.
Definition Handle.cpp:109
ResultOrErrorType execute(std::string_view query, Args &&... args) const
Synchronous version of the above.
Definition Handle.hpp:185
Manages the DB schema and provides access to prepared statements.
Definition Schema.hpp:55
void prepareStatements(Handle const &handle)
Recreates the prepared statements.
Definition Schema.hpp:964
Definition Statement.hpp:47
void bindAt(std::size_t const idx, Type &&value) const
Binds an argument to a specific index.
Definition Statement.hpp:93
A simple thread-safe logger for the channel specified in the constructor.
Definition Logger.hpp:111
Pump warn(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::WRN severity.
Definition Logger.cpp:317
Pump error(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::ERR severity.
Definition Logger.cpp:322
Pump debug(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::DBG severity.
Definition Logger.cpp:307
Pump trace(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::TRC severity.
Definition Logger.cpp:302
Pump info(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::NFO severity.
Definition Logger.cpp:312
This namespace implements a wrapper for the Cassandra C++ driver.
Definition Concepts.hpp:37
impl::ResultExtractor< Types... > extract(Handle::ResultType const &result)
Extracts the results into a series of std::tuple<Types...> by creating a simple wrapper with an STL inp...
Definition Handle.hpp:329
This namespace implements the data access layer and related components.
Definition AmendmentCenter.cpp:70
ripple::LedgerHeader deserializeHeader(ripple::Slice data)
Deserializes a ripple::LedgerHeader from ripple::Slice of data.
Definition LedgerUtils.hpp:205
auto timed(FnType &&func)
Profiler function to measure the time a function execution consumes.
Definition Profiler.hpp:40
Struct used to keep track of what to write to account_transactions/account_tx tables.
Definition DBHelpers.hpp:45
Represents an NFT state at a particular ledger.
Definition DBHelpers.hpp:103
Struct to store ledger header cache entry and the sequence it belongs to.
Definition LedgerHeaderCache.hpp:48
Represents an object in the ledger.
Definition Types.hpp:41
Stores a range of sequences as a min and max pair.
Definition Types.hpp:247
Represents an array of MPTokens.
Definition Types.hpp:239
Represents a NFToken.
Definition Types.hpp:172
Represents a bundle of NFTs with a cursor to the next page.
Definition Types.hpp:231
Represents a transaction and its metadata bundled together.
Definition Types.hpp:68
Represents a bundle of transactions with metadata and a cursor to the next page.
Definition Types.hpp:164
A strong type wrapper for int32_t.
Definition Types.hpp:56
A strong type wrapper for string.
Definition Types.hpp:67