Clio develop
The XRP Ledger API server.
Loading...
Searching...
No Matches
CassandraBackend.hpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of clio: https://github.com/XRPLF/clio
4 Copyright (c) 2023, the clio developers.
5
6 Permission to use, copy, modify, and distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#pragma once
21
22#include "data/BackendInterface.hpp"
23#include "data/DBHelpers.hpp"
24#include "data/LedgerCacheInterface.hpp"
25#include "data/LedgerHeaderCache.hpp"
26#include "data/Types.hpp"
27#include "data/cassandra/Concepts.hpp"
28#include "data/cassandra/Handle.hpp"
29#include "data/cassandra/Schema.hpp"
30#include "data/cassandra/SettingsProvider.hpp"
31#include "data/cassandra/Types.hpp"
32#include "data/cassandra/impl/ExecutionStrategy.hpp"
33#include "util/Assert.hpp"
34#include "util/LedgerUtils.hpp"
35#include "util/Profiler.hpp"
36#include "util/log/Logger.hpp"
37
38#include <boost/asio/spawn.hpp>
39#include <boost/json/object.hpp>
40#include <boost/uuid/string_generator.hpp>
41#include <boost/uuid/uuid.hpp>
42#include <cassandra.h>
43#include <fmt/core.h>
44#include <xrpl/basics/Blob.h>
45#include <xrpl/basics/base_uint.h>
46#include <xrpl/basics/strHex.h>
47#include <xrpl/protocol/AccountID.h>
48#include <xrpl/protocol/Indexes.h>
49#include <xrpl/protocol/LedgerHeader.h>
50#include <xrpl/protocol/nft.h>
51
52#include <algorithm>
53#include <atomic>
54#include <chrono>
55#include <cstddef>
56#include <cstdint>
57#include <iterator>
58#include <limits>
59#include <optional>
60#include <stdexcept>
61#include <string>
62#include <tuple>
63#include <utility>
64#include <vector>
65
66class CacheBackendCassandraTest;
67
68namespace data::cassandra {
69
79template <
80 SomeSettingsProvider SettingsProviderType,
81 SomeExecutionStrategy ExecutionStrategyType,
82 typename FetchLedgerCacheType = FetchLedgerCache>
    util::Logger log_{"Backend"};

    // Supplies connection settings; also feeds schema_, handle_ and executor_ (see ctor).
    SettingsProviderType settingsProvider_;
    // Sequence of the ledger currently being written; set by writeLedger() and
    // published to the ledger_range table by doFinishWrites().
    std::atomic_uint32_t ledgerSequence_ = 0u;
    friend class ::CacheBackendCassandraTest;

protected:
    // Database connection handle; shared by reference with executor_ (the reason
    // the move constructor is deleted).
    Handle handle_;

    // have to be mutable because BackendInterface constness :(
    mutable ExecutionStrategyType executor_;
    // TODO: move to interface level
    mutable FetchLedgerCacheType ledgerCache_{};
99public:
    /**
     * @brief Create the backend, connect to the database and optionally set up the schema.
     *
     * @param settingsProvider Source of database settings (moved in); also feeds schema_, handle_ and executor_
     * @param cache The ledger cache shared with the rest of the application
     * @param readOnly If true, keyspace and schema creation are skipped entirely
     *
     * @throws std::runtime_error if connecting, creating keyspace/schema, or
     *         preparing the statements fails.
     */
    BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
        , settingsProvider_{std::move(settingsProvider)}
        , schema_{settingsProvider_}
        , handle_{settingsProvider_.getSettings()}
        , executor_{settingsProvider_.getSettings(), handle_}
    {
        if (auto const res = handle_.connect(); not res)
            throw std::runtime_error("Could not connect to database: " + res.error());

        if (not readOnly) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
                // on datastax, creation of keyspaces can be configured to only be done thru the admin
                // interface. this does not mean that the keyspace does not already exist tho.
                if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                    throw std::runtime_error("Could not create keyspace: " + res.error());
            }

            if (auto const res = handle_.executeEach(schema_.createSchema); not res)
                throw std::runtime_error("Could not create schema: " + res.error());
        }

        try {
            schema_.prepareStatements(handle_);
        } catch (std::runtime_error const& ex) {
            auto const error = fmt::format(
                "Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
                "node with write access to DB should be started first.",
                ex.what(),
                readOnly
            );
            LOG(log_.error()) << error;
            throw std::runtime_error(error);
        }
        LOG(log_.info()) << "Created (revamped) CassandraBackend";
    }
143
    /**
     * @brief Move constructor is deleted because handle_ is shared by reference with executor
     */
148
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        // Without a complete ledger range there is nothing to serve.
        auto rng = fetchLedgerRange();
        if (!rng)
            return {.txns = {}, .cursor = {}};

        // Forward and backward pagination use two differently-ordered tables.
        Statement const statement = [this, forward, &account]() {
            if (forward)
                return schema_->selectAccountTxForward.bind(account);

            return schema_->selectAccountTx.bind(account);
        }();

        auto cursor = cursorIn;
        if (cursor) {
            // Resume from the caller-supplied (ledgerSequence, transactionIndex) position.
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            // No cursor: start from the edge of the range (0 when paging forward,
            // uint32 max when paging backward).
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        // FIXME: Limit is a hack to support uint32_t properly for the time
        // being. Should be removed later and schema updated to use proper
        // types.
        statement.bindAt(2, Limit{limit});

        // NOTE(review): res.value() is dereferenced without checking res for an
        // error, unlike the other fetch methods in this file — presumably
        // executor_.read() throws on failure here; confirm.
        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        // Collect the hashes; the last row seen becomes the next-page cursor.
        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "Txns = " << txns.size();

        // A full page implies more data may exist — hand the cursor back to the caller.
        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }
216
    void
    {
        // Block until all writes previously submitted through executor_ have completed.
        executor_.sync();
    }
222
    bool
    doFinishWrites() override
    {

        if (!range_) {
            // Very first ledger written by this clio: seed the ledger_range row
            // (the `false` flag marks the range as not yet complete).
            executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
        }

        // NOTE(review): executeSyncUpdate appears to act as a conditional update
        // keyed on the previous max sequence (ledgerSequence_ - 1), so only the
        // writer advancing the range by exactly one ledger succeeds — confirm.
        if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
            LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
            return false;
        }

        LOG(log_.info()) << "Committed ledger " << ledgerSequence_;
        return true;
    }
240
241 void
242 writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
243 {
244 executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));
245
246 executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);
247
248 ledgerSequence_ = ledgerHeader.seq;
249 }
250
251 std::optional<std::uint32_t>
252 fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
253 {
254 if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
255 if (auto const& result = res.value(); result) {
256 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
257 return maybeValue;
258
259 LOG(log_.error()) << "Could not fetch latest ledger - no rows";
260 return std::nullopt;
261 }
262
263 LOG(log_.error()) << "Could not fetch latest ledger - no result";
264 } else {
265 LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
266 }
267
268 return std::nullopt;
269 }
270
271 std::optional<ripple::LedgerHeader>
272 fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
273 {
274 if (auto const lock = ledgerCache_.get(); lock.has_value() && lock->seq == sequence)
275 return lock->ledger;
276
277 auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
278 if (res) {
279 if (auto const& result = res.value(); result) {
280 if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
281 auto const header = util::deserializeHeader(ripple::makeSlice(*maybeValue));
282 ledgerCache_.put(FetchLedgerCache::CacheEntry{header, sequence});
283 return header;
284 }
285
286 LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
287 return std::nullopt;
288 }
289
290 LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
291 } else {
292 LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
293 }
294
295 return std::nullopt;
296 }
297
298 std::optional<ripple::LedgerHeader>
299 fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
300 {
301 if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
302 if (auto const& result = res.value(); result) {
303 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
304 return fetchLedgerBySequence(*maybeValue, yield);
305
306 LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
307 return std::nullopt;
308 }
309
310 LOG(log_.error()) << "Could not fetch ledger by hash - no result";
311 } else {
312 LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
313 }
314
315 return std::nullopt;
316 }
317
318 std::optional<LedgerRange>
319 hardFetchLedgerRange(boost::asio::yield_context yield) const override
320 {
321 auto const res = executor_.read(yield, schema_->selectLedgerRange);
322 if (res) {
323 auto const& results = res.value();
324 if (not results.hasRows()) {
325 LOG(log_.debug()) << "Could not fetch ledger range - no rows";
326 return std::nullopt;
327 }
328
329 // TODO: this is probably a good place to use user type in
330 // cassandra instead of having two rows with bool flag. or maybe at
331 // least use tuple<int, int>?
332 LedgerRange range;
333 std::size_t idx = 0;
334 for (auto [seq] : extract<uint32_t>(results)) {
335 if (idx == 0) {
336 range.maxSequence = range.minSequence = seq;
337 } else if (idx == 1) {
338 range.maxSequence = seq;
339 }
340
341 ++idx;
342 }
343
344 if (range.minSequence > range.maxSequence)
345 std::swap(range.minSequence, range.maxSequence);
346
347 LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
348 << range.maxSequence;
349 return range;
350 }
351 LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
352
353 return std::nullopt;
354 }
355
356 std::vector<TransactionAndMetadata>
357 fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
358 {
359 auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
360 return fetchTransactions(hashes, yield);
361 }
362
363 std::vector<ripple::uint256>
364 fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
365 const override
366 {
367 auto start = std::chrono::system_clock::now();
368 auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
369
370 if (not res) {
371 LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
372 return {};
373 }
374
375 auto const& result = res.value();
376 if (not result.hasRows()) {
377 LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
378 << std::to_string(ledgerSequence);
379 return {};
380 }
381
382 std::vector<ripple::uint256> hashes;
383 for (auto [hash] : extract<ripple::uint256>(result))
384 hashes.push_back(std::move(hash));
385
386 auto end = std::chrono::system_clock::now();
387 LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
388 << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
389 << " milliseconds";
390
391 return hashes;
392 }
393
394 std::optional<NFT>
395 fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
396 const override
397 {
398 auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
399 if (not res)
400 return std::nullopt;
401
402 if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
403 auto [seq, owner, isBurned] = *maybeRow;
404 auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
405
406 // now fetch URI. Usually we will have the URI even for burned NFTs,
407 // but if the first ledger on this clio included NFTokenBurn
408 // transactions we will not have the URIs for any of those tokens.
409 // In any other case not having the URI indicates something went
410 // wrong with our data.
411 //
412 // TODO - in the future would be great for any handlers that use
413 // this could inject a warning in this case (the case of not having
414 // a URI because it was burned in the first ledger) to indicate that
415 // even though we are returning a blank URI, the NFT might have had
416 // one.
417 auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
418 if (uriRes) {
419 if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
420 result->uri = *maybeUri;
421 }
422
423 return result;
424 }
425
426 LOG(log_.error()) << "Could not fetch NFT - no rows";
427 return std::nullopt;
428 }
429
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        // Without a complete ledger range there is nothing to serve.
        auto rng = fetchLedgerRange();
        if (!rng)
            return {.txns = {}, .cursor = {}};

        // Forward and backward pagination use two differently-ordered tables.
        Statement const statement = [this, forward, &tokenID]() {
            if (forward)
                return schema_->selectNFTTxForward.bind(tokenID);

            return schema_->selectNFTTx.bind(tokenID);
        }();

        auto cursor = cursorIn;
        if (cursor) {
            // Resume from the caller-supplied (ledgerSequence, transactionIndex) position.
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            // No cursor: start from the edge of the range (0 when paging
            // forward, uint32 max when paging backward).
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        statement.bindAt(2, Limit{limit});

        // NOTE(review): res.value() is dereferenced without checking res for an
        // error — presumably executor_.read() throws on failure here; confirm.
        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        // Collect the hashes; the last row seen becomes the next-page cursor.
        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;

                // forward queries by ledger/tx sequence `>=`
                // so we have to advance the index by one
                if (forward)
                    ++cursor->transactionIndex;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "NFT Txns = " << txns.size();

        // A full page implies more data may exist — hand the cursor back to the caller.
        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }
500
        ripple::AccountID const& issuer,
        std::optional<std::uint32_t> const& taxon,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        NFTsAndCursor ret;

        // Two query shapes: filtered by (issuer, taxon) or by issuer alone. The
        // cursor — an NFT ID — marks where the previous page stopped.
        Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
            if (taxon.has_value()) {
                auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
                r.bindAt(1, *taxon);
                r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
                r.bindAt(3, Limit{limit});
                return r;
            }

            // Issuer-only paging orders by (taxon, tokenID); the taxon
            // component of the resume point is derived from the cursor NFT ID.
            auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
            r.bindAt(
                1,
                std::make_tuple(
                    cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
                    cursorIn.value_or(ripple::uint256(0))
                )
            );
            r.bindAt(2, Limit{limit});
            return r;
        }();

        // Query for all the NFTs issued by the account, potentially filtered by the taxon
        // NOTE(review): res.value() is used without an error check, unlike most
        // fetch methods in this file — presumably read() throws on failure; confirm.
        auto const res = executor_.read(yield, idQueryStatement);

        auto const& idQueryResults = res.value();
        if (not idQueryResults.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> nftIDs;
        for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
            nftIDs.push_back(nftID);

        if (nftIDs.empty())
            return ret;

        // A full page implies more results may exist; return a resume cursor.
        if (nftIDs.size() == limit)
            ret.cursor = nftIDs.back();

        // Fetch state and URI for every page member, one statement per NFT.
        std::vector<Statement> selectNFTStatements;
        selectNFTStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs),
            std::cend(nftIDs),
            std::back_inserter(selectNFTStatements),
            [&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
        );

        auto const nftInfos = executor_.readEach(yield, selectNFTStatements);

        std::vector<Statement> selectNFTURIStatements;
        selectNFTURIStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs),
            std::cend(nftIDs),
            std::back_inserter(selectNFTURIStatements),
            [&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
        );

        auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

        // Stitch the two result sets together by index; IDs without an info row
        // are silently dropped.
        for (auto i = 0u; i < nftIDs.size(); i++) {
            if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
                auto [seq, owner, isBurned] = *maybeRow;
                NFT nft(nftIDs[i], seq, owner, isBurned);
                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
                    nft.uri = *maybeUri;
                ret.nfts.push_back(nft);
            }
        }
        return ret;
    }
588
        ripple::uint192 const& mptID,
        std::uint32_t const limit,
        std::optional<ripple::AccountID> const& cursorIn,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context yield
    ) const override
    {
        // Page through the holders of this MPT issuance, resuming after the
        // cursor account (or from the zero account when no cursor is given).
        auto const holderEntries = executor_.read(
            yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
        );

        // NOTE(review): holderEntries.value() is used without an error check —
        // presumably read() throws on failure; confirm.
        auto const& holderResults = holderEntries.value();
        if (not holderResults.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> mptKeys;
        std::optional<ripple::AccountID> cursor;
        for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
            mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
            cursor = holder;  // last holder seen becomes the next-page cursor
        }

        // Resolve each MPToken key to its object blob as of ledgerSequence,
        // then drop deleted objects (empty blobs).
        auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);

        auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });

        mptObjects.erase(it, mptObjects.end());

        ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
        // A full page implies more holders may exist; return the cursor.
        if (mptKeys.size() == limit)
            return {mptObjects, cursor};

        return {mptObjects, {}};
    }
627
628 std::optional<Blob>
629 doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
630 const override
631 {
632 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
633 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
634 if (auto const result = res->template get<Blob>(); result) {
635 if (result->size())
636 return result;
637 } else {
638 LOG(log_.debug()) << "Could not fetch ledger object - no rows";
639 }
640 } else {
641 LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
642 }
643
644 return std::nullopt;
645 }
646
647 std::optional<std::uint32_t>
648 doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
649 const override
650 {
651 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
652 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
653 if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
654 auto [_, seq] = result.value();
655 return seq;
656 }
657 LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
658 } else {
659 LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
660 }
661
662 return std::nullopt;
663 }
664
665 std::optional<TransactionAndMetadata>
666 fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
667 {
668 if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
669 if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
670 auto [transaction, meta, seq, date] = *maybeValue;
671 return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
672 }
673
674 LOG(log_.debug()) << "Could not fetch transaction - no rows";
675 } else {
676 LOG(log_.error()) << "Could not fetch transaction: " << res.error();
677 }
678
679 return std::nullopt;
680 }
681
682 std::optional<ripple::uint256>
683 doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
684 const override
685 {
686 if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
687 if (auto const result = res->template get<ripple::uint256>(); result) {
688 if (*result == kLAST_KEY)
689 return std::nullopt;
690 return result;
691 }
692
693 LOG(log_.debug()) << "Could not fetch successor - no rows";
694 } else {
695 LOG(log_.error()) << "Could not fetch successor: " << res.error();
696 }
697
698 return std::nullopt;
699 }
700
    std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
    {
        if (hashes.empty())
            return {};

        auto const numHashes = hashes.size();
        std::vector<TransactionAndMetadata> results;
        results.reserve(numHashes);

        std::vector<Statement> statements;
        statements.reserve(numHashes);

        auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
            // TODO: seems like a job for "hash IN (list of hashes)" instead?
            std::transform(
                std::cbegin(hashes),
                std::cend(hashes),
                std::back_inserter(statements),
                [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
            );

            // One select per hash, issued through readEach.
            auto const entries = executor_.readEach(yield, statements);
            std::transform(
                std::cbegin(entries),
                std::cend(entries),
                std::back_inserter(results),
                [](auto const& res) -> TransactionAndMetadata {
                    if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
                        return *maybeRow;

                    // A missing row yields a default-constructed entry so that
                    // results stays index-aligned with hashes.
                    return {};
                }
            );
        });

        ASSERT(numHashes == results.size(), "Number of hashes and results must match");
        LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
                          << " milliseconds";
        return results;
    }
742
    std::vector<Blob>
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context yield
    ) const override
    {
        if (keys.empty())
            return {};

        auto const numKeys = keys.size();
        LOG(log_.trace()) << "Fetching " << numKeys << " objects";

        std::vector<Blob> results;
        results.reserve(numKeys);

        std::vector<Statement> statements;
        statements.reserve(numKeys);

        // TODO: seems like a job for "key IN (list of keys)" instead?
        std::transform(
            std::cbegin(keys),
            std::cend(keys),
            std::back_inserter(statements),
            [this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
        );

        auto const entries = executor_.readEach(yield, statements);
        std::transform(
            std::cbegin(entries),
            std::cend(entries),
            std::back_inserter(results),
            [](auto const& res) -> Blob {
                if (auto const maybeValue = res.template get<Blob>(); maybeValue)
                    return *maybeValue;

                // A missing row yields an empty blob so that results stays
                // index-aligned with keys.
                return {};
            }
        );

        LOG(log_.trace()) << "Fetched " << numKeys << " objects";
        return results;
    }
786
    std::vector<ripple::uint256>
    fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
        const override
    {
        std::vector<ripple::uint256> liveAccounts;
        std::optional<ripple::AccountID> lastItem;

        // Page through stored accounts until `number` live accounts are
        // collected, the table is exhausted, or a read fails.
        while (liveAccounts.size() < number) {
            Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
                                                 : schema_->selectAccountFromBeginning.bind(Limit{pageSize});

            auto const res = executor_.read(yield, statement);
            if (res) {
                auto const& results = res.value();
                if (not results.hasRows()) {
                    LOG(log_.debug()) << "No rows returned";
                    break;
                }
                // The results should not contain duplicates, we just filter out deleted accounts
                std::vector<ripple::uint256> fullAccounts;
                for (auto [account] : extract<ripple::AccountID>(results)) {
                    fullAccounts.push_back(ripple::keylet::account(account).key);
                    lastItem = account;  // resume point for the next page
                }
                auto const objs = doFetchLedgerObjects(fullAccounts, seq, yield);

                // An empty object blob means the account was deleted as of `seq`.
                for (auto i = 0u; i < fullAccounts.size(); i++) {
                    if (not objs[i].empty()) {
                        if (liveAccounts.size() < number) {
                            liveAccounts.push_back(fullAccounts[i]);
                        } else {
                            break;
                        }
                    }
                }
            } else {
                LOG(log_.error()) << "Could not fetch account from account_tx: " << res.error();
                break;
            }
        }

        return liveAccounts;
    }
830
831 std::vector<LedgerObject>
832 fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
833 {
834 auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
835 auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
836 if (not res) {
837 LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
838 return {};
839 }
840
841 auto const& results = res.value();
842 if (not results) {
843 LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
844 return {};
845 }
846
847 std::vector<ripple::uint256> resultKeys;
848 for (auto [key] : extract<ripple::uint256>(results))
849 resultKeys.push_back(key);
850
851 return resultKeys;
852 });
853
854 // one of the above errors must have happened
855 if (keys.empty())
856 return {};
857
858 LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
859 << " milliseconds";
860
861 auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
862 std::vector<LedgerObject> results;
863 results.reserve(keys.size());
864
865 std::transform(
866 std::cbegin(keys),
867 std::cend(keys),
868 std::cbegin(objs),
869 std::back_inserter(results),
870 [](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
871 );
872
873 return results;
874 }
875
876 std::optional<std::string>
877 fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
878 {
879 auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
880 if (not res) {
881 LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
882 return {};
883 }
884
885 auto const& results = res.value();
886 if (not results) {
887 return {};
888 }
889
890 for (auto [statusString] : extract<std::string>(results))
891 return statusString;
892
893 return {};
894 }
895
896 std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
897 fetchClioNodesData(boost::asio::yield_context yield) const override
898 {
899 auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
900 if (not readResult)
901 return std::unexpected{readResult.error().message()};
902
903 std::vector<std::pair<boost::uuids::uuid, std::string>> result;
904
905 for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
906 result.emplace_back(uuid, std::move(message));
907 }
908
909 return result;
910 }
911
    void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
    {
        LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";

        // Record the changed key in the diff table, but only once an initial
        // ledger range exists.
        if (range_)
            executor_.write(schema_->insertDiff, seq, key);

        // Ordering matters: `key` is copied into the diff write above before
        // being moved into the objects write here.
        executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
    }
922
    void
    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
    {
        LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
                          << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
        ASSERT(!key.empty(), "Key must not be empty");
        ASSERT(!successor.empty(), "Successor must not be empty");

        // Links `key` to the next key in the successor chain as of ledger `seq`.
        executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
    }
933
934 void
935 writeAccountTransactions(std::vector<AccountTransactionsData> data) override
936 {
937 std::vector<Statement> statements;
938 statements.reserve(data.size() * 10); // assume 10 transactions avg
939
940 for (auto& record : data) {
941 std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
942 return schema_->insertAccountTx.bind(
943 std::forward<decltype(account)>(account),
944 std::make_tuple(record.ledgerSequence, record.transactionIndex),
945 record.txHash
946 );
947 });
948 }
949
950 executor_.write(std::move(statements));
951 }
952
    void
    {
        std::vector<Statement> statements;
        statements.reserve(record.accounts.size());

        // One account_tx insert per account affected by this transaction.
        std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
            return schema_->insertAccountTx.bind(
                std::forward<decltype(account)>(account),
                std::make_tuple(record.ledgerSequence, record.transactionIndex),
                record.txHash
            );
        });

        executor_.write(std::move(statements));
    }
969
970 void
971 writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
972 {
973 std::vector<Statement> statements;
974 statements.reserve(data.size());
975
976 std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
977 return schema_->insertNFTTx.bind(
978 record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
979 );
980 });
981
982 executor_.write(std::move(statements));
983 }
984
    void
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata
    ) override
    {
        LOG(log_.trace()) << "Writing txn to database";

        // Ordering matters: `hash` is copied into the ledger_transactions write
        // first, then moved into the transactions write below.
        executor_.write(schema_->insertLedgerTransaction, seq, hash);
        executor_.write(
            schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
        );
    }
1001
1002 void
1003 writeNFTs(std::vector<NFTsData> const& data) override
1004 {
1005 std::vector<Statement> statements;
1006 statements.reserve(data.size() * 3);
1007
1008 for (NFTsData const& record : data) {
1009 if (!record.onlyUriChanged) {
1010 statements.push_back(
1011 schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
1012 );
1013
1014 // If `uri` is set (and it can be set to an empty uri), we know this
1015 // is a net-new NFT. That is, this NFT has not been seen before by
1016 // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
1017 // the same NFT ID as an already-burned token. In this case, we need
1018 // to record the URI and link to the issuer_nf_tokens table.
1019 if (record.uri) {
1020 statements.push_back(schema_->insertIssuerNFT.bind(
1021 ripple::nft::getIssuer(record.tokenID),
1022 static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
1023 record.tokenID
1024 ));
1025 statements.push_back(
1026 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1027 );
1028 }
1029 } else {
1030 // only uri changed, we update the uri table only
1031 statements.push_back(
1032 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1033 );
1034 }
1035 }
1036
1037 executor_.writeEach(std::move(statements));
1038 }
1039
1040 void
1041 writeMPTHolders(std::vector<MPTHolderData> const& data) override
1042 {
1043 std::vector<Statement> statements;
1044 statements.reserve(data.size());
1045 for (auto [mptId, holder] : data)
1046 statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
1047
1048 executor_.write(std::move(statements));
1049 }
1050
1051 void
1052 startWrites() const override
1053 {
1054 // Note: no-op in original implementation too.
1055 // probably was used in PG to start a transaction or smth.
1056 }
1057
1058 void
1059 writeMigratorStatus(std::string const& migratorName, std::string const& status) override
1060 {
1061 executor_.writeSync(
1062 schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
1063 );
1064 }
1065
1066 void
1067 writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
1068 {
1069 executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
1070 }
1071
1072 bool
1073 isTooBusy() const override
1074 {
1075 return executor_.isTooBusy();
1076 }
1077
1078 boost::json::object
1079 stats() const override
1080 {
1081 return executor_.stats();
1082 }
1083
1084private:
1085 bool
1086 executeSyncUpdate(Statement statement)
1087 {
1088 auto const res = executor_.writeSync(statement);
1089 auto maybeSuccess = res->template get<bool>();
1090 if (not maybeSuccess) {
1091 LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
1092 return false;
1093 }
1094
1095 if (not maybeSuccess.value()) {
1096 LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
1097
1098 // error may indicate that another writer wrote something.
1099 // in this case let's just compare the current state of things
1100 // against what we were trying to write in the first place and
1101 // use that as the source of truth for the result.
1102 auto rng = hardFetchLedgerRangeNoThrow();
1103 return rng && rng->maxSequence == ledgerSequence_;
1104 }
1105
1106 return true;
1107 }
1108};
1109
/** @brief The production Cassandra backend type: BasicCassandraBackend specialized with the default settings provider and execution strategy. */
using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;
1111
1112} // namespace data::cassandra
The interface to the database used by Clio.
Definition BackendInterface.hpp:140
std::vector< Blob > fetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t sequence, boost::asio::yield_context yield) const
Fetches all ledger objects by their keys.
Definition BackendInterface.cpp:119
std::optional< LedgerRange > hardFetchLedgerRangeNoThrow() const
Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
Definition BackendInterface.cpp:77
std::optional< LedgerRange > fetchLedgerRange() const
Fetch the current ledger range.
Definition BackendInterface.cpp:267
LedgerCacheInterface const & cache() const
Definition BackendInterface.hpp:165
Cache for an entire ledger.
Definition LedgerCacheInterface.hpp:38
Implements BackendInterface for Cassandra/ScyllaDB.
Definition CassandraBackend.hpp:83
bool isTooBusy() const override
Definition CassandraBackend.hpp:1073
std::optional< std::uint32_t > fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
Fetches the latest ledger sequence.
Definition CassandraBackend.hpp:252
MPTHoldersAndCursor fetchMPTHolders(ripple::uint192 const &mptID, std::uint32_t const limit, std::optional< ripple::AccountID > const &cursorIn, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all holders' balances for an MPTIssuanceID.
Definition CassandraBackend.hpp:590
std::vector< ripple::uint256 > fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transaction hashes from a specific ledger.
Definition CassandraBackend.hpp:364
void writeMPTHolders(std::vector< MPTHolderData > const &data) override
Write accounts that started holding onto a MPT.
Definition CassandraBackend.hpp:1041
std::optional< Blob > doFetchLedgerObject(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object.
Definition CassandraBackend.hpp:629
std::optional< LedgerRange > hardFetchLedgerRange(boost::asio::yield_context yield) const override
Fetches the ledger range from DB.
Definition CassandraBackend.hpp:319
void writeSuccessor(std::string &&key, std::uint32_t const seq, std::string &&successor) override
Write a new successor.
Definition CassandraBackend.hpp:924
void waitForWritesToFinish() override
Wait for all pending writes to finish.
Definition CassandraBackend.hpp:218
std::optional< ripple::LedgerHeader > fetchLedgerByHash(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific ledger by hash.
Definition CassandraBackend.hpp:299
std::vector< TransactionAndMetadata > fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transactions from a specific ledger.
Definition CassandraBackend.hpp:357
std::optional< ripple::LedgerHeader > fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
Fetches a specific ledger by sequence number.
Definition CassandraBackend.hpp:272
std::vector< Blob > doFetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching ledger objects.
Definition CassandraBackend.hpp:744
TransactionsAndCursor fetchAccountTransactions(ripple::AccountID const &account, std::uint32_t const limit, bool forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific account.
Definition CassandraBackend.hpp:150
void writeAccountTransaction(AccountTransactionsData record) override
Write a new account transaction.
Definition CassandraBackend.hpp:954
void writeAccountTransactions(std::vector< AccountTransactionsData > data) override
Write a new set of account transactions.
Definition CassandraBackend.hpp:935
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface &cache, bool readOnly)
Create a new cassandra/scylla backend instance.
Definition CassandraBackend.hpp:107
std::optional< NFT > fetchNFT(ripple::uint256 const &tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches a specific NFT.
Definition CassandraBackend.hpp:395
std::vector< ripple::uint256 > fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield) const override
Fetch the specified number of account root object indexes by page, the accounts need to exist for seq...
Definition CassandraBackend.hpp:788
void writeNFTs(std::vector< NFTsData > const &data) override
Writes NFTs to the database.
Definition CassandraBackend.hpp:1003
std::vector< LedgerObject > fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Returns the difference between ledgers.
Definition CassandraBackend.hpp:832
void writeLedger(ripple::LedgerHeader const &ledgerHeader, std::string &&blob) override
Writes to a specific ledger.
Definition CassandraBackend.hpp:242
std::optional< std::uint32_t > doFetchLedgerObjectSeq(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object sequence.
Definition CassandraBackend.hpp:648
void writeNodeMessage(boost::uuids::uuid const &uuid, std::string message) override
Write a node message. Used by ClusterCommunicationService.
Definition CassandraBackend.hpp:1067
std::optional< TransactionAndMetadata > fetchTransaction(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific transaction.
Definition CassandraBackend.hpp:666
std::vector< TransactionAndMetadata > fetchTransactions(std::vector< ripple::uint256 > const &hashes, boost::asio::yield_context yield) const override
Fetches multiple transactions.
Definition CassandraBackend.hpp:702
std::optional< std::string > fetchMigratorStatus(std::string const &migratorName, boost::asio::yield_context yield) const override
Fetches the status of migrator by name.
Definition CassandraBackend.hpp:877
void startWrites() const override
Starts a write transaction with the DB. No-op for cassandra.
Definition CassandraBackend.hpp:1052
void doWriteLedgerObject(std::string &&key, std::uint32_t const seq, std::string &&blob) override
Writes a ledger object to the database.
Definition CassandraBackend.hpp:913
TransactionsAndCursor fetchNFTTransactions(ripple::uint256 const &tokenID, std::uint32_t const limit, bool const forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific NFT.
Definition CassandraBackend.hpp:431
boost::json::object stats() const override
Definition CassandraBackend.hpp:1079
std::expected< std::vector< std::pair< boost::uuids::uuid, std::string > >, std::string > fetchClioNodesData(boost::asio::yield_context yield) const override
Fetches the data of all nodes in the cluster.
Definition CassandraBackend.hpp:897
NFTsAndCursor fetchNFTsByIssuer(ripple::AccountID const &issuer, std::optional< std::uint32_t > const &taxon, std::uint32_t const ledgerSequence, std::uint32_t const limit, std::optional< ripple::uint256 > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all NFTs issued by a given address.
Definition CassandraBackend.hpp:502
void writeNFTTransactions(std::vector< NFTTransactionsData > const &data) override
Write NFTs transactions.
Definition CassandraBackend.hpp:971
bool doFinishWrites() override
The implementation should wait for all pending writes to finish.
Definition CassandraBackend.hpp:224
std::optional< ripple::uint256 > doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Database-specific implementation of fetching the successor key.
Definition CassandraBackend.hpp:683
void writeTransaction(std::string &&hash, std::uint32_t const seq, std::uint32_t const date, std::string &&transaction, std::string &&metadata) override
Writes a new transaction.
Definition CassandraBackend.hpp:986
void writeMigratorStatus(std::string const &migratorName, std::string const &status) override
Mark the migration status of a migrator as Migrated in the database.
Definition CassandraBackend.hpp:1059
Represents a handle to the cassandra database cluster.
Definition Handle.hpp:46
MaybeErrorType connect() const
Synchronous version of the above.
Definition Handle.cpp:55
MaybeErrorType executeEach(std::vector< StatementType > const &statements) const
Synchronous version of the above.
Definition Handle.cpp:109
ResultOrErrorType execute(std::string_view query, Args &&... args) const
Synchronous version of the above.
Definition Handle.hpp:185
Manages the DB schema and provides access to prepared statements.
Definition Schema.hpp:55
void prepareStatements(Handle const &handle)
Recreates the prepared statements.
Definition Schema.hpp:848
Definition Statement.hpp:47
void bindAt(std::size_t const idx, Type &&value) const
Binds an argument to a specific index.
Definition Statement.hpp:93
A simple thread-safe logger for the channel specified in the constructor.
Definition Logger.hpp:111
Pump warn(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::WRN severity.
Definition Logger.cpp:224
Pump error(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::ERR severity.
Definition Logger.cpp:229
Pump debug(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::DBG severity.
Definition Logger.cpp:214
Pump trace(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::TRC severity.
Definition Logger.cpp:209
Pump info(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::NFO severity.
Definition Logger.cpp:219
This namespace implements a wrapper for the Cassandra C++ driver.
Definition Concepts.hpp:37
impl::ResultExtractor< Types... > extract(Handle::ResultType const &result)
Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with an STL inp...
Definition Handle.hpp:329
This namespace implements the data access layer and related components.
Definition AmendmentCenter.cpp:70
ripple::LedgerHeader deserializeHeader(ripple::Slice data)
Deserializes a ripple::LedgerHeader from ripple::Slice of data.
Definition LedgerUtils.hpp:204
auto timed(FnType &&func)
Profiler function to measure the time a function execution consumes.
Definition Profiler.hpp:40
Struct used to keep track of what to write to account_transactions/account_tx tables.
Definition DBHelpers.hpp:45
Represents an NFT state at a particular ledger.
Definition DBHelpers.hpp:103
Struct to store ledger header cache entry and the sequence it belongs to.
Definition LedgerHeaderCache.hpp:48
Represents an object in the ledger.
Definition Types.hpp:41
Stores a range of sequences as a min and max pair.
Definition Types.hpp:247
Represents an array of MPTokens.
Definition Types.hpp:239
Represents a NFToken.
Definition Types.hpp:172
Represents a bundle of NFTs with a cursor to the next page.
Definition Types.hpp:231
Represents a transaction and its metadata bundled together.
Definition Types.hpp:68
Represents a bundle of transactions with metadata and a cursor to the next page.
Definition Types.hpp:164
A strong type wrapper for int32_t.
Definition Types.hpp:56
A strong type wrapper for string.
Definition Types.hpp:67