Clio develop
The XRP Ledger API server.
Loading...
Searching...
No Matches
CassandraBackend.hpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of clio: https://github.com/XRPLF/clio
4 Copyright (c) 2023, the clio developers.
5
6 Permission to use, copy, modify, and distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#pragma once
21
22#include "data/BackendInterface.hpp"
23#include "data/DBHelpers.hpp"
24#include "data/LedgerCacheInterface.hpp"
25#include "data/Types.hpp"
26#include "data/cassandra/Concepts.hpp"
27#include "data/cassandra/Handle.hpp"
28#include "data/cassandra/Schema.hpp"
29#include "data/cassandra/SettingsProvider.hpp"
30#include "data/cassandra/Types.hpp"
31#include "data/cassandra/impl/ExecutionStrategy.hpp"
32#include "util/Assert.hpp"
33#include "util/LedgerUtils.hpp"
34#include "util/Profiler.hpp"
35#include "util/log/Logger.hpp"
36
37#include <boost/asio/spawn.hpp>
38#include <boost/json/object.hpp>
39#include <boost/uuid/string_generator.hpp>
40#include <boost/uuid/uuid.hpp>
41#include <cassandra.h>
42#include <fmt/core.h>
43#include <xrpl/basics/Blob.h>
44#include <xrpl/basics/base_uint.h>
45#include <xrpl/basics/strHex.h>
46#include <xrpl/protocol/AccountID.h>
47#include <xrpl/protocol/Indexes.h>
48#include <xrpl/protocol/LedgerHeader.h>
49#include <xrpl/protocol/nft.h>
50
51#include <algorithm>
52#include <atomic>
53#include <chrono>
54#include <cstddef>
55#include <cstdint>
56#include <iterator>
57#include <limits>
58#include <optional>
59#include <stdexcept>
60#include <string>
61#include <tuple>
62#include <utility>
63#include <vector>
64
65namespace data::cassandra {
66
75template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategyType>
77 util::Logger log_{"Backend"};
78
79 SettingsProviderType settingsProvider_;
81
82 std::atomic_uint32_t ledgerSequence_ = 0u;
83
84protected:
85 Handle handle_;
86
87 // have to be mutable because BackendInterface constness :(
88 mutable ExecutionStrategyType executor_;
89
90public:
98 BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface& cache, bool readOnly)
100 , settingsProvider_{std::move(settingsProvider)}
101 , schema_{settingsProvider_}
102 , handle_{settingsProvider_.getSettings()}
103 , executor_{settingsProvider_.getSettings(), handle_}
104 {
105 if (auto const res = handle_.connect(); not res)
106 throw std::runtime_error("Could not connect to database: " + res.error());
107
108 if (not readOnly) {
109 if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
110 // on datastax, creation of keyspaces can be configured to only be done thru the admin
111 // interface. this does not mean that the keyspace does not already exist tho.
112 if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
113 throw std::runtime_error("Could not create keyspace: " + res.error());
114 }
115
116 if (auto const res = handle_.executeEach(schema_.createSchema); not res)
117 throw std::runtime_error("Could not create schema: " + res.error());
118 }
119
120 try {
121 schema_.prepareStatements(handle_);
122 } catch (std::runtime_error const& ex) {
123 auto const error = fmt::format(
124 "Failed to prepare the statements: {}; readOnly: {}. ReadOnly should be turned off or another Clio "
125 "node with write access to DB should be started first.",
126 ex.what(),
127 readOnly
128 );
129 LOG(log_.error()) << error;
130 throw std::runtime_error(error);
131 }
132
133 LOG(log_.info()) << "Created (revamped) CassandraBackend";
134 }
135
136 /*
137 * @brief Move constructor is deleted because handle_ is shared by reference with executor
138 */
140
143 ripple::AccountID const& account,
144 std::uint32_t const limit,
145 bool forward,
146 std::optional<TransactionsCursor> const& cursorIn,
147 boost::asio::yield_context yield
148 ) const override
149 {
150 auto rng = fetchLedgerRange();
151 if (!rng)
152 return {.txns = {}, .cursor = {}};
153
154 Statement const statement = [this, forward, &account]() {
155 if (forward)
156 return schema_->selectAccountTxForward.bind(account);
157
158 return schema_->selectAccountTx.bind(account);
159 }();
160
161 auto cursor = cursorIn;
162 if (cursor) {
163 statement.bindAt(1, cursor->asTuple());
164 LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
165 << cursor->transactionIndex;
166 } else {
167 auto const seq = forward ? rng->minSequence : rng->maxSequence;
168 auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();
169
170 statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
171 LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
172 << " tuple = " << placeHolder;
173 }
174
175 // FIXME: Limit is a hack to support uint32_t properly for the time
176 // being. Should be removed later and schema updated to use proper
177 // types.
178 statement.bindAt(2, Limit{limit});
179 auto const res = executor_.read(yield, statement);
180 auto const& results = res.value();
181 if (not results.hasRows()) {
182 LOG(log_.debug()) << "No rows returned";
183 return {};
184 }
185
186 std::vector<ripple::uint256> hashes = {};
187 auto numRows = results.numRows();
188 LOG(log_.info()) << "num_rows = " << numRows;
189
190 for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
191 hashes.push_back(hash);
192 if (--numRows == 0) {
193 LOG(log_.debug()) << "Setting cursor";
194 cursor = data;
195 }
196 }
197
198 auto const txns = fetchTransactions(hashes, yield);
199 LOG(log_.debug()) << "Txns = " << txns.size();
200
201 if (txns.size() == limit) {
202 LOG(log_.debug()) << "Returning cursor";
203 return {txns, cursor};
204 }
205
206 return {txns, {}};
207 }
208
209 void
211 {
212 executor_.sync();
213 }
214
215 bool
216 doFinishWrites() override
217 {
219
220 if (!range_) {
221 executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
222 }
223
224 if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
225 LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
226 return false;
227 }
228
229 LOG(log_.info()) << "Committed ledger " << ledgerSequence_;
230 return true;
231 }
232
233 void
234 writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) override
235 {
236 executor_.write(schema_->insertLedgerHeader, ledgerHeader.seq, std::move(blob));
237
238 executor_.write(schema_->insertLedgerHash, ledgerHeader.hash, ledgerHeader.seq);
239
240 ledgerSequence_ = ledgerHeader.seq;
241 }
242
243 std::optional<std::uint32_t>
244 fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
245 {
246 if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
247 if (auto const& result = res.value(); result) {
248 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
249 return maybeValue;
250
251 LOG(log_.error()) << "Could not fetch latest ledger - no rows";
252 return std::nullopt;
253 }
254
255 LOG(log_.error()) << "Could not fetch latest ledger - no result";
256 } else {
257 LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
258 }
259
260 return std::nullopt;
261 }
262
263 std::optional<ripple::LedgerHeader>
264 fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
265 {
266 auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
267 if (res) {
268 if (auto const& result = res.value(); result) {
269 if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
270 return util::deserializeHeader(ripple::makeSlice(*maybeValue));
271 }
272
273 LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
274 return std::nullopt;
275 }
276
277 LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
278 } else {
279 LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
280 }
281
282 return std::nullopt;
283 }
284
285 std::optional<ripple::LedgerHeader>
286 fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
287 {
288 if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
289 if (auto const& result = res.value(); result) {
290 if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
291 return fetchLedgerBySequence(*maybeValue, yield);
292
293 LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
294 return std::nullopt;
295 }
296
297 LOG(log_.error()) << "Could not fetch ledger by hash - no result";
298 } else {
299 LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
300 }
301
302 return std::nullopt;
303 }
304
305 std::optional<LedgerRange>
306 hardFetchLedgerRange(boost::asio::yield_context yield) const override
307 {
308 auto const res = executor_.read(yield, schema_->selectLedgerRange);
309 if (res) {
310 auto const& results = res.value();
311 if (not results.hasRows()) {
312 LOG(log_.debug()) << "Could not fetch ledger range - no rows";
313 return std::nullopt;
314 }
315
316 // TODO: this is probably a good place to use user type in
317 // cassandra instead of having two rows with bool flag. or maybe at
318 // least use tuple<int, int>?
319 LedgerRange range;
320 std::size_t idx = 0;
321 for (auto [seq] : extract<uint32_t>(results)) {
322 if (idx == 0) {
323 range.maxSequence = range.minSequence = seq;
324 } else if (idx == 1) {
325 range.maxSequence = seq;
326 }
327
328 ++idx;
329 }
330
331 if (range.minSequence > range.maxSequence)
332 std::swap(range.minSequence, range.maxSequence);
333
334 LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
335 << range.maxSequence;
336 return range;
337 }
338 LOG(log_.error()) << "Could not fetch ledger range: " << res.error();
339
340 return std::nullopt;
341 }
342
343 std::vector<TransactionAndMetadata>
344 fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
345 {
346 auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
347 return fetchTransactions(hashes, yield);
348 }
349
350 std::vector<ripple::uint256>
351 fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
352 const override
353 {
354 auto start = std::chrono::system_clock::now();
355 auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);
356
357 if (not res) {
358 LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
359 return {};
360 }
361
362 auto const& result = res.value();
363 if (not result.hasRows()) {
364 LOG(log_.warn()) << "Could not fetch all transaction hashes - no rows; ledger = "
365 << std::to_string(ledgerSequence);
366 return {};
367 }
368
369 std::vector<ripple::uint256> hashes;
370 for (auto [hash] : extract<ripple::uint256>(result))
371 hashes.push_back(std::move(hash));
372
373 auto end = std::chrono::system_clock::now();
374 LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from database in "
375 << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
376 << " milliseconds";
377
378 return hashes;
379 }
380
381 std::optional<NFT>
382 fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
383 const override
384 {
385 auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
386 if (not res)
387 return std::nullopt;
388
389 if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
390 auto [seq, owner, isBurned] = *maybeRow;
391 auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);
392
393 // now fetch URI. Usually we will have the URI even for burned NFTs,
394 // but if the first ledger on this clio included NFTokenBurn
395 // transactions we will not have the URIs for any of those tokens.
396 // In any other case not having the URI indicates something went
397 // wrong with our data.
398 //
399 // TODO - in the future would be great for any handlers that use
400 // this could inject a warning in this case (the case of not having
401 // a URI because it was burned in the first ledger) to indicate that
402 // even though we are returning a blank URI, the NFT might have had
403 // one.
404 auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
405 if (uriRes) {
406 if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
407 result->uri = *maybeUri;
408 }
409
410 return result;
411 }
412
413 LOG(log_.error()) << "Could not fetch NFT - no rows";
414 return std::nullopt;
415 }
416
419 ripple::uint256 const& tokenID,
420 std::uint32_t const limit,
421 bool const forward,
422 std::optional<TransactionsCursor> const& cursorIn,
423 boost::asio::yield_context yield
424 ) const override
425 {
426 auto rng = fetchLedgerRange();
427 if (!rng)
428 return {.txns = {}, .cursor = {}};
429
430 Statement const statement = [this, forward, &tokenID]() {
431 if (forward)
432 return schema_->selectNFTTxForward.bind(tokenID);
433
434 return schema_->selectNFTTx.bind(tokenID);
435 }();
436
437 auto cursor = cursorIn;
438 if (cursor) {
439 statement.bindAt(1, cursor->asTuple());
440 LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
441 << cursor->transactionIndex;
442 } else {
443 auto const seq = forward ? rng->minSequence : rng->maxSequence;
444 auto const placeHolder = forward ? 0 : std::numeric_limits<std::uint32_t>::max();
445
446 statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
447 LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
448 << " tuple = " << placeHolder;
449 }
450
451 statement.bindAt(2, Limit{limit});
452
453 auto const res = executor_.read(yield, statement);
454 auto const& results = res.value();
455 if (not results.hasRows()) {
456 LOG(log_.debug()) << "No rows returned";
457 return {};
458 }
459
460 std::vector<ripple::uint256> hashes = {};
461 auto numRows = results.numRows();
462 LOG(log_.info()) << "num_rows = " << numRows;
463
464 for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
465 hashes.push_back(hash);
466 if (--numRows == 0) {
467 LOG(log_.debug()) << "Setting cursor";
468 cursor = data;
469
470 // forward queries by ledger/tx sequence `>=`
471 // so we have to advance the index by one
472 if (forward)
473 ++cursor->transactionIndex;
474 }
475 }
476
477 auto const txns = fetchTransactions(hashes, yield);
478 LOG(log_.debug()) << "NFT Txns = " << txns.size();
479
480 if (txns.size() == limit) {
481 LOG(log_.debug()) << "Returning cursor";
482 return {txns, cursor};
483 }
484
485 return {txns, {}};
486 }
487
490 ripple::AccountID const& issuer,
491 std::optional<std::uint32_t> const& taxon,
492 std::uint32_t const ledgerSequence,
493 std::uint32_t const limit,
494 std::optional<ripple::uint256> const& cursorIn,
495 boost::asio::yield_context yield
496 ) const override
497 {
498 NFTsAndCursor ret;
499
500 Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
501 if (taxon.has_value()) {
502 auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
503 r.bindAt(1, *taxon);
504 r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
505 r.bindAt(3, Limit{limit});
506 return r;
507 }
508
509 auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
510 r.bindAt(
511 1,
512 std::make_tuple(
513 cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
514 cursorIn.value_or(ripple::uint256(0))
515 )
516 );
517 r.bindAt(2, Limit{limit});
518 return r;
519 }();
520
521 // Query for all the NFTs issued by the account, potentially filtered by the taxon
522 auto const res = executor_.read(yield, idQueryStatement);
523
524 auto const& idQueryResults = res.value();
525 if (not idQueryResults.hasRows()) {
526 LOG(log_.debug()) << "No rows returned";
527 return {};
528 }
529
530 std::vector<ripple::uint256> nftIDs;
531 for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
532 nftIDs.push_back(nftID);
533
534 if (nftIDs.empty())
535 return ret;
536
537 if (nftIDs.size() == limit)
538 ret.cursor = nftIDs.back();
539
540 std::vector<Statement> selectNFTStatements;
541 selectNFTStatements.reserve(nftIDs.size());
542
543 std::transform(
544 std::cbegin(nftIDs),
545 std::cend(nftIDs),
546 std::back_inserter(selectNFTStatements),
547 [&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
548 );
549
550 auto const nftInfos = executor_.readEach(yield, selectNFTStatements);
551
552 std::vector<Statement> selectNFTURIStatements;
553 selectNFTURIStatements.reserve(nftIDs.size());
554
555 std::transform(
556 std::cbegin(nftIDs),
557 std::cend(nftIDs),
558 std::back_inserter(selectNFTURIStatements),
559 [&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
560 );
561
562 auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);
563
564 for (auto i = 0u; i < nftIDs.size(); i++) {
565 if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
566 auto [seq, owner, isBurned] = *maybeRow;
567 NFT nft(nftIDs[i], seq, owner, isBurned);
568 if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
569 nft.uri = *maybeUri;
570 ret.nfts.push_back(nft);
571 }
572 }
573 return ret;
574 }
575
578 ripple::uint192 const& mptID,
579 std::uint32_t const limit,
580 std::optional<ripple::AccountID> const& cursorIn,
581 std::uint32_t const ledgerSequence,
582 boost::asio::yield_context yield
583 ) const override
584 {
585 auto const holderEntries = executor_.read(
586 yield, schema_->selectMPTHolders, mptID, cursorIn.value_or(ripple::AccountID(0)), Limit{limit}
587 );
588
589 auto const& holderResults = holderEntries.value();
590 if (not holderResults.hasRows()) {
591 LOG(log_.debug()) << "No rows returned";
592 return {};
593 }
594
595 std::vector<ripple::uint256> mptKeys;
596 std::optional<ripple::AccountID> cursor;
597 for (auto const [holder] : extract<ripple::AccountID>(holderResults)) {
598 mptKeys.push_back(ripple::keylet::mptoken(mptID, holder).key);
599 cursor = holder;
600 }
601
602 auto mptObjects = doFetchLedgerObjects(mptKeys, ledgerSequence, yield);
603
604 auto it = std::remove_if(mptObjects.begin(), mptObjects.end(), [](Blob const& mpt) { return mpt.empty(); });
605
606 mptObjects.erase(it, mptObjects.end());
607
608 ASSERT(mptKeys.size() <= limit, "Number of keys can't exceed the limit");
609 if (mptKeys.size() == limit)
610 return {mptObjects, cursor};
611
612 return {mptObjects, {}};
613 }
614
615 std::optional<Blob>
616 doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
617 const override
618 {
619 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
620 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
621 if (auto const result = res->template get<Blob>(); result) {
622 if (result->size())
623 return result;
624 } else {
625 LOG(log_.debug()) << "Could not fetch ledger object - no rows";
626 }
627 } else {
628 LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
629 }
630
631 return std::nullopt;
632 }
633
634 std::optional<std::uint32_t>
635 doFetchLedgerObjectSeq(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
636 const override
637 {
638 LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
639 if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
640 if (auto const result = res->template get<Blob, std::uint32_t>(); result) {
641 auto [_, seq] = result.value();
642 return seq;
643 }
644 LOG(log_.debug()) << "Could not fetch ledger object sequence - no rows";
645 } else {
646 LOG(log_.error()) << "Could not fetch ledger object sequence: " << res.error();
647 }
648
649 return std::nullopt;
650 }
651
652 std::optional<TransactionAndMetadata>
653 fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
654 {
655 if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
656 if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
657 auto [transaction, meta, seq, date] = *maybeValue;
658 return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
659 }
660
661 LOG(log_.debug()) << "Could not fetch transaction - no rows";
662 } else {
663 LOG(log_.error()) << "Could not fetch transaction: " << res.error();
664 }
665
666 return std::nullopt;
667 }
668
669 std::optional<ripple::uint256>
670 doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
671 const override
672 {
673 if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
674 if (auto const result = res->template get<ripple::uint256>(); result) {
675 if (*result == kLAST_KEY)
676 return std::nullopt;
677 return result;
678 }
679
680 LOG(log_.debug()) << "Could not fetch successor - no rows";
681 } else {
682 LOG(log_.error()) << "Could not fetch successor: " << res.error();
683 }
684
685 return std::nullopt;
686 }
687
688 std::vector<TransactionAndMetadata>
689 fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
690 {
691 if (hashes.empty())
692 return {};
693
694 auto const numHashes = hashes.size();
695 std::vector<TransactionAndMetadata> results;
696 results.reserve(numHashes);
697
698 std::vector<Statement> statements;
699 statements.reserve(numHashes);
700
701 auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
702 // TODO: seems like a job for "hash IN (list of hashes)" instead?
703 std::transform(
704 std::cbegin(hashes),
705 std::cend(hashes),
706 std::back_inserter(statements),
707 [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
708 );
709
710 auto const entries = executor_.readEach(yield, statements);
711 std::transform(
712 std::cbegin(entries),
713 std::cend(entries),
714 std::back_inserter(results),
715 [](auto const& res) -> TransactionAndMetadata {
716 if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
717 return *maybeRow;
718
719 return {};
720 }
721 );
722 });
723
724 ASSERT(numHashes == results.size(), "Number of hashes and results must match");
725 LOG(log_.debug()) << "Fetched " << numHashes << " transactions from database in " << timeDiff
726 << " milliseconds";
727 return results;
728 }
729
730 std::vector<Blob>
732 std::vector<ripple::uint256> const& keys,
733 std::uint32_t const sequence,
734 boost::asio::yield_context yield
735 ) const override
736 {
737 if (keys.empty())
738 return {};
739
740 auto const numKeys = keys.size();
741 LOG(log_.trace()) << "Fetching " << numKeys << " objects";
742
743 std::vector<Blob> results;
744 results.reserve(numKeys);
745
746 std::vector<Statement> statements;
747 statements.reserve(numKeys);
748
749 // TODO: seems like a job for "key IN (list of keys)" instead?
750 std::transform(
751 std::cbegin(keys),
752 std::cend(keys),
753 std::back_inserter(statements),
754 [this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
755 );
756
757 auto const entries = executor_.readEach(yield, statements);
758 std::transform(
759 std::cbegin(entries),
760 std::cend(entries),
761 std::back_inserter(results),
762 [](auto const& res) -> Blob {
763 if (auto const maybeValue = res.template get<Blob>(); maybeValue)
764 return *maybeValue;
765
766 return {};
767 }
768 );
769
770 LOG(log_.trace()) << "Fetched " << numKeys << " objects";
771 return results;
772 }
773
774 std::vector<ripple::uint256>
775 fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield)
776 const override
777 {
778 std::vector<ripple::uint256> liveAccounts;
779 std::optional<ripple::AccountID> lastItem;
780
781 while (liveAccounts.size() < number) {
782 Statement const statement = lastItem ? schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize})
783 : schema_->selectAccountFromBegining.bind(Limit{pageSize});
784
785 auto const res = executor_.read(yield, statement);
786 if (res) {
787 auto const& results = res.value();
788 if (not results.hasRows()) {
789 LOG(log_.debug()) << "No rows returned";
790 break;
791 }
792 // The results should not contain duplicates, we just filter out deleted accounts
793 std::vector<ripple::uint256> fullAccounts;
794 for (auto [account] : extract<ripple::AccountID>(results)) {
795 fullAccounts.push_back(ripple::keylet::account(account).key);
796 lastItem = account;
797 }
798 auto const objs = doFetchLedgerObjects(fullAccounts, seq, yield);
799
800 for (auto i = 0u; i < fullAccounts.size(); i++) {
801 if (not objs[i].empty()) {
802 if (liveAccounts.size() < number) {
803 liveAccounts.push_back(fullAccounts[i]);
804 } else {
805 break;
806 }
807 }
808 }
809 } else {
810 LOG(log_.error()) << "Could not fetch account from account_tx: " << res.error();
811 break;
812 }
813 }
814
815 return liveAccounts;
816 }
817
818 std::vector<LedgerObject>
819 fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
820 {
821 auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
822 auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
823 if (not res) {
824 LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
825 return {};
826 }
827
828 auto const& results = res.value();
829 if (not results) {
830 LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
831 return {};
832 }
833
834 std::vector<ripple::uint256> resultKeys;
835 for (auto [key] : extract<ripple::uint256>(results))
836 resultKeys.push_back(key);
837
838 return resultKeys;
839 });
840
841 // one of the above errors must have happened
842 if (keys.empty())
843 return {};
844
845 LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from database in " << timeDiff
846 << " milliseconds";
847
848 auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
849 std::vector<LedgerObject> results;
850 results.reserve(keys.size());
851
852 std::transform(
853 std::cbegin(keys),
854 std::cend(keys),
855 std::cbegin(objs),
856 std::back_inserter(results),
857 [](auto const& key, auto const& obj) { return LedgerObject{key, obj}; }
858 );
859
860 return results;
861 }
862
863 std::optional<std::string>
864 fetchMigratorStatus(std::string const& migratorName, boost::asio::yield_context yield) const override
865 {
866 auto const res = executor_.read(yield, schema_->selectMigratorStatus, Text(migratorName));
867 if (not res) {
868 LOG(log_.error()) << "Could not fetch migrator status: " << res.error();
869 return {};
870 }
871
872 auto const& results = res.value();
873 if (not results) {
874 return {};
875 }
876
877 for (auto [statusString] : extract<std::string>(results))
878 return statusString;
879
880 return {};
881 }
882
883 std::expected<std::vector<std::pair<boost::uuids::uuid, std::string>>, std::string>
884 fetchClioNodesData(boost::asio::yield_context yield) const override
885 {
886 auto const readResult = executor_.read(yield, schema_->selectClioNodesData);
887 if (not readResult)
888 return std::unexpected{readResult.error().message()};
889
890 std::vector<std::pair<boost::uuids::uuid, std::string>> result;
891
892 for (auto [uuid, message] : extract<boost::uuids::uuid, std::string>(*readResult)) {
893 result.emplace_back(uuid, std::move(message));
894 }
895
896 return result;
897 }
898
899 void
900 doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
901 {
902 LOG(log_.trace()) << " Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";
903
904 if (range_)
905 executor_.write(schema_->insertDiff, seq, key);
906
907 executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
908 }
909
910 void
911 writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
912 {
913 LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
914 << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
915 ASSERT(!key.empty(), "Key must not be empty");
916 ASSERT(!successor.empty(), "Successor must not be empty");
917
918 executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
919 }
920
921 void
922 writeAccountTransactions(std::vector<AccountTransactionsData> data) override
923 {
924 std::vector<Statement> statements;
925 statements.reserve(data.size() * 10); // assume 10 transactions avg
926
927 for (auto& record : data) {
928 std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
929 return schema_->insertAccountTx.bind(
930 std::forward<decltype(account)>(account),
931 std::make_tuple(record.ledgerSequence, record.transactionIndex),
932 record.txHash
933 );
934 });
935 }
936
937 executor_.write(std::move(statements));
938 }
939
940 void
942 {
943 std::vector<Statement> statements;
944 statements.reserve(record.accounts.size());
945
946 std::ranges::transform(record.accounts, std::back_inserter(statements), [this, &record](auto&& account) {
947 return schema_->insertAccountTx.bind(
948 std::forward<decltype(account)>(account),
949 std::make_tuple(record.ledgerSequence, record.transactionIndex),
950 record.txHash
951 );
952 });
953
954 executor_.write(std::move(statements));
955 }
956
957 void
958 writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
959 {
960 std::vector<Statement> statements;
961 statements.reserve(data.size());
962
963 std::ranges::transform(data, std::back_inserter(statements), [this](auto const& record) {
964 return schema_->insertNFTTx.bind(
965 record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
966 );
967 });
968
969 executor_.write(std::move(statements));
970 }
971
972 void
974 std::string&& hash,
975 std::uint32_t const seq,
976 std::uint32_t const date,
977 std::string&& transaction,
978 std::string&& metadata
979 ) override
980 {
981 LOG(log_.trace()) << "Writing txn to database";
982
983 executor_.write(schema_->insertLedgerTransaction, seq, hash);
984 executor_.write(
985 schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
986 );
987 }
988
989 void
990 writeNFTs(std::vector<NFTsData> const& data) override
991 {
992 std::vector<Statement> statements;
993 statements.reserve(data.size() * 3);
994
995 for (NFTsData const& record : data) {
996 if (!record.onlyUriChanged) {
997 statements.push_back(
998 schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
999 );
1000
1001 // If `uri` is set (and it can be set to an empty uri), we know this
1002 // is a net-new NFT. That is, this NFT has not been seen before by
1003 // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
1004 // the same NFT ID as an already-burned token. In this case, we need
1005 // to record the URI and link to the issuer_nf_tokens table.
1006 if (record.uri) {
1007 statements.push_back(schema_->insertIssuerNFT.bind(
1008 ripple::nft::getIssuer(record.tokenID),
1009 static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
1010 record.tokenID
1011 ));
1012 statements.push_back(
1013 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1014 );
1015 }
1016 } else {
1017 // only uri changed, we update the uri table only
1018 statements.push_back(
1019 schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
1020 );
1021 }
1022 }
1023
1024 executor_.writeEach(std::move(statements));
1025 }
1026
1027 void
1028 writeMPTHolders(std::vector<MPTHolderData> const& data) override
1029 {
1030 std::vector<Statement> statements;
1031 statements.reserve(data.size());
1032 for (auto [mptId, holder] : data)
1033 statements.push_back(schema_->insertMPTHolder.bind(mptId, holder));
1034
1035 executor_.write(std::move(statements));
1036 }
1037
1038 void
1039 startWrites() const override
1040 {
1041 // Note: no-op in original implementation too.
1042 // probably was used in PG to start a transaction or smth.
1043 }
1044
1045 void
1046 writeMigratorStatus(std::string const& migratorName, std::string const& status) override
1047 {
1048 executor_.writeSync(
1049 schema_->insertMigratorStatus, data::cassandra::Text{migratorName}, data::cassandra::Text(status)
1050 );
1051 }
1052
1053 void
1054 writeNodeMessage(boost::uuids::uuid const& uuid, std::string message) override
1055 {
1056 executor_.writeSync(schema_->updateClioNodeMessage, data::cassandra::Text{std::move(message)}, uuid);
1057 }
1058
1059 bool
1060 isTooBusy() const override
1061 {
1062 return executor_.isTooBusy();
1063 }
1064
1065 boost::json::object
1066 stats() const override
1067 {
1068 return executor_.stats();
1069 }
1070
1071private:
1072 bool
1073 executeSyncUpdate(Statement statement)
1074 {
1075 auto const res = executor_.writeSync(statement);
1076 auto maybeSuccess = res->template get<bool>();
1077 if (not maybeSuccess) {
1078 LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
1079 return false;
1080 }
1081
1082 if (not maybeSuccess.value()) {
1083 LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";
1084
1085 // error may indicate that another writer wrote something.
1086 // in this case let's just compare the current state of things
1087 // against what we were trying to write in the first place and
1088 // use that as the source of truth for the result.
1089 auto rng = hardFetchLedgerRangeNoThrow();
1090 return rng && rng->maxSequence == ledgerSequence_;
1091 }
1092
1093 return true;
1094 }
1095};
1096
1097using CassandraBackend = BasicCassandraBackend<SettingsProvider, impl::DefaultExecutionStrategy<>>;
1098
1099} // namespace data::cassandra
The interface to the database used by Clio.
Definition BackendInterface.hpp:140
std::vector< Blob > fetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t sequence, boost::asio::yield_context yield) const
Fetches all ledger objects by their keys.
Definition BackendInterface.cpp:119
std::optional< LedgerRange > hardFetchLedgerRangeNoThrow() const
Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
Definition BackendInterface.cpp:77
std::optional< LedgerRange > fetchLedgerRange() const
Fetch the current ledger range.
Definition BackendInterface.cpp:267
LedgerCacheInterface const & cache() const
Definition BackendInterface.hpp:165
Cache for an entire ledger.
Definition LedgerCacheInterface.hpp:38
Implements BackendInterface for Cassandra/ScyllaDB.
Definition CassandraBackend.hpp:76
std::vector< TransactionAndMetadata > fetchTransactions(std::vector< ripple::uint256 > const &hashes, boost::asio::yield_context yield) const override
Fetches multiple transactions.
Definition CassandraBackend.hpp:689
MPTHoldersAndCursor fetchMPTHolders(ripple::uint192 const &mptID, std::uint32_t const limit, std::optional< ripple::AccountID > const &cursorIn, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all holders' balances for a MPTIssuanceID.
Definition CassandraBackend.hpp:577
void writeTransaction(std::string &&hash, std::uint32_t const seq, std::uint32_t const date, std::string &&transaction, std::string &&metadata) override
Writes a new transaction.
Definition CassandraBackend.hpp:973
void writeNFTTransactions(std::vector< NFTTransactionsData > const &data) override
Write NFTs transactions.
Definition CassandraBackend.hpp:958
void writeAccountTransaction(AccountTransactionsData record) override
Write a new account transaction.
Definition CassandraBackend.hpp:941
std::vector< ripple::uint256 > fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transaction hashes from a specific ledger.
Definition CassandraBackend.hpp:351
std::optional< ripple::uint256 > doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Database-specific implementation of fetching the successor key.
Definition CassandraBackend.hpp:670
std::optional< std::uint32_t > doFetchLedgerObjectSeq(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object sequence.
Definition CassandraBackend.hpp:635
bool isTooBusy() const override
Definition CassandraBackend.hpp:1060
boost::json::object stats() const override
Definition CassandraBackend.hpp:1066
void writeNodeMessage(boost::uuids::uuid const &uuid, std::string message) override
Write a node message. Used by ClusterCommunicationService.
Definition CassandraBackend.hpp:1054
std::optional< std::string > fetchMigratorStatus(std::string const &migratorName, boost::asio::yield_context yield) const override
Fetches the status of migrator by name.
Definition CassandraBackend.hpp:864
std::optional< NFT > fetchNFT(ripple::uint256 const &tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches a specific NFT.
Definition CassandraBackend.hpp:382
void startWrites() const override
Starts a write transaction with the DB. No-op for cassandra.
Definition CassandraBackend.hpp:1039
void writeLedger(ripple::LedgerHeader const &ledgerHeader, std::string &&blob) override
Writes to a specific ledger.
Definition CassandraBackend.hpp:234
void waitForWritesToFinish() override
Wait for all pending writes to finish.
Definition CassandraBackend.hpp:210
void writeMPTHolders(std::vector< MPTHolderData > const &data) override
Write accounts that started holding onto a MPT.
Definition CassandraBackend.hpp:1028
void writeNFTs(std::vector< NFTsData > const &data) override
Writes NFTs to the database.
Definition CassandraBackend.hpp:990
void writeAccountTransactions(std::vector< AccountTransactionsData > data) override
Write a new set of account transactions.
Definition CassandraBackend.hpp:922
std::optional< std::uint32_t > fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
Fetches the latest ledger sequence.
Definition CassandraBackend.hpp:244
TransactionsAndCursor fetchAccountTransactions(ripple::AccountID const &account, std::uint32_t const limit, bool forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific account.
Definition CassandraBackend.hpp:142
void doWriteLedgerObject(std::string &&key, std::uint32_t const seq, std::string &&blob) override
Writes a ledger object to the database.
Definition CassandraBackend.hpp:900
NFTsAndCursor fetchNFTsByIssuer(ripple::AccountID const &issuer, std::optional< std::uint32_t > const &taxon, std::uint32_t const ledgerSequence, std::uint32_t const limit, std::optional< ripple::uint256 > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all NFTs issued by a given address.
Definition CassandraBackend.hpp:489
BasicCassandraBackend(SettingsProviderType settingsProvider, data::LedgerCacheInterface &cache, bool readOnly)
Create a new cassandra/scylla backend instance.
Definition CassandraBackend.hpp:98
void writeSuccessor(std::string &&key, std::uint32_t const seq, std::string &&successor) override
Write a new successor.
Definition CassandraBackend.hpp:911
std::optional< TransactionAndMetadata > fetchTransaction(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific transaction.
Definition CassandraBackend.hpp:653
std::vector< TransactionAndMetadata > fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Fetches all transactions from a specific ledger.
Definition CassandraBackend.hpp:344
std::optional< Blob > doFetchLedgerObject(ripple::uint256 const &key, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching a ledger object.
Definition CassandraBackend.hpp:616
bool doFinishWrites() override
The implementation should wait for all pending writes to finish.
Definition CassandraBackend.hpp:216
std::vector< ripple::uint256 > fetchAccountRoots(std::uint32_t number, std::uint32_t pageSize, std::uint32_t seq, boost::asio::yield_context yield) const override
Fetch the specified number of account root object indexes by page, the accounts need to exist for seq...
Definition CassandraBackend.hpp:775
std::vector< LedgerObject > fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
Returns the difference between ledgers.
Definition CassandraBackend.hpp:819
void writeMigratorStatus(std::string const &migratorName, std::string const &status) override
Mark the migration status of a migrator as Migrated in the database.
Definition CassandraBackend.hpp:1046
TransactionsAndCursor fetchNFTTransactions(ripple::uint256 const &tokenID, std::uint32_t const limit, bool const forward, std::optional< TransactionsCursor > const &cursorIn, boost::asio::yield_context yield) const override
Fetches all transactions for a specific NFT.
Definition CassandraBackend.hpp:418
std::expected< std::vector< std::pair< boost::uuids::uuid, std::string > >, std::string > fetchClioNodesData(boost::asio::yield_context yield) const override
Fetches the data of all nodes in the cluster.
Definition CassandraBackend.hpp:884
std::optional< ripple::LedgerHeader > fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
Fetches a specific ledger by sequence number.
Definition CassandraBackend.hpp:264
std::vector< Blob > doFetchLedgerObjects(std::vector< ripple::uint256 > const &keys, std::uint32_t const sequence, boost::asio::yield_context yield) const override
The database-specific implementation for fetching ledger objects.
Definition CassandraBackend.hpp:731
std::optional< LedgerRange > hardFetchLedgerRange(boost::asio::yield_context yield) const override
Fetches the ledger range from DB.
Definition CassandraBackend.hpp:306
std::optional< ripple::LedgerHeader > fetchLedgerByHash(ripple::uint256 const &hash, boost::asio::yield_context yield) const override
Fetches a specific ledger by hash.
Definition CassandraBackend.hpp:286
Represents a handle to the cassandra database cluster.
Definition Handle.hpp:46
MaybeErrorType connect() const
Synchronous version of the above.
Definition Handle.cpp:55
MaybeErrorType executeEach(std::vector< StatementType > const &statements) const
Synchronous version of the above.
Definition Handle.cpp:109
ResultOrErrorType execute(std::string_view query, Args &&... args) const
Synchronous version of the above.
Definition Handle.hpp:185
Manages the DB schema and provides access to prepared statements.
Definition Schema.hpp:55
void prepareStatements(Handle const &handle)
Recreates the prepared statements.
Definition Schema.hpp:848
Definition Statement.hpp:47
void bindAt(std::size_t const idx, Type &&value) const
Binds an argument to a specific index.
Definition Statement.hpp:93
A simple thread-safe logger for the channel specified in the constructor.
Definition Logger.hpp:111
Pump warn(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::WRN severity.
Definition Logger.cpp:224
Pump error(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::ERR severity.
Definition Logger.cpp:229
Pump debug(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::DBG severity.
Definition Logger.cpp:214
Pump trace(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::TRC severity.
Definition Logger.cpp:209
Pump info(SourceLocationType const &loc=CURRENT_SRC_LOCATION) const
Interface for logging at Severity::NFO severity.
Definition Logger.cpp:219
This namespace implements a wrapper for the Cassandra C++ driver.
Definition Concepts.hpp:37
impl::ResultExtractor< Types... > extract(Handle::ResultType const &result)
Extracts the results into series of std::tuple<Types...> by creating a simple wrapper with an STL inp...
Definition Handle.hpp:329
This namespace implements the data access layer and related components.
Definition AmendmentCenter.cpp:70
ripple::LedgerHeader deserializeHeader(ripple::Slice data)
Deserializes a ripple::LedgerHeader from ripple::Slice of data.
Definition LedgerUtils.hpp:203
auto timed(FnType &&func)
Profiler function to measure the time a function execution consumes.
Definition Profiler.hpp:40
Struct used to keep track of what to write to account_transactions/account_tx tables.
Definition DBHelpers.hpp:45
Represents an NFT state at a particular ledger.
Definition DBHelpers.hpp:103
Represents an object in the ledger.
Definition Types.hpp:41
Stores a range of sequences as a min and max pair.
Definition Types.hpp:247
Represents an array of MPTokens.
Definition Types.hpp:239
Represents a NFToken.
Definition Types.hpp:172
Represents a bundle of NFTs with a cursor to the next page.
Definition Types.hpp:231
Represents a transaction and its metadata bundled together.
Definition Types.hpp:68
Represents a bundle of transactions with metadata and a cursor to the next page.
Definition Types.hpp:164
A strong type wrapper for int32_t.
Definition Types.hpp:56
A strong type wrapper for string.
Definition Types.hpp:67