test/csf/Peer.h
#pragma once

#include <test/csf/CollectorRef.h>
#include <test/csf/Scheduler.h>
#include <test/csf/TrustGraph.h>
#include <test/csf/Tx.h>
#include <test/csf/Validation.h>
#include <test/csf/events.h>
#include <test/csf/ledgers.h>

#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/Validations.h>

#include <xrpl/beast/utility/WrappedSink.h>
#include <xrpl/protocol/PublicKey.h>

#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>

#include <algorithm>

namespace xrpl {
namespace test {
namespace csf {

namespace bc = boost::container;

/** A single peer in the simulation.

    A peer implements the callbacks required by the generic Consensus and
    Validations code, manages its own trust and network connections, issues
    events to the simulation's collectors for later analysis, and exposes most
    of its internal state so tests can force arbitrary scenarios.
*/
struct Peer
{
    /** Basic wrapper of a proposed position taken by a peer. */
    class Position
    {
    public:
        Position(Proposal const& p) : proposal_(p)
        {
        }

        Proposal const&
        proposal() const
        {
            return proposal_;
        }

        Json::Value
        getJson() const
        {
            return proposal_.getJson();
        }

        std::string
        render() const
        {
            return "";
        }

    private:
        Proposal proposal_;
    };

    /** Simulated delays in internal peer processing. */
    struct ProcessingDelays
    {
        //! Delay in consensus calling doAccept to accepting and issuing
        //! validation
        //! TODO: This should be a function of the number of transactions
        std::chrono::milliseconds ledgerAccept{0};

        //! Delay in processing validations from remote peers
        std::chrono::milliseconds recvValidation{0};

        // Return the receive delay for message type M, default is no delay
        // Received delay is the time from receiving the message to actually
        // handling it.
        template <class M>
        SimDuration
        onReceive(M const&) const
        {
            return SimDuration{};
        }

        SimDuration
        onReceive(Validation const&) const
        {
            return recvValidation;
        }
    };

    /** Generic Validations adaptor that simply ignores recently stale
        validations
    */
    class ValAdaptor
    {
        Peer& p_;

    public:
        struct Mutex
        {
            void
            lock()
            {
            }

            void
            unlock()
            {
            }
        };

        using Validation = csf::Validation;
        using Ledger = csf::Ledger;

        ValAdaptor(Peer& p) : p_{p}
        {
        }

        NetClock::time_point
        now() const
        {
            return p_.now();
        }

        std::optional<Ledger>
        acquire(Ledger::ID const& lId)
        {
            if (Ledger const* ledger = p_.acquireLedger(lId))
                return *ledger;
            return std::nullopt;
        }
    };

    using Ledger_t = Ledger;
    using NodeID_t = PeerID;
    using NodeKey_t = PeerKey;
    using TxSet_t = TxSet;
    using PeerPosition_t = Position;
    using Result = ConsensusResult<Peer>;

    //! Logging support that prefixes messages with the peer ID
    beast::WrappedSink sink;
    beast::Journal j;

    //! Generic consensus
    Consensus<Peer> consensus;

    //! Our unique ID
    PeerID id;

    //! Current signing key
    PeerKey key;

    //! The oracle that manages unique ledgers
    LedgerOracle& oracle;

    //! Scheduler of events
    Scheduler& scheduler;

    //! Handle to network for sending messages
    BasicNetwork<Peer*>& net;

    //! Handle to Trust graph of network
    TrustGraph<Peer*>& trustGraph;

    //! openTxs that haven't been closed in a ledger yet
    TxSetType openTxs;

    //! The last ledger closed by this node
    Ledger lastClosedLedger;

    //! Ledgers this node has closed or loaded from the network
    hash_map<Ledger::ID, Ledger> ledgers;

    //! Validations from trusted nodes
    Validations<ValAdaptor> validations;

    //! The most recent ledger that has been fully validated by the network
    //! from the perspective of this Peer
    Ledger fullyValidatedLedger;

    //-------------------------------------------------------------------------
    // Store most network messages; these could be purged if memory use ever
    // becomes problematic

    //! Map from Ledger::ID to vector of Positions with that ledger as the
    //! prior ledger
    bc::flat_map<Ledger::ID, std::vector<Proposal>> peerPositions;
    //! TxSet associated with a TxSet::ID
    bc::flat_map<TxSet::ID, TxSet> txSets;

    // Ledgers/TxSets we are acquiring and when that request times out
    bc::flat_map<Ledger::ID, SimTime> acquiringLedgers;
    bc::flat_map<TxSet::ID, SimTime> acquiringTxSets;

    //! The number of ledgers this peer has completed
    int completedLedgers = 0;

    //! The number of ledgers this peer should complete before stopping to run
    int targetLedgers = std::numeric_limits<int>::max();

    //! Skew of time relative to the common scheduler clock
    std::chrono::seconds clockSkew{0};

    //! Simulated delays to use for internal processing
    ProcessingDelays delays;

    //! Whether to simulate running as validator or a tracking node
    bool runAsValidator = true;

    // TODO: Consider removing these two, they are only a convenience for tests
    // Number of proposers in the prior round
    std::size_t prevProposers = 0;
    // Duration of prior round
    std::chrono::milliseconds prevRoundTime;

    // Quorum of validations needed for a ledger to be fully validated
    // TODO: Use the logic in ValidatorList to set this dynamically
    std::size_t quorum = 0;

    hash_set<NodeKey_t> trustedKeys;

    // Simulation parameters
    ConsensusParms consensusParms;

    //! The collectors to report events to
    CollectorRefs& collectors;

    /** Constructor

        @param i Unique PeerID
        @param s Simulation Scheduler
        @param o Simulation oracle of unique ledgers
        @param n Simulation network
        @param tg Simulation trust graph
        @param c Simulation collectors
        @param jIn Simulation journal
    */
    Peer(
        PeerID i,
        Scheduler& s,
        LedgerOracle& o,
        BasicNetwork<Peer*>& n,
        TrustGraph<Peer*>& tg,
        CollectorRefs& c,
        beast::Journal jIn)
        : sink(jIn, "Peer " + to_string(i) + ": ")
        , j(sink)
        , consensus(s.clock(), *this, j)
        , id{i}
        , key{id, 0}
        , oracle{o}
        , scheduler{s}
        , net{n}
        , trustGraph(tg)
        , lastClosedLedger{Ledger::MakeGenesis{}}
        , validations{ValidationParms{}, s.clock(), *this}
        , fullyValidatedLedger{Ledger::MakeGenesis{}}
        , collectors{c}
    {
        // All peers start from the default constructed genesis ledger
        ledgers[lastClosedLedger.id()] = lastClosedLedger;

        // nodes always trust themselves . . SHOULD THEY?
        trustGraph.trust(this, this);
    }

    /** Schedule the provided callback in `when` duration, but if `when` is 0,
        call immediately
    */
    template <class T>
    void
    schedule(std::chrono::nanoseconds when, T&& what)
    {
        using namespace std::chrono_literals;

        if (when == 0ns)
            what();
        else
            scheduler.in(when, std::forward<T>(what));
    }

    // Issue a new event to the collectors
    template <class E>
    void
    issue(E const& event)
    {
        // Use the scheduler time and not the peer's (skewed) local time
        collectors.on(id, scheduler.now(), event);
    }

    //--------------------------------------------------------------------------
    // Trust and Network members
    // Methods for modifying and querying the network and trust graphs from
    // the perspective of this Peer

    //< Extend trust to a peer
    void
    trust(Peer& o)
    {
        trustGraph.trust(this, &o);
    }

    //< Revoke trust from a peer
    void
    untrust(Peer& o)
    {
        trustGraph.untrust(this, &o);
    }

    //< Check whether we trust a peer
    bool
    trusts(Peer& o)
    {
        return trustGraph.trusts(this, &o);
    }

    //< Check whether we trust a peer based on its ID
    bool
    trusts(PeerID const& oId)
    {
        for (auto const p : trustGraph.trustedPeers(this))
            if (p->id == oId)
                return true;
        return false;
    }

    /** Create network connection

        Creates a new outbound connection to another Peer if none exists

        @param o The peer to connect to
        @param dur The fixed delay for messages between the two Peers
        @return Whether the connection was created
    */
    bool
    connect(Peer& o, SimDuration dur)
    {
        return net.connect(this, &o, dur);
    }

    /** Remove a network connection

        Removes a connection between peers if one exists

        @param o The peer to disconnect from
        @return Whether the connection was removed
    */
    bool
    disconnect(Peer& o)
    {
        return net.disconnect(this, &o);
    }

    //--------------------------------------------------------------------------
    // Generic Consensus members

    // Attempt to acquire the Ledger associated with the given ID
    Ledger const*
    acquireLedger(Ledger::ID const& ledgerID)
    {
        if (auto it = ledgers.find(ledgerID); it != ledgers.end())
        {
            return &(it->second);
        }

        // No peers
        if (net.links(this).empty())
            return nullptr;

        // Don't retry if we already are acquiring it and haven't timed out
        auto aIt = acquiringLedgers.find(ledgerID);
        if (aIt != acquiringLedgers.end())
        {
            if (scheduler.now() < aIt->second)
                return nullptr;
        }

        using namespace std::chrono_literals;
        SimDuration minDuration{10s};
        for (auto const link : net.links(this))
        {
            minDuration = std::min(minDuration, link.data.delay);

            // Send a message to neighbors to find the ledger
            net.send(this, link.target, [to = link.target, from = this, ledgerID]() {
                if (auto it = to->ledgers.find(ledgerID); it != to->ledgers.end())
                {
                    // if the ledger is found, send it back to the original
                    // requesting peer where it is added to the available
                    // ledgers
                    to->net.send(to, from, [from, ledger = it->second]() {
                        from->acquiringLedgers.erase(ledger.id());
                        from->ledgers.emplace(ledger.id(), ledger);
                    });
                }
            });
        }
        acquiringLedgers[ledgerID] = scheduler.now() + 2 * minDuration;
        return nullptr;
    }

    // Attempt to acquire the TxSet associated with the given ID
    TxSet const*
    acquireTxSet(TxSet::ID const& setId)
    {
        if (auto it = txSets.find(setId); it != txSets.end())
        {
            return &(it->second);
        }

        // No peers
        if (net.links(this).empty())
            return nullptr;

        // Don't retry if we already are acquiring it and haven't timed out
        auto aIt = acquiringTxSets.find(setId);
        if (aIt != acquiringTxSets.end())
        {
            if (scheduler.now() < aIt->second)
                return nullptr;
        }

        using namespace std::chrono_literals;
        SimDuration minDuration{10s};
        for (auto const link : net.links(this))
        {
            minDuration = std::min(minDuration, link.data.delay);
            // Send a message to neighbors to find the tx set
            net.send(this, link.target, [to = link.target, from = this, setId]() {
                if (auto it = to->txSets.find(setId); it != to->txSets.end())
                {
                    // If the txSet is found, send it back to the original
                    // requesting peer, where it is handled like a TxSet
                    // that was broadcast over the network
                    to->net.send(to, from, [from, txSet = it->second]() {
                        from->acquiringTxSets.erase(txSet.id());
                        from->handle(txSet);
                    });
                }
            });
        }
        acquiringTxSets[setId] = scheduler.now() + 2 * minDuration;
        return nullptr;
    }

    bool
    hasOpenTransactions() const
    {
        return !openTxs.empty();
    }

    std::size_t
    proposersValidated(Ledger::ID const& prevLedger)
    {
        return validations.numTrustedForLedger(prevLedger);
    }

    std::size_t
    proposersFinished(Ledger const& prevLedger, Ledger::ID const& prevLedgerID)
    {
        return validations.getNodesAfter(prevLedger, prevLedgerID);
    }

    Result
    onClose(
        Ledger const& prevLedger,
        NetClock::time_point closeTime,
        ConsensusMode mode)
    {
        issue(CloseLedger{prevLedger, openTxs});

        return Result(
            TxSet{openTxs},
            Proposal(
                prevLedger.id(),
                Proposal::seqJoin,
                TxSet::calcID(openTxs),
                closeTime,
                now(),
                id));
    }

    void
    onForceAccept(
        Result const& result,
        Ledger const& prevLedger,
        NetClock::duration const& closeResolution,
        ConsensusCloseTimes const& rawCloseTimes,
        ConsensusMode const& mode,
        Json::Value&& consensusJson)
    {
        onAccept(
            result,
            prevLedger,
            closeResolution,
            rawCloseTimes,
            mode,
            std::move(consensusJson),
            validating());
    }

    void
    onAccept(
        Result const& result,
        Ledger const& prevLedger,
        NetClock::duration const& closeResolution,
        ConsensusCloseTimes const& rawCloseTimes,
        ConsensusMode const& mode,
        Json::Value&& consensusJson,
        bool const validating)
    {
        schedule(delays.ledgerAccept, [=, this]() {
            bool const proposing = mode == ConsensusMode::proposing;
            bool const consensusFail = result.state == ConsensusState::MovedOn;

            TxSet const acceptedTxs = injectTxs(prevLedger, result.txns);
            Ledger const newLedger = oracle.accept(
                prevLedger,
                acceptedTxs.txs(),
                closeResolution,
                result.position.closeTime());
            ledgers[newLedger.id()] = newLedger;

            issue(AcceptLedger{newLedger, lastClosedLedger});
            prevProposers = result.proposers;
            prevRoundTime = result.roundTime.read();
            lastClosedLedger = newLedger;

            auto const it = std::remove_if(
                openTxs.begin(), openTxs.end(), [&](Tx const& tx) {
                    return acceptedTxs.exists(tx.id());
                });
            openTxs.erase(it, openTxs.end());

            // Only send validation if the new ledger is compatible with our
            // fully validated ledger
            bool const isCompatible = newLedger.isAncestor(fullyValidatedLedger);

            // Can only send one validated ledger per seq
            if (runAsValidator && isCompatible && !consensusFail &&
                validations.canValidateSeq(newLedger.seq()))
            {
                bool isFull = proposing;

                Validation v{
                    newLedger.id(), newLedger.seq(), now(), now(), key, id, isFull};
                // share the new validation; it is trusted by the receiver
                share(v);
                // we trust ourselves
                addTrustedValidation(v);
            }

            checkFullyValidated(newLedger);

            // kick off the next round...
            // in the actual implementation, this passes back through
            // network ops
            ++completedLedgers;
            // startRound sets the LCL state, so we need to call it once after
            // the last requested round completes
            if (completedLedgers <= targetLedgers)
            {
                startRound();
            }
        });
    }

    // Earliest allowed sequence number when checking for ledgers with more
    // validations than our current ledger
    Ledger::Seq
    earliestAllowedSeq() const
    {
        return fullyValidatedLedger.seq();
    }

    Ledger::ID
    getPrevLedger(
        Ledger::ID const& ledgerID,
        Ledger const& ledger,
        ConsensusMode mode)
    {
        // only do if we are past the genesis ledger
        if (ledger.seq() == Ledger::Seq{0})
            return ledgerID;

        Ledger::ID const netLgr =
            validations.getPreferred(ledger, earliestAllowedSeq());

        if (netLgr != ledgerID)
        {
            JLOG(j.trace()) << Json::Compact(validations.getJsonTrie());
            issue(WrongPrevLedger{ledgerID, netLgr});
        }

        return netLgr;
    }

    void
    propose(Proposal const& pos)
    {
        share(pos);
    }

    ConsensusParms const&
    parms() const
    {
        return consensusParms;
    }

    // Not interested in tracking consensus mode changes for now
    void
    onModeChange(ConsensusMode, ConsensusMode)
    {
    }

    // Share a message by broadcasting to all connected peers
    template <class M>
    void
    share(M const& m)
    {
        issue(Share<M>{m});
        send(BroadcastMesg<M>{m, router.nextSeq++, this->id}, this->id);
    }

    // Unwrap the Position and share the raw proposal
    void
    share(Position const& p)
    {
        share(p.proposal());
    }

    //--------------------------------------------------------------------------
    // Validation members

    //! Add a trusted validation and return true if it is worth forwarding
    bool
    addTrustedValidation(Validation v)
    {
        v.setTrusted();
        v.setSeen(now());
        ValStatus const res = validations.add(v.nodeID(), v);

        if (res == ValStatus::stale)
            return false;

        // Acquire will try to get from network if not already local
        if (Ledger const* lgr = acquireLedger(v.ledgerID()))
            checkFullyValidated(*lgr);
        return true;
    }

    //! Check if a new ledger can be deemed fully validated
    void
    checkFullyValidated(Ledger const& ledger)
    {
        // Only consider ledgers newer than our last fully validated ledger
        if (ledger.seq() <= fullyValidatedLedger.seq())
            return;

        std::size_t const count = validations.numTrustedForLedger(ledger.id());
        std::size_t const numTrustedPeers = trustGraph.graph().outDegree(this);
        quorum = static_cast<std::size_t>(std::ceil(numTrustedPeers * 0.8));
        if (count >= quorum && ledger.isAncestor(fullyValidatedLedger))
        {
            issue(FullyValidateLedger{ledger, fullyValidatedLedger});
            fullyValidatedLedger = ledger;
        }
    }

    //-------------------------------------------------------------------------
    // Peer messaging members

    // Basic Sequence number router
    // A message that will be flooded across the network is tagged with a
    // sequence number by the origin node in a BroadcastMesg. Receivers will
    // ignore a message as stale if they've already processed a newer sequence
    // number, or will process and potentially relay the message along.
    //
    // The various bool handle(MessageType) members do the actual processing
    // and should return true if the message should continue to be sent to
    // peers.
    //
    // WARN: This assumes messages are received and processed in the order they
    // are sent, so that a peer receives a message with seq 1 from node 0
    // before seq 2 from node 0, etc.
    // TODO: Break this out into a class and identify type interface to allow
    // alternate routing strategies
    template <class M>
    struct BroadcastMesg
    {
        M msg;
        std::size_t seq;
        PeerID origin;
    };

    struct Router
    {
        std::size_t nextSeq = 1;
        bc::flat_map<PeerID, std::size_t> lastObservedSeq;
    };

    Router router;

    // Send a broadcast message to all peers
    template <class M>
    void
    send(BroadcastMesg<M> const& bm, PeerID from)
    {
        for (auto const link : net.links(this))
        {
            if (link.target->id != from && link.target->id != bm.origin)
            {
                // cheat and don't bother sending if we know it has already been
                // used on the other end
                if (link.target->router.lastObservedSeq[bm.origin] < bm.seq)
                {
                    issue(Relay<M>{link.target->id, bm.msg});
                    net.send(this, link.target, [to = link.target, bm, id = this->id] {
                        to->receive(bm, id);
                    });
                }
            }
        }
    }

    // Receive a shared message, process it and consider continuing to relay it
    template <class M>
    void
    receive(BroadcastMesg<M> const& bm, PeerID from)
    {
        issue(Receive<M>{from, bm.msg});
        if (router.lastObservedSeq[bm.origin] < bm.seq)
        {
            router.lastObservedSeq[bm.origin] = bm.seq;
            schedule(delays.onReceive(bm.msg), [this, bm, from] {
                if (handle(bm.msg))
                    send(bm, from);
            });
        }
    }

    // Type specific receive handlers, return true if the message should
    // continue to be broadcast to peers
    bool
    handle(Proposal const& p)
    {
        // Only relay untrusted proposals on the same ledger
        if (!trusts(p.nodeID()))
            return p.prevLedger() == lastClosedLedger.id();

        // TODO: This always suppresses relay of peer positions already seen
        // Should it allow forwarding if for a recent ledger ?
        auto& dest = peerPositions[p.prevLedger()];
        if (std::find(dest.begin(), dest.end(), p) != dest.end())
            return false;

        dest.push_back(p);

        // Rely on consensus to decide whether to relay
        return consensus.peerProposal(now(), Position{p});
    }

    bool
    handle(TxSet const& txs)
    {
        bool const inserted = txSets.insert(std::make_pair(txs.id(), txs)).second;
        if (inserted)
            consensus.gotTxSet(now(), txs);
        // relay only if new
        return inserted;
    }

    bool
    handle(Tx const& tx)
    {
        // Ignore and suppress relay of transactions already in last ledger
        TxSetType const& lastClosedTxs = lastClosedLedger.txs();
        if (lastClosedTxs.find(tx) != lastClosedTxs.end())
            return false;

        // only relay if it was new to our open ledger
        return openTxs.insert(tx).second;
    }

    bool
    handle(Validation const& v)
    {
        // TODO: This is not relaying untrusted validations
        if (!trusts(v.nodeID()))
            return false;

        // Will only relay if current
        return addTrustedValidation(v);
    }

    bool
    haveValidated() const
    {
        return fullyValidatedLedger.seq() > Ledger::Seq{0};
    }

    Ledger::Seq
    getValidLedgerIndex() const
    {
        return earliestAllowedSeq();
    }

    std::pair<std::size_t, hash_set<NodeKey_t>>
    getQuorumKeys()
    {
        hash_set<NodeKey_t> keys;
        for (auto const p : trustGraph.trustedPeers(this))
            keys.insert(p->key);
        return {quorum, keys};
    }

    std::size_t
    laggards(Ledger::Seq const seq, hash_set<NodeKey_t>& trusted)
    {
        return validations.laggards(seq, trusted);
    }

    bool
    validator() const
    {
        return runAsValidator;
    }

    void
    updateOperatingMode(std::size_t const positions) const
    {
    }

    bool
    validating() const
    {
        // does not matter
        return false;
    }

    //--------------------------------------------------------------------------
    // A locally submitted transaction
    void
    submit(Tx const& tx)
    {
        issue(SubmitTx{tx});
        if (handle(tx))
            share(tx);
    }

    //--------------------------------------------------------------------------
    // Simulation "driver" members

    //! Heartbeat timer call
    void
    timerEntry()
    {
        consensus.timerEntry(now());
        // only reschedule if not completed
        if (completedLedgers < targetLedgers)
            scheduler.in(parms().ledgerGRANULARITY, [this]() { timerEntry(); });
    }

    // Called to begin the next round
    void
    startRound()
    {
        // Between rounds, we take the majority ledger
        // In the future, consider taking peer dominant ledger if no validations
        // yet
        Ledger::ID bestLCL =
            validations.getPreferred(lastClosedLedger, earliestAllowedSeq());
        if (bestLCL == Ledger::ID{0})
            bestLCL = lastClosedLedger.id();

        issue(StartRound{bestLCL, lastClosedLedger});

        // Not yet modeling dynamic UNL.
        hash_set<PeerID> nowUntrusted;
        consensus.startRound(
            now(), bestLCL, lastClosedLedger, nowUntrusted, runAsValidator, {});
    }

    // Start the consensus process assuming it is not yet running
    // This runs forever unless targetLedgers is specified
    void
    start()
    {
        // TODO: Expire validations less frequently?
        validations.expire(j);
        scheduler.in(parms().ledgerGRANULARITY, [&]() { timerEntry(); });
        startRound();
    }

    NetClock::time_point
    now() const
    {
        // We don't care about the actual epochs, but do want the
        // generated NetClock time to be well past its epoch to ensure
        // any subtractions of two NetClock::time_point in the consensus
        // code are positive. (e.g. proposeFRESHNESS)
        using namespace std::chrono;
        using namespace std::chrono_literals;
        return NetClock::time_point(duration_cast<NetClock::duration>(
            scheduler.now().time_since_epoch() + 86400s + clockSkew));
    }

    Ledger::ID
    prevLedgerID() const
    {
        return consensus.prevLedgerID();
    }

    //-------------------------------------------------------------------------
    // Injects a specific transaction when generating the ledger following
    // the provided sequence. This allows simulating a byzantine failure in
    // which a node generates the wrong ledger, even when consensus worked
    // properly.
    // TODO: Make this more robust
    hash_map<Ledger::Seq, Tx> txInjections;

    /** Inject non-consensus Tx

        Injects a transaction into the ledger following the provided sequence,
        modifying the agreed TxSet after consensus.

        @param prevLedger The ledger we are building the new ledger on top of
        @param src The Consensus TxSet
        @return Consensus TxSet with any injected transactions added
    */
    TxSet
    injectTxs(Ledger prevLedger, TxSet const& src)
    {
        auto const it = txInjections.find(prevLedger.seq());

        if (it == txInjections.end())
            return src;
        TxSetType res{src.txs()};
        res.insert(it->second);

        return TxSet{res};
    }
};

}  // namespace csf
}  // namespace test
}  // namespace xrpl
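
Usage note: a Peer is not normally constructed directly. The surrounding csf framework (test/csf/Sim.h, test/csf/PeerGroup.h) owns the Scheduler, LedgerOracle, BasicNetwork, TrustGraph, and CollectorRefs that the constructor above requires and wires them into each Peer it creates. The sketch below shows how the class is typically driven from a consensus unit test; it assumes the Sim and PeerGroup helpers and the Tx(id) constructor from Tx.h, and the free function name runSmallNetwork is hypothetical, so treat it as an approximation of the test pattern rather than a verbatim excerpt.

// Sketch only: assumes the Sim/PeerGroup helpers from test/csf/Sim.h.
#include <test/csf/Sim.h>

#include <chrono>
#include <cstdint>

using namespace xrpl::test::csf;
using namespace std::chrono;

// Hypothetical driver showing the typical Peer life cycle in a simulation.
void
runSmallNetwork()
{
    Sim sim;

    // Five peers that all trust each other and are pairwise connected with a
    // fixed 200ms link delay (Peer::trust and Peer::connect under the hood).
    PeerGroup peers = sim.createGroup(5);
    peers.trustAndConnect(peers, 200ms);

    // Each peer locally submits one transaction (Peer::submit -> handle/share).
    std::uint32_t txId = 0;
    for (Peer* p : peers)
        p->submit(Tx{txId++});

    // Run one consensus round; the Sim starts each peer, which schedules the
    // timerEntry heartbeats on the shared Scheduler.
    sim.run(1);

    // Afterwards each peer's lastClosedLedger / fullyValidatedLedger can be
    // inspected directly, e.g. to confirm the network stayed in sync.
    bool const allSynchronized = sim.synchronized();
    (void)allSynchronized;
}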