// Consensus_test.cpp — unit tests for the generic consensus algorithm.
#include <test/csf.h>
#include <test/unit_test/SuiteJournal.h>

#include <xrpld/consensus/Consensus.h>

#include <xrpl/beast/unit_test.h>
#include <xrpl/json/to_string.h>

9namespace xrpl {
10namespace test {
11
13{
15
16public:
17 Consensus_test() : journal_("Consensus_test", *this)
18 {
19 }
20
21 void
23 {
24 using namespace std::chrono_literals;
25 testcase("should close ledger");
26
27 // Use default parameters
28 ConsensusParms const p{};
29
30 // Bizarre times forcibly close
31 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
32 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
33 BEAST_EXPECT(shouldCloseLedger(true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
34
35 // Rest of network has closed
36 BEAST_EXPECT(shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
37
38 // No transactions means wait until end of internval
39 BEAST_EXPECT(!shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
40 BEAST_EXPECT(shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
41
42 // Enforce minimum ledger open time
43 BEAST_EXPECT(!shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
44
45 // Don't go too much faster than last time
46 BEAST_EXPECT(!shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
47
48 BEAST_EXPECT(shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
49 }
50
51 void
53 {
54 using namespace std::chrono_literals;
55 testcase("check consensus");
56
57 // Use default parameters
58 ConsensusParms const p{};
59
61 // Disputes still in doubt
62 //
63 // Not enough time has elapsed
64 BEAST_EXPECT(
65 ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
66
67 // If not enough peers have proposed, ensure
68 // more time for proposals
69 BEAST_EXPECT(
70 ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
71
72 // Enough time has elapsed and we all agree
73 BEAST_EXPECT(
74 ConsensusState::Yes == checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
75
76 // Enough time has elapsed and we don't yet agree
77 BEAST_EXPECT(
78 ConsensusState::No == checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
79
80 // Our peers have moved on
81 // Enough time has elapsed and we all agree
82 BEAST_EXPECT(
84 checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
85
86 // If no peers, don't agree until time has passed.
87 BEAST_EXPECT(
88 ConsensusState::No == checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
89
90 // Agree if no peers and enough time has passed.
91 BEAST_EXPECT(
92 ConsensusState::Yes == checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
93
94 // Expire if too much time has passed without agreement
95 BEAST_EXPECT(
97 checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
98
100 // Stalled
101 //
102 // Not enough time has elapsed
103 BEAST_EXPECT(
104 ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
105
106 // If not enough peers have proposed, ensure
107 // more time for proposals
108 BEAST_EXPECT(
109 ConsensusState::No == checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
110
111 // Enough time has elapsed and we all agree
112 BEAST_EXPECT(
113 ConsensusState::Yes == checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
114
115 // Enough time has elapsed and we don't yet agree, but there's nothing
116 // left to dispute
117 BEAST_EXPECT(
118 ConsensusState::Yes == checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
119
120 // Our peers have moved on
121 // Enough time has elapsed and we all agree, nothing left to dispute
122 BEAST_EXPECT(
123 ConsensusState::Yes == checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
124
125 // If no peers, don't agree until time has passed.
126 BEAST_EXPECT(
127 ConsensusState::No == checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
128
129 // Agree if no peers and enough time has passed.
130 BEAST_EXPECT(
131 ConsensusState::Yes == checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
132
133 // We are done if there's nothing left to dispute, no matter how much
134 // time has passed
135 BEAST_EXPECT(
136 ConsensusState::Yes == checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
137 }
138
139 void
141 {
142 using namespace std::chrono_literals;
143 using namespace csf;
144 testcase("standalone");
145
146 Sim s;
147 PeerGroup const peers = s.createGroup(1);
148 Peer* peer = peers[0];
149 peer->targetLedgers = 1;
150 peer->start();
151 peer->submit(Tx{1});
152
153 s.scheduler.step();
154
155 // Inspect that the proper ledger was created
156 auto const& lcl = peer->lastClosedLedger;
157 BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
158 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
159 BEAST_EXPECT(lcl.txs().size() == 1);
160 BEAST_EXPECT(lcl.txs().contains(Tx{1}));
161 BEAST_EXPECT(peer->prevProposers == 0);
162 }
163
164 void
166 {
167 using namespace csf;
168 using namespace std::chrono;
169 testcase("peers agree");
170
171 ConsensusParms const parms{};
172 Sim sim;
173 PeerGroup peers = sim.createGroup(5);
174
175 // Connected trust and network graphs with single fixed delay
176 peers.trustAndConnect(peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
177
178 // everyone submits their own ID as a TX
179 for (Peer* p : peers)
180 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
181
182 sim.run(1);
183
184 // All peers are in sync
185 if (BEAST_EXPECT(sim.synchronized()))
186 {
187 for (Peer const* peer : peers)
188 {
189 auto const& lcl = peer->lastClosedLedger;
190 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
191 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
192 // All peers proposed
193 BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
194 // All transactions were accepted
195 for (std::uint32_t i = 0; i < peers.size(); ++i)
196 BEAST_EXPECT(lcl.txs().contains(Tx{i}));
197 }
198 }
199 }
200
201 void
203 {
204 using namespace csf;
205 using namespace std::chrono;
206 testcase("slow peers");
207
208 // Several tests of a complete trust graph with a subset of peers
209 // that have significantly longer network delays to the rest of the
210 // network
211
212 // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
213 {
214 ConsensusParms const parms{};
215 Sim sim;
216 PeerGroup slow = sim.createGroup(1);
217 PeerGroup fast = sim.createGroup(4);
218 PeerGroup network = fast + slow;
219
220 // Fully connected trust graph
221 network.trust(network);
222
223 // Fast and slow network connections
224 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
225
226 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
227
228 // All peers submit their own ID as a transaction
229 for (Peer* peer : network)
230 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
231
232 sim.run(1);
233
234 // Verify all peers have same LCL but are missing transaction 0
235 // All peers are in sync even with a slower peer 0
236 if (BEAST_EXPECT(sim.synchronized()))
237 {
238 for (Peer const* peer : network)
239 {
240 auto const& lcl = peer->lastClosedLedger;
241 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
242 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
243
244 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
245 BEAST_EXPECT(peer->prevRoundTime == network[0]->prevRoundTime);
246
247 BEAST_EXPECT(not lcl.txs().contains(Tx{0}));
248 for (std::uint32_t i = 2; i < network.size(); ++i)
249 BEAST_EXPECT(lcl.txs().contains(Tx{i}));
250
251 // Tx 0 didn't make it
252 BEAST_EXPECT(peer->openTxs.contains(Tx{0}));
253 }
254 }
255 }
256
257 // Test when the slow peers delay a consensus quorum (4/6 agree)
258 {
259 // Run two tests
260 // 1. The slow peers are participating in consensus
261 // 2. The slow peers are just observing
262
263 for (auto isParticipant : {true, false})
264 {
265 ConsensusParms const parms{};
266
267 Sim sim;
268 PeerGroup slow = sim.createGroup(2);
269 PeerGroup fast = sim.createGroup(4);
270 PeerGroup network = fast + slow;
271
272 // Connected trust graph
273 network.trust(network);
274
275 // Fast and slow network connections
276 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
277
278 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
279
280 for (Peer* peer : slow)
281 peer->runAsValidator = isParticipant;
282
283 // All peers submit their own ID as a transaction and relay it
284 // to peers
285 for (Peer* peer : network)
286 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
287
288 sim.run(1);
289
290 if (BEAST_EXPECT(sim.synchronized()))
291 {
292 // Verify all peers have same LCL but are missing
293 // transaction 0,1 which was not received by all peers
294 // before the ledger closed
295 for (Peer const* peer : network)
296 {
297 // Closed ledger has all but transaction 0,1
298 auto const& lcl = peer->lastClosedLedger;
299 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
300 BEAST_EXPECT(not lcl.txs().contains(Tx{0}));
301 BEAST_EXPECT(not lcl.txs().contains(Tx{1}));
302 for (std::uint32_t i = slow.size(); i < network.size(); ++i)
303 BEAST_EXPECT(lcl.txs().contains(Tx{i}));
304
305 // Tx 0-1 didn't make it
306 BEAST_EXPECT(peer->openTxs.contains(Tx{0}));
307 BEAST_EXPECT(peer->openTxs.contains(Tx{1}));
308 }
309
310 Peer const* slowPeer = slow[0];
311 if (isParticipant)
312 {
313 BEAST_EXPECT(slowPeer->prevProposers == network.size() - 1);
314 }
315 else
316 {
317 BEAST_EXPECT(slowPeer->prevProposers == fast.size());
318 }
319
320 for (Peer const* peer : fast)
321 {
322 // Due to the network link delay settings
323 // Peer 0 initially proposes {0}
324 // Peer 1 initially proposes {1}
325 // Peers 2-5 initially propose {2,3,4,5}
326 // Since peers 2-5 agree, 4/6 > the initial 50% needed
327 // to include a disputed transaction, so Peer 0/1 switch
328 // to agree with those peers. Peer 0/1 then closes with
329 // an 80% quorum of agreeing positions (5/6) match.
330 //
331 // Peers 2-5 do not change position, since tx 0 or tx 1
332 // have less than the 50% initial threshold. They also
333 // cannot declare consensus, since 4/6 agreeing
334 // positions are < 80% threshold. They therefore need an
335 // additional timerEntry call to see the updated
336 // positions from Peer 0 & 1.
337
338 if (isParticipant)
339 {
340 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
341 BEAST_EXPECT(peer->prevRoundTime > slowPeer->prevRoundTime);
342 }
343 else
344 {
345 BEAST_EXPECT(peer->prevProposers == fast.size() - 1);
346 // so all peers should have closed together
347 BEAST_EXPECT(peer->prevRoundTime == slowPeer->prevRoundTime);
348 }
349 }
350 }
351 }
352 }
353 }
354
355 void
357 {
358 using namespace csf;
359 using namespace std::chrono;
360 testcase("close time disagree");
361
362 // This is a very specialized test to get ledgers to disagree on
363 // the close time. It unfortunately assumes knowledge about current
364 // timing constants. This is a necessary evil to get coverage up
365 // pending more extensive refactorings of timing constants.
366
367 // In order to agree-to-disagree on the close time, there must be no
368 // clear majority of nodes agreeing on a close time. This test
369 // sets a relative offset to the peers internal clocks so that they
370 // send proposals with differing times.
371
372 // However, agreement is on the effective close time, not the
373 // exact close time. The minimum closeTimeResolution is given by
374 // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
375 // the skews need to be at least 10 seconds to have different effective
376 // close times.
377
378 // Complicating this matter is that nodes will ignore proposals
379 // with times more than proposeFRESHNESS =20s in the past. So at
380 // the minimum granularity, we have at most 3 types of skews
381 // (0s,10s,20s).
382
383 // This test therefore has 6 nodes, with 2 nodes having each type of
384 // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
385 // actual close time.
386
387 ConsensusParms const parms{};
388 Sim sim;
389
390 PeerGroup groupA = sim.createGroup(2);
391 PeerGroup const groupB = sim.createGroup(2);
392 PeerGroup const groupC = sim.createGroup(2);
393 PeerGroup network = groupA + groupB + groupC;
394
395 network.trust(network);
396 network.connect(network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
397
398 // Run consensus without skew until we have a short close time
399 // resolution
400 Peer const* firstPeer = *groupA.begin();
401 while (firstPeer->lastClosedLedger.closeTimeResolution() >= parms.proposeFRESHNESS)
402 sim.run(1);
403
404 // Introduce a shift on the time of 2/3 of peers
405 for (Peer* peer : groupA)
406 peer->clockSkew = parms.proposeFRESHNESS / 2;
407 for (Peer* peer : groupB)
408 peer->clockSkew = parms.proposeFRESHNESS;
409
410 sim.run(1);
411
412 // All nodes agreed to disagree on the close time
413 if (BEAST_EXPECT(sim.synchronized()))
414 {
415 for (Peer const* peer : network)
416 BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
417 }
418 }
419
420 void
422 {
423 using namespace csf;
424 using namespace std::chrono;
425 testcase("wrong LCL");
426
427 // Specialized test to exercise a temporary fork in which some peers
428 // are working on an incorrect prior ledger.
429
430 ConsensusParms const parms{};
431
432 // Vary the time it takes to process validations to exercise detecting
433 // the wrong LCL at different phases of consensus
434 for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
435 {
436 // Consider 10 peers:
437 // 0 1 2 3 4 5 6 7 8 9
438 // minority majorityA majorityB
439 //
440 // Nodes 0-1 trust nodes 0-4
441 // Nodes 2-9 trust nodes 2-9
442 //
443 // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
444 // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
445 // nodes will instead accept the ledger with tx 1.
446
447 // Nodes 0-1 will detect this mismatch during a subsequent round
448 // since nodes 2-4 will validate a different ledger.
449
450 // Nodes 0-1 will acquire the proper ledger from the network and
451 // resume consensus and eventually generate the dominant network
452 // ledger.
453
454 // This topology can potentially fork with the above trust relations
455 // but that is intended for this test.
456
457 Sim sim;
458
459 PeerGroup minority = sim.createGroup(2);
460 PeerGroup const majorityA = sim.createGroup(3);
461 PeerGroup const majorityB = sim.createGroup(5);
462
463 PeerGroup majority = majorityA + majorityB;
464 PeerGroup const network = minority + majority;
465
466 SimDuration const delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
467 minority.trustAndConnect(minority + majorityA, delay);
468 majority.trustAndConnect(majority, delay);
469
470 CollectByNode<JumpCollector> jumps;
471 sim.collectors.add(jumps);
472
473 BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
474
475 // initial round to set prior state
476 sim.run(1);
477
478 // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
479 // tx 1
480 for (Peer* peer : network)
481 peer->delays.recvValidation = validationDelay;
482 for (Peer* peer : (minority + majorityA))
483 peer->openTxs.insert(Tx{0});
484 for (Peer* peer : majorityB)
485 peer->openTxs.insert(Tx{1});
486
487 // Run for additional rounds
488 // With no validation delay, only 2 more rounds are needed.
489 // 1. Round to generate different ledgers
490 // 2. Round to detect different prior ledgers (but still generate
491 // wrong ones) and recover within that round since wrong LCL
492 // is detected before we close
493 //
494 // With a validation delay of ledgerMIN_CLOSE, we need 3 more
495 // rounds.
496 // 1. Round to generate different ledgers
497 // 2. Round to detect different prior ledgers (but still generate
498 // wrong ones) but end up declaring consensus on wrong LCL (but
499 // with the right transaction set!). This is because we detect
500 // the wrong LCL after we have closed the ledger, so we declare
501 // consensus based solely on our peer proposals. But we haven't
502 // had time to acquire the right ledger.
503 // 3. Round to correct
504 sim.run(3);
505
506 // The network never actually forks, since node 0-1 never see a
507 // quorum of validations to fully validate the incorrect chain.
508
509 // However, for a non zero-validation delay, the network is not
510 // synchronized because nodes 0 and 1 are running one ledger behind
511 if (BEAST_EXPECT(sim.branches() == 1))
512 {
513 for (Peer const* peer : majority)
514 {
515 // No jumps for majority nodes
516 BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
517 BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
518 }
519 for (Peer const* peer : minority)
520 {
521 auto& peerJumps = jumps[peer->id];
522 // last closed ledger jump between chains
523 {
524 if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
525 {
526 JumpCollector::Jump const& jump = peerJumps.closeJumps.front();
527 // Jump is to a different chain
528 BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
529 BEAST_EXPECT(!jump.to.isAncestor(jump.from));
530 }
531 }
532 // fully validated jump forward in same chain
533 {
534 if (BEAST_EXPECT(peerJumps.fullyValidatedJumps.size() == 1))
535 {
536 JumpCollector::Jump const& jump = peerJumps.fullyValidatedJumps.front();
537 // Jump is to a different chain with same seq
538 BEAST_EXPECT(jump.from.seq() < jump.to.seq());
539 BEAST_EXPECT(jump.to.isAncestor(jump.from));
540 }
541 }
542 }
543 }
544 }
545
546 {
547 // Additional test engineered to switch LCL during the establish
548 // phase. This was added to trigger a scenario that previously
549 // crashed, in which switchLCL switched from establish to open
550 // phase, but still processed the establish phase logic.
551
552 // Loner node will accept an initial ledger A, but all other nodes
553 // accept ledger B a bit later. By delaying the time it takes
554 // to process a validation, loner node will detect the wrongLCL
555 // after it is already in the establish phase of the next round.
556
557 Sim sim;
558 PeerGroup loner = sim.createGroup(1);
559 PeerGroup const friends = sim.createGroup(3);
560 loner.trust(loner + friends);
561
562 PeerGroup const others = sim.createGroup(6);
563 PeerGroup clique = friends + others;
564 clique.trust(clique);
565
566 PeerGroup network = loner + clique;
567 network.connect(network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
568
569 // initial round to set prior state
570 sim.run(1);
571 for (Peer* peer : (loner + friends))
572 peer->openTxs.insert(Tx(0));
573 for (Peer* peer : others)
574 peer->openTxs.insert(Tx(1));
575
576 // Delay validation processing
577 for (Peer* peer : network)
578 peer->delays.recvValidation = parms.ledgerGRANULARITY;
579
580 // additional rounds to generate wrongLCL and recover
581 sim.run(2);
582
583 // Check all peers recovered
584 for (Peer const* p : network)
585 BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
586 }
587 }
588
589 void
591 {
592 using namespace csf;
593 using namespace std::chrono;
594 testcase("consensus close time rounding");
595
596 // This is a specialized test engineered to yield ledgers with different
597 // close times even though the peers believe they had close time
598 // consensus on the ledger.
599 ConsensusParms const parms;
600
601 Sim sim;
602
603 // This requires a group of 4 fast and 2 slow peers to create a
604 // situation in which a subset of peers requires seeing additional
605 // proposals to declare consensus.
606 PeerGroup slow = sim.createGroup(2);
607 PeerGroup fast = sim.createGroup(4);
608 PeerGroup network = fast + slow;
609
610 // Connected trust graph
611 network.trust(network);
612
613 // Fast and slow network connections
614 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
615 slow.connect(network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
616
617 // Run to the ledger *prior* to decreasing the resolution
619
620 // In order to create the discrepancy, we want a case where if
621 // X = effCloseTime(closeTime, resolution, parentCloseTime)
622 // X != effCloseTime(X, resolution, parentCloseTime)
623 //
624 // That is, the effective close time is not a fixed point. This can
625 // happen if X = parentCloseTime + 1, but a subsequent rounding goes
626 // to the next highest multiple of resolution.
627
628 // So we want to find an offset (now + offset) % 30s = 15
629 // (now + offset) % 20s = 15
630 // This way, the next ledger will close and round up Due to the
631 // network delay settings, the round of consensus will take 5s, so
632 // the next ledger's close time will
633
634 NetClock::duration when = network[0]->now().time_since_epoch();
635
636 // Check we are before the 30s to 20s transition
637 NetClock::duration const resolution = network[0]->lastClosedLedger.closeTimeResolution();
638 BEAST_EXPECT(resolution == NetClock::duration{30s});
639
640 while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
641 ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
642 when += 1s;
643 // Advance the clock without consensus running (IS THIS WHAT
644 // PREVENTS IT IN PRACTICE?)
645 sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
646
647 // Run one more ledger with 30s resolution
648 sim.run(1);
649 if (BEAST_EXPECT(sim.synchronized()))
650 {
651 // close time should be ahead of clock time since we engineered
652 // the close time to round up
653 for (Peer const* peer : network)
654 {
655 BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
656 BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
657 }
658 }
659
660 // All peers submit their own ID as a transaction
661 for (Peer* peer : network)
662 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
663
664 // Run 1 more round, this time it will have a decreased
665 // resolution of 20 seconds.
666
667 // The network delays are engineered so that the slow peers
668 // initially have the wrong tx hash, but they see a majority
669 // of agreement from their peers and declare consensus
670 //
671 // The trick is that everyone starts with a raw close time of
672 // 84681s
673 // Which has
674 // effCloseTime(86481s, 20s, 86490s) = 86491s
675 // However, when the slow peers update their position, they change
676 // the close time to 86451s. The fast peers declare consensus with
677 // the 86481s as their position still.
678 //
679 // When accepted the ledger
680 // - fast peers use eff(86481s) -> 86491s as the close time
681 // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
682
683 sim.run(1);
684
685 BEAST_EXPECT(sim.synchronized());
686 }
687
688 void
690 {
691 using namespace csf;
692 using namespace std::chrono;
693 testcase("fork");
694
695 std::uint32_t const numPeers = 10;
696 // Vary overlap between two UNLs
697 for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
698 {
699 ConsensusParms const parms{};
700 Sim sim;
701
702 std::uint32_t const numA = (numPeers - overlap) / 2;
703 std::uint32_t const numB = numPeers - numA - overlap;
704
705 PeerGroup const aOnly = sim.createGroup(numA);
706 PeerGroup const bOnly = sim.createGroup(numB);
707 PeerGroup const commonOnly = sim.createGroup(overlap);
708
709 PeerGroup a = aOnly + commonOnly;
710 PeerGroup b = bOnly + commonOnly;
711
712 PeerGroup const network = a + b;
713
714 SimDuration const delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
715 a.trustAndConnect(a, delay);
716 b.trustAndConnect(b, delay);
717
718 // Initial round to set prior state
719 sim.run(1);
720 for (Peer* peer : network)
721 {
722 // Nodes have only seen transactions from their neighbors
723 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
724 for (Peer const* to : sim.trustGraph.trustedPeers(peer))
725 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(to->id)});
726 }
727 sim.run(1);
728
729 // Fork should not happen for 40% or greater overlap
730 // Since the overlapped nodes have a UNL that is the union of the
731 // two cliques, the maximum sized UNL list is the number of peers
732 if (overlap > 0.4 * numPeers)
733 {
734 BEAST_EXPECT(sim.synchronized());
735 }
736 else
737 {
738 // Even if we do fork, there shouldn't be more than 3 ledgers
739 // One for cliqueA, one for cliqueB and one for nodes in both
740 BEAST_EXPECT(sim.branches() <= 3);
741 }
742 }
743 }
744
745 void
747 {
748 using namespace csf;
749 using namespace std::chrono;
750 testcase("hub network");
751
752 // Simulate a set of 5 validators that aren't directly connected but
753 // rely on a single hub node for communication
754
755 ConsensusParms const parms{};
756 Sim sim;
757 PeerGroup validators = sim.createGroup(5);
758 PeerGroup center = sim.createGroup(1);
759 validators.trust(validators);
760 center.trust(validators);
761
762 SimDuration const delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
763 validators.connect(center, delay);
764
765 center[0]->runAsValidator = false;
766
767 // prep round to set initial state.
768 sim.run(1);
769
770 // everyone submits their own ID as a TX and relay it to peers
771 for (Peer* p : validators)
772 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
773
774 sim.run(1);
775
776 // All peers are in sync
777 BEAST_EXPECT(sim.synchronized());
778 }
779
780 // Helper collector for testPreferredByBranch
781 // Invasively disconnects network at bad times to cause splits
783 {
788 bool reconnected = false;
789
791 : network(net), groupCfast(c), groupCsplit(split), delay(d)
792 {
793 }
794
795 template <class E>
796 void
798 {
799 }
800
801 void
803 {
804 using namespace std::chrono;
805 // As soon as the fastC node fully validates C, disconnect
806 // ALL c nodes from the network. The fast C node needs to disconnect
807 // as well to prevent it from relaying the validations it did see
808 if (who == groupCfast[0]->id && e.ledger.seq() == csf::Ledger::Seq{2})
809 {
810 network.disconnect(groupCsplit);
811 network.disconnect(groupCfast);
812 }
813 }
814
815 void
817 {
818 // As soon as anyone generates a child of B or C, reconnect the
819 // network so those validations make it through
820 if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
821 {
822 reconnected = true;
823 network.connect(groupCsplit, delay);
824 }
825 }
826 };
827
828 void
830 {
831 using namespace csf;
832 using namespace std::chrono;
833 testcase("preferred by branch");
834
835 // Simulate network splits that are prevented from forking when using
836 // preferred ledger by trie. This is a contrived example that involves
837 // excessive network splits, but demonstrates the safety improvement
838 // from the preferred ledger by trie approach.
839
840 // Consider 10 validating nodes that comprise a single common UNL
841 // Ledger history:
842 // 1: A
843 // _/ \_
844 // 2: B C
845 // _/ _/ \_
846 // 3: D C' |||||||| (8 different ledgers)
847
848 // - All nodes generate the common ledger A
849 // - 2 nodes generate B and 8 nodes generate C
850 // - Only 1 of the C nodes sees all the C validations and fully
851 // validates C. The rest of the C nodes split at just the right time
852 // such that they never see any C validations but their own.
853 // - The C nodes continue and generate 8 different child ledgers.
854 // - Meanwhile, the D nodes only saw 1 validation for C and 2
855 // validations
856 // for B.
857 // - The network reconnects and the validations for generation 3 ledgers
858 // are observed (D and the 8 C's)
859 // - In the old approach, 2 votes for D outweighs 1 vote for each C'
860 // so the network would avalanche towards D and fully validate it
861 // EVEN though C was fully validated by one node
862 // - In the new approach, 2 votes for D are not enough to outweight the
863 // 8 implicit votes for C, so nodes will avalanche to C instead
864
865 ConsensusParms const parms{};
866 Sim sim;
867
868 // Goes A->B->D
869 PeerGroup const groupABD = sim.createGroup(2);
870 // Single node that initially fully validates C before the split
871 PeerGroup groupCfast = sim.createGroup(1);
872 // Generates C, but fails to fully validate before the split
873 PeerGroup groupCsplit = sim.createGroup(7);
874
875 PeerGroup groupNotFastC = groupABD + groupCsplit;
876 PeerGroup network = groupABD + groupCsplit + groupCfast;
877
878 SimDuration const delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
879 SimDuration const fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
880
881 network.trust(network);
882 // C must have a shorter delay to see all the validations before the
883 // other nodes
884 network.connect(groupCfast, fDelay);
885 // The rest of the network is connected at the same speed
886 groupNotFastC.connect(groupNotFastC, delay);
887
888 Disruptor dc(network, groupCfast, groupCsplit, delay);
889 sim.collectors.add(dc);
890
891 // Consensus round to generate ledger A
892 sim.run(1);
893 BEAST_EXPECT(sim.synchronized());
894
895 // Next round generates B and C
896 // To force B, we inject an extra transaction in to those nodes
897 for (Peer* peer : groupABD)
898 {
899 peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
900 }
901 // The Disruptor will ensure that nodes disconnect before the C
902 // validations make it to all but the fastC node
903 sim.run(1);
904
905 // We are no longer in sync, but have not yet forked:
906 // 9 nodes consider A the last fully validated ledger and fastC sees C
907 BEAST_EXPECT(!sim.synchronized());
908 BEAST_EXPECT(sim.branches() == 1);
909
910 // Run another round to generate the 8 different C' ledgers
911 for (Peer* p : network)
912 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
913 sim.run(1);
914
915 // Still not forked
916 BEAST_EXPECT(!sim.synchronized());
917 BEAST_EXPECT(sim.branches() == 1);
918
919 // Disruptor will reconnect all but the fastC node
920 sim.run(1);
921
922 if (BEAST_EXPECT(sim.branches() == 1))
923 {
924 BEAST_EXPECT(sim.synchronized());
925 }
926 else // old approach caused a fork
927 {
928 BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
929 BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
930 }
931 }
932
933 // Helper collector for testPauseForLaggards
934 // This will remove the ledgerAccept delay used to
935 // initially create the slow vs. fast validator groups.
937 {
939
941 {
942 }
943
944 template <class E>
945 void
947 {
948 }
949
950 void
952 {
953 for (csf::Peer* p : g)
954 {
955 if (p->id == who)
956 p->delays.ledgerAccept = std::chrono::seconds{0};
957 }
958 }
959 };
960
961 void
963 {
964 using namespace csf;
965 using namespace std::chrono;
966 testcase("pause for laggards");
967
968 // Test that validators that jump ahead of the network slow
969 // down.
970
971 // We engineer the following validated ledger history scenario:
972 //
973 // / --> B1 --> C1 --> ... -> G1 "ahead"
974 // A
975 // \ --> B2 --> C2 "behind"
976 //
977 // After validating a common ledger A, a set of "behind" validators
978 // briefly run slower and validate the lower chain of ledgers.
979 // The "ahead" validators run normal speed and run ahead validating the
980 // upper chain of ledgers.
981 //
982 // Due to the uncommitted support definition of the preferred branch
983 // protocol, even if the "behind" validators are a majority, the "ahead"
984 // validators cannot jump to the proper branch until the "behind"
985 // validators catch up to the same sequence number. For this test to
986 // succeed, the ahead validators need to briefly slow down consensus.
987
988 ConsensusParms const parms{};
989 Sim sim;
990 SimDuration const delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
991
992 PeerGroup behind = sim.createGroup(3);
993 PeerGroup const ahead = sim.createGroup(2);
994 PeerGroup network = ahead + behind;
995
996 hash_set<Peer::NodeKey_t> trustedKeys;
997 for (Peer const* p : network)
998 trustedKeys.insert(p->key);
999 for (Peer* p : network)
1000 p->trustedKeys = trustedKeys;
1001
1002 network.trustAndConnect(network, delay);
1003
1004 // Initial seed round to set prior state
1005 sim.run(1);
1006
1007 // Have the "behind" group initially take a really long time to
1008 // accept a ledger after ending deliberation
1009 for (Peer* p : behind)
1010 p->delays.ledgerAccept = 20s;
1011
1012 // Use the collector to revert the delay after the single
1013 // slow ledger is generated
1014 UndoDelay undoDelay{behind};
1015 sim.collectors.add(undoDelay);
1016
1017#if 0
1018 // Have all beast::journal output printed to stdout
1019 for (Peer* p : network)
1020 p->sink.threshold(beast::severities::kAll);
1021
1022 // Print ledger accept and fully validated events to stdout
1023 StreamCollector sc{std::cout};
1024 sim.collectors.add(sc);
1025#endif
1026 // Run the simulation for 100 seconds of simulation time with
1027 std::chrono::nanoseconds const simDuration = 100s;
1028
1029 // Simulate clients submitting 1 tx every 5 seconds to a random
1030 // validator
1031 Rate const rate{1, 5s};
1032 auto peerSelector = makeSelector(
1033 network.begin(), network.end(), std::vector<double>(network.size(), 1.), sim.rng);
1034 auto txSubmitter = makeSubmitter(
1035 ConstantDistribution{rate.inv()},
1036 sim.scheduler.now(),
1037 sim.scheduler.now() + simDuration,
1038 peerSelector,
1039 sim.scheduler,
1040 sim.rng);
1041
1042 // Run simulation
1043 sim.run(simDuration);
1044
1045 // Verify that the network recovered
1046 BEAST_EXPECT(sim.synchronized());
1047 }
1048
1049 void
1051 {
1052 testcase("disputes");
1053
1054 using namespace csf;
1055
1056 // Test dispute objects directly
1057 using Dispute = DisputedTx<Tx, PeerID>;
1058
1059 Tx const txTrue{99};
1060 Tx const txFalse{98};
1061 Tx const txFollowingTrue{97};
1062 Tx const txFollowingFalse{96};
1063 int const numPeers = 100;
1064 ConsensusParms const p;
1065 std::size_t peersUnchanged = 0;
1066
1068 auto j = logs->journal("Test");
1070
1071 // Three cases:
1072 // 1 proposing, initial vote yes
1073 // 2 proposing, initial vote no
1074 // 3 not proposing, initial vote doesn't matter after the first update,
1075 // use yes
1076 {
1077 Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
1078 Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
1079 Dispute followingTrue{txFollowingTrue.id(), true, numPeers, journal_};
1080 Dispute followingFalse{txFollowingFalse.id(), false, numPeers, journal_};
1081 BEAST_EXPECT(proposingTrue.ID() == 99);
1082 BEAST_EXPECT(proposingFalse.ID() == 98);
1083 BEAST_EXPECT(followingTrue.ID() == 97);
1084 BEAST_EXPECT(followingFalse.ID() == 96);
1085
1086 // Create an even split in the peer votes
1087 for (int i = 0; i < numPeers; ++i)
1088 {
1089 BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
1090 BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
1091 BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
1092 BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
1093 }
1094 // Switch the middle vote to match mine
1095 BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
1096 BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
1097 BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
1098 BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));
1099
1100 // no changes yet
1101 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1102 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1103 BEAST_EXPECT(followingTrue.getOurVote() == true);
1104 BEAST_EXPECT(followingFalse.getOurVote() == false);
1105 BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1106 BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1107 BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
1108 BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
1109 BEAST_EXPECT(clog->str().empty());
1110
1111 // I'm in the majority, my vote should not change
1112 BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
1113 BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
1114 BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
1115 BEAST_EXPECT(!followingFalse.updateVote(5, false, p));
1116
1117 BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
1118 BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
1119 BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
1120 BEAST_EXPECT(!followingFalse.updateVote(10, false, p));
1121
1122 peersUnchanged = 2;
1123 BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1124 BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1125 BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
1126 BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
1127 BEAST_EXPECT(clog->str().empty());
1128
1129 // Right now, the vote is 51%. The requirement is about to jump to
1130 // 65%
1131 BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
1132 BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
1133 BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
1134 BEAST_EXPECT(!followingFalse.updateVote(55, false, p));
1135
1136 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1137 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1138 BEAST_EXPECT(followingTrue.getOurVote() == true);
1139 BEAST_EXPECT(followingFalse.getOurVote() == false);
1140 // 16 validators change their vote to match my original vote
1141 for (int i = 0; i < 16; ++i)
1142 {
1143 auto pTrue = PeerID(numPeers - i - 1);
1144 auto pFalse = PeerID(i);
1145 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1146 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1147 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1148 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1149 }
1150 // The vote should now be 66%, threshold is 65%
1151 BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
1152 BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
1153 BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
1154 BEAST_EXPECT(!followingFalse.updateVote(60, false, p));
1155
1156 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1157 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1158 BEAST_EXPECT(followingTrue.getOurVote() == true);
1159 BEAST_EXPECT(followingFalse.getOurVote() == false);
1160
1161 // Threshold jumps to 70%
1162 BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
1163 BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
1164 BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
1165 BEAST_EXPECT(!followingFalse.updateVote(86, false, p));
1166
1167 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1168 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1169 BEAST_EXPECT(followingTrue.getOurVote() == true);
1170 BEAST_EXPECT(followingFalse.getOurVote() == false);
1171
1172 // 5 more validators change their vote to match my original vote
1173 for (int i = 16; i < 21; ++i)
1174 {
1175 auto pTrue = PeerID(numPeers - i - 1);
1176 auto pFalse = PeerID(i);
1177 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1178 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1179 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1180 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1181 }
1182
1183 // The vote should now be 71%, threshold is 70%
1184 BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
1185 BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
1186 BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
1187 BEAST_EXPECT(!followingFalse.updateVote(90, false, p));
1188
1189 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1190 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1191 BEAST_EXPECT(followingTrue.getOurVote() == true);
1192 BEAST_EXPECT(followingFalse.getOurVote() == false);
1193
1194 // The vote should now be 71%, threshold is 70%
1195 BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
1196 BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
1197 BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
1198 BEAST_EXPECT(!followingFalse.updateVote(150, false, p));
1199
1200 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1201 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1202 BEAST_EXPECT(followingTrue.getOurVote() == true);
1203 BEAST_EXPECT(followingFalse.getOurVote() == false);
1204
1205 // The vote should now be 71%, threshold is 70%
1206 BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
1207 BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
1208 BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
1209 BEAST_EXPECT(!followingFalse.updateVote(190, false, p));
1210
1211 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1212 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1213 BEAST_EXPECT(followingTrue.getOurVote() == true);
1214 BEAST_EXPECT(followingFalse.getOurVote() == false);
1215
1216 peersUnchanged = 3;
1217 BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1218 BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1219 BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
1220 BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
1221 BEAST_EXPECT(clog->str().empty());
1222
1223 // Threshold jumps to 95%
1224 BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
1225 BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
1226 BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
1227 BEAST_EXPECT(!followingFalse.updateVote(220, false, p));
1228
1229 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1230 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1231 BEAST_EXPECT(followingTrue.getOurVote() == true);
1232 BEAST_EXPECT(followingFalse.getOurVote() == false);
1233
1234 // 25 more validators change their vote to match my original vote
1235 for (int i = 21; i < 46; ++i)
1236 {
1237 auto pTrue = PeerID(numPeers - i - 1);
1238 auto pFalse = PeerID(i);
1239 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1240 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1241 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1242 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1243 }
1244
1245 // The vote should now be 96%, threshold is 95%
1246 BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
1247 BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
1248 BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
1249 BEAST_EXPECT(!followingFalse.updateVote(250, false, p));
1250
1251 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1252 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1253 BEAST_EXPECT(followingTrue.getOurVote() == true);
1254 BEAST_EXPECT(followingFalse.getOurVote() == false);
1255
1256 for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
1257 {
1258 BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1259 BEAST_EXPECT(!proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1260 BEAST_EXPECT(!followingTrue.stalled(p, false, peersUnchanged, j, clog));
1261 BEAST_EXPECT(!followingFalse.stalled(p, false, peersUnchanged, j, clog));
1262 BEAST_EXPECT(clog->str().empty());
1263 }
1264
1265 auto expectStalled = [this, &clog](
1266 int txid,
1267 bool ourVote,
1268 int ourTime,
1269 int peerTime,
1270 int support,
1271 std::uint32_t line) {
1272 using namespace std::string_literals;
1273
1274 auto const s = clog->str();
1275 expect(s.find("stalled"), s, __FILE__, line);
1276 expect(s.starts_with("Transaction "s + std::to_string(txid)), s, __FILE__, line);
1277 expect(s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos, s, __FILE__, line);
1278 expect(
1279 s.find("for "s + std::to_string(ourTime) + " rounds."s) != s.npos,
1280 s,
1281 __FILE__,
1282 line);
1283 expect(
1284 s.find("votes in "s + std::to_string(peerTime) + " rounds.") != s.npos,
1285 s,
1286 __FILE__,
1287 line);
1288 expect(
1289 s.ends_with("has "s + std::to_string(support) + "% support. "s),
1290 s,
1291 __FILE__,
1292 line);
1294 };
1295
1296 for (int i = 0; i < 1; ++i)
1297 {
1298 BEAST_EXPECT(!proposingTrue.updateVote(250 + (10 * i), true, p));
1299 BEAST_EXPECT(!proposingFalse.updateVote(250 + (10 * i), true, p));
1300 BEAST_EXPECT(!followingTrue.updateVote(250 + (10 * i), false, p));
1301 BEAST_EXPECT(!followingFalse.updateVote(250 + (10 * i), false, p));
1302
1303 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1304 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1305 BEAST_EXPECT(followingTrue.getOurVote() == true);
1306 BEAST_EXPECT(followingFalse.getOurVote() == false);
1307
1308 // true vote has changed recently, so not stalled
1309 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1310 BEAST_EXPECT(clog->str().empty());
1311 // remaining votes have been unchanged in so long that we only
1312 // need to hit the second round at 95% to be stalled, regardless
1313 // of peers
1314 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1315 expectStalled(98, false, 11, 0, 2, __LINE__);
1316 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1317 expectStalled(97, true, 11, 0, 97, __LINE__);
1318 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1319 expectStalled(96, false, 11, 0, 3, __LINE__);
1320
1321 // true vote has changed recently, so not stalled
1322 BEAST_EXPECT(!proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1323 BEAST_EXPECTS(clog->str().empty(), clog->str());
1324 // remaining votes have been unchanged in so long that we only
1325 // need to hit the second round at 95% to be stalled, regardless
1326 // of peers
1327 BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1328 expectStalled(98, false, 11, 6, 2, __LINE__);
1329 BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
1330 expectStalled(97, true, 11, 6, 97, __LINE__);
1331 BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
1332 expectStalled(96, false, 11, 6, 3, __LINE__);
1333 }
1334 for (int i = 1; i < 3; ++i)
1335 {
1336 BEAST_EXPECT(!proposingTrue.updateVote(250 + (10 * i), true, p));
1337 BEAST_EXPECT(!proposingFalse.updateVote(250 + (10 * i), true, p));
1338 BEAST_EXPECT(!followingTrue.updateVote(250 + (10 * i), false, p));
1339 BEAST_EXPECT(!followingFalse.updateVote(250 + (10 * i), false, p));
1340
1341 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1342 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1343 BEAST_EXPECT(followingTrue.getOurVote() == true);
1344 BEAST_EXPECT(followingFalse.getOurVote() == false);
1345
1346 // true vote changed 2 rounds ago, and peers are changing, so
1347 // not stalled
1348 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1349 BEAST_EXPECTS(clog->str().empty(), clog->str());
1350 // still stalled
1351 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1352 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1353 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1354 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1355 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1356 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1357
1358 // true vote changed 2 rounds ago, and peers are NOT changing,
1359 // so stalled
1360 BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1361 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1362 // still stalled
1363 BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1364 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1365 BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
1366 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1367 BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
1368 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1369 }
1370 for (int i = 3; i < 5; ++i)
1371 {
1372 BEAST_EXPECT(!proposingTrue.updateVote(250 + (10 * i), true, p));
1373 BEAST_EXPECT(!proposingFalse.updateVote(250 + (10 * i), true, p));
1374 BEAST_EXPECT(!followingTrue.updateVote(250 + (10 * i), false, p));
1375 BEAST_EXPECT(!followingFalse.updateVote(250 + (10 * i), false, p));
1376
1377 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1378 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1379 BEAST_EXPECT(followingTrue.getOurVote() == true);
1380 BEAST_EXPECT(followingFalse.getOurVote() == false);
1381
1382 BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
1383 expectStalled(99, true, 1 + i, 0, 97, __LINE__);
1384 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1385 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1386 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1387 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1388 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1389 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1390
1391 BEAST_EXPECT(proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1392 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1393 BEAST_EXPECT(proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1394 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1395 BEAST_EXPECT(followingTrue.stalled(p, false, peersUnchanged, j, clog));
1396 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1397 BEAST_EXPECT(followingFalse.stalled(p, false, peersUnchanged, j, clog));
1398 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1399 }
1400 }
1401 }
1402
1403 void
1404 run() override
1405 {
1406 testShouldCloseLedger();
1407 testCheckConsensus();
1408
1409 testStandalone();
1410 testPeersAgree();
1411 testSlowPeers();
1412 testCloseTimeDisagree();
1413 testWrongLCL();
1414 testConsensusCloseTimeRounding();
1415 testFork();
1416 testHubNetwork();
1417 testPreferredByBranch();
1418 testPauseForLaggards();
1419 testDisputes();
1420 }
1421};
1422
1423BEAST_DEFINE_TESTSUITE(Consensus, consensus, xrpl);
1424} // namespace test
1425} // namespace xrpl
A testsuite class.
Definition suite.h:51
testcase_t testcase
Memberspace for declaring test cases.
Definition suite.h:150
Generic implementation of consensus algorithm.
Definition Consensus.h:278
A transaction discovered to be in dispute during consensus.
Definition DisputedTx.h:29
Represents a peer connection in the overlay.
virtual id_t id() const =0
void run() override
Runs the suite.
A group of simulation Peers.
Definition PeerGroup.h:22
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition PeerGroup.h:166
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition PeerGroup.h:146
T insert(T... args)
T is_same_v
STL namespace.
typename SimClock::duration SimDuration
Definition SimTime.h:16
typename SimClock::time_point SimTime
Definition SimTime.h:17
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5
auto constexpr increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, bool stalled, ConsensusParms const &parms, bool proposing, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determine whether the network reached consensus and whether we joined.
@ Expired
Consensus time limit has hard-expired.
@ MovedOn
The network has consensus without us.
@ Yes
We have consensus along with the network.
@ No
We do not have consensus.
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determines whether the current ledger should close at this time.
Definition Consensus.cpp:8
Consensus algorithm parameters.
std::chrono::milliseconds const ledgerGRANULARITY
How often we check state or change positions.
Represents a transfer rate.
Definition Rate.h:20
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
Peer accepted consensus results.
Definition events.h:101
Peer fully validated a new ledger.
Definition events.h:120
Ledger ledger
The new fully validated ledger.
Definition events.h:122
A single peer in the simulation.
T to_string(T... args)