// Consensus_test.cpp
// Unit tests for the generic XRP Ledger consensus algorithm
// (shouldCloseLedger / checkConsensus) and csf-based network simulations.
#include <test/csf.h>
#include <test/unit_test/SuiteJournal.h>

#include <xrpld/consensus/Consensus.h>

#include <xrpl/beast/unit_test.h>
#include <xrpl/json/to_string.h>
9namespace ripple {
10namespace test {
11
13{
15
16public:
17 Consensus_test() : journal_("Consensus_test", *this)
18 {
19 }
20
21 void
23 {
24 using namespace std::chrono_literals;
25 testcase("should close ledger");
26
27 // Use default parameters
28 ConsensusParms const p{};
29
30 // Bizarre times forcibly close
31 BEAST_EXPECT(shouldCloseLedger(
32 true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
33 BEAST_EXPECT(shouldCloseLedger(
34 true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
35 BEAST_EXPECT(shouldCloseLedger(
36 true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
37
38 // Rest of network has closed
39 BEAST_EXPECT(
40 shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
41
42 // No transactions means wait until end of internval
43 BEAST_EXPECT(
44 !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
45 BEAST_EXPECT(
46 shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
47
48 // Enforce minimum ledger open time
49 BEAST_EXPECT(
50 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
51
52 // Don't go too much faster than last time
53 BEAST_EXPECT(
54 !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
55
56 BEAST_EXPECT(
57 shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
58 }
59
60 void
62 {
63 using namespace std::chrono_literals;
64 testcase("check consensus");
65
66 // Use default parameterss
67 ConsensusParms const p{};
68
70 // Disputes still in doubt
71 //
72 // Not enough time has elapsed
73 BEAST_EXPECT(
75 checkConsensus(10, 2, 2, 0, 3s, 2s, false, p, true, journal_));
76
77 // If not enough peers have propsed, ensure
78 // more time for proposals
79 BEAST_EXPECT(
81 checkConsensus(10, 2, 2, 0, 3s, 4s, false, p, true, journal_));
82
83 // Enough time has elapsed and we all agree
84 BEAST_EXPECT(
86 checkConsensus(10, 2, 2, 0, 3s, 10s, false, p, true, journal_));
87
88 // Enough time has elapsed and we don't yet agree
89 BEAST_EXPECT(
91 checkConsensus(10, 2, 1, 0, 3s, 10s, false, p, true, journal_));
92
93 // Our peers have moved on
94 // Enough time has elapsed and we all agree
95 BEAST_EXPECT(
97 checkConsensus(10, 2, 1, 8, 3s, 10s, false, p, true, journal_));
98
99 // If no peers, don't agree until time has passed.
100 BEAST_EXPECT(
102 checkConsensus(0, 0, 0, 0, 3s, 10s, false, p, true, journal_));
103
104 // Agree if no peers and enough time has passed.
105 BEAST_EXPECT(
107 checkConsensus(0, 0, 0, 0, 3s, 16s, false, p, true, journal_));
108
109 // Expire if too much time has passed without agreement
110 BEAST_EXPECT(
112 checkConsensus(10, 8, 1, 0, 1s, 19s, false, p, true, journal_));
113
115 // Stalled
116 //
117 // Not enough time has elapsed
118 BEAST_EXPECT(
120 checkConsensus(10, 2, 2, 0, 3s, 2s, true, p, true, journal_));
121
122 // If not enough peers have propsed, ensure
123 // more time for proposals
124 BEAST_EXPECT(
126 checkConsensus(10, 2, 2, 0, 3s, 4s, true, p, true, journal_));
127
128 // Enough time has elapsed and we all agree
129 BEAST_EXPECT(
131 checkConsensus(10, 2, 2, 0, 3s, 10s, true, p, true, journal_));
132
133 // Enough time has elapsed and we don't yet agree, but there's nothing
134 // left to dispute
135 BEAST_EXPECT(
137 checkConsensus(10, 2, 1, 0, 3s, 10s, true, p, true, journal_));
138
139 // Our peers have moved on
140 // Enough time has elapsed and we all agree, nothing left to dispute
141 BEAST_EXPECT(
143 checkConsensus(10, 2, 1, 8, 3s, 10s, true, p, true, journal_));
144
145 // If no peers, don't agree until time has passed.
146 BEAST_EXPECT(
148 checkConsensus(0, 0, 0, 0, 3s, 10s, true, p, true, journal_));
149
150 // Agree if no peers and enough time has passed.
151 BEAST_EXPECT(
153 checkConsensus(0, 0, 0, 0, 3s, 16s, true, p, true, journal_));
154
155 // We are done if there's nothing left to dispute, no matter how much
156 // time has passed
157 BEAST_EXPECT(
159 checkConsensus(10, 8, 1, 0, 1s, 19s, true, p, true, journal_));
160 }
161
162 void
164 {
165 using namespace std::chrono_literals;
166 using namespace csf;
167 testcase("standalone");
168
169 Sim s;
170 PeerGroup peers = s.createGroup(1);
171 Peer* peer = peers[0];
172 peer->targetLedgers = 1;
173 peer->start();
174 peer->submit(Tx{1});
175
176 s.scheduler.step();
177
178 // Inspect that the proper ledger was created
179 auto const& lcl = peer->lastClosedLedger;
180 BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
181 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
182 BEAST_EXPECT(lcl.txs().size() == 1);
183 BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
184 BEAST_EXPECT(peer->prevProposers == 0);
185 }
186
187 void
189 {
190 using namespace csf;
191 using namespace std::chrono;
192 testcase("peers agree");
193
194 ConsensusParms const parms{};
195 Sim sim;
196 PeerGroup peers = sim.createGroup(5);
197
198 // Connected trust and network graphs with single fixed delay
199 peers.trustAndConnect(
200 peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
201
202 // everyone submits their own ID as a TX
203 for (Peer* p : peers)
204 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
205
206 sim.run(1);
207
208 // All peers are in sync
209 if (BEAST_EXPECT(sim.synchronized()))
210 {
211 for (Peer const* peer : peers)
212 {
213 auto const& lcl = peer->lastClosedLedger;
214 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
215 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
216 // All peers proposed
217 BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
218 // All transactions were accepted
219 for (std::uint32_t i = 0; i < peers.size(); ++i)
220 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
221 }
222 }
223 }
224
225 void
227 {
228 using namespace csf;
229 using namespace std::chrono;
230 testcase("slow peers");
231
232 // Several tests of a complete trust graph with a subset of peers
233 // that have significantly longer network delays to the rest of the
234 // network
235
236 // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
237 {
238 ConsensusParms const parms{};
239 Sim sim;
240 PeerGroup slow = sim.createGroup(1);
241 PeerGroup fast = sim.createGroup(4);
242 PeerGroup network = fast + slow;
243
244 // Fully connected trust graph
245 network.trust(network);
246
247 // Fast and slow network connections
248 fast.connect(
249 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
250
251 slow.connect(
252 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
253
254 // All peers submit their own ID as a transaction
255 for (Peer* peer : network)
256 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
257
258 sim.run(1);
259
260 // Verify all peers have same LCL but are missing transaction 0
261 // All peers are in sync even with a slower peer 0
262 if (BEAST_EXPECT(sim.synchronized()))
263 {
264 for (Peer* peer : network)
265 {
266 auto const& lcl = peer->lastClosedLedger;
267 BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
268 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
269
270 BEAST_EXPECT(peer->prevProposers == network.size() - 1);
271 BEAST_EXPECT(
272 peer->prevRoundTime == network[0]->prevRoundTime);
273
274 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
275 for (std::uint32_t i = 2; i < network.size(); ++i)
276 BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
277
278 // Tx 0 didn't make it
279 BEAST_EXPECT(
280 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
281 }
282 }
283 }
284
285 // Test when the slow peers delay a consensus quorum (4/6 agree)
286 {
287 // Run two tests
288 // 1. The slow peers are participating in consensus
289 // 2. The slow peers are just observing
290
291 for (auto isParticipant : {true, false})
292 {
293 ConsensusParms const parms{};
294
295 Sim sim;
296 PeerGroup slow = sim.createGroup(2);
297 PeerGroup fast = sim.createGroup(4);
298 PeerGroup network = fast + slow;
299
300 // Connected trust graph
301 network.trust(network);
302
303 // Fast and slow network connections
304 fast.connect(
305 fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
306
307 slow.connect(
308 network,
309 round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
310
311 for (Peer* peer : slow)
312 peer->runAsValidator = isParticipant;
313
314 // All peers submit their own ID as a transaction and relay it
315 // to peers
316 for (Peer* peer : network)
317 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
318
319 sim.run(1);
320
321 if (BEAST_EXPECT(sim.synchronized()))
322 {
323 // Verify all peers have same LCL but are missing
324 // transaction 0,1 which was not received by all peers
325 // before the ledger closed
326 for (Peer* peer : network)
327 {
328 // Closed ledger has all but transaction 0,1
329 auto const& lcl = peer->lastClosedLedger;
330 BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
331 BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
332 BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
333 for (std::uint32_t i = slow.size(); i < network.size();
334 ++i)
335 BEAST_EXPECT(
336 lcl.txs().find(Tx{i}) != lcl.txs().end());
337
338 // Tx 0-1 didn't make it
339 BEAST_EXPECT(
340 peer->openTxs.find(Tx{0}) != peer->openTxs.end());
341 BEAST_EXPECT(
342 peer->openTxs.find(Tx{1}) != peer->openTxs.end());
343 }
344
345 Peer const* slowPeer = slow[0];
346 if (isParticipant)
347 BEAST_EXPECT(
348 slowPeer->prevProposers == network.size() - 1);
349 else
350 BEAST_EXPECT(slowPeer->prevProposers == fast.size());
351
352 for (Peer* peer : fast)
353 {
354 // Due to the network link delay settings
355 // Peer 0 initially proposes {0}
356 // Peer 1 initially proposes {1}
357 // Peers 2-5 initially propose {2,3,4,5}
358 // Since peers 2-5 agree, 4/6 > the initial 50% needed
359 // to include a disputed transaction, so Peer 0/1 switch
360 // to agree with those peers. Peer 0/1 then closes with
361 // an 80% quorum of agreeing positions (5/6) match.
362 //
363 // Peers 2-5 do not change position, since tx 0 or tx 1
364 // have less than the 50% initial threshold. They also
365 // cannot declare consensus, since 4/6 agreeing
366 // positions are < 80% threshold. They therefore need an
367 // additional timerEntry call to see the updated
368 // positions from Peer 0 & 1.
369
370 if (isParticipant)
371 {
372 BEAST_EXPECT(
373 peer->prevProposers == network.size() - 1);
374 BEAST_EXPECT(
375 peer->prevRoundTime > slowPeer->prevRoundTime);
376 }
377 else
378 {
379 BEAST_EXPECT(
380 peer->prevProposers == fast.size() - 1);
381 // so all peers should have closed together
382 BEAST_EXPECT(
383 peer->prevRoundTime == slowPeer->prevRoundTime);
384 }
385 }
386 }
387 }
388 }
389 }
390
391 void
393 {
394 using namespace csf;
395 using namespace std::chrono;
396 testcase("close time disagree");
397
398 // This is a very specialized test to get ledgers to disagree on
399 // the close time. It unfortunately assumes knowledge about current
400 // timing constants. This is a necessary evil to get coverage up
401 // pending more extensive refactorings of timing constants.
402
403 // In order to agree-to-disagree on the close time, there must be no
404 // clear majority of nodes agreeing on a close time. This test
405 // sets a relative offset to the peers internal clocks so that they
406 // send proposals with differing times.
407
408 // However, agreement is on the effective close time, not the
409 // exact close time. The minimum closeTimeResolution is given by
410 // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
411 // the skews need to be at least 10 seconds to have different effective
412 // close times.
413
414 // Complicating this matter is that nodes will ignore proposals
415 // with times more than proposeFRESHNESS =20s in the past. So at
416 // the minimum granularity, we have at most 3 types of skews
417 // (0s,10s,20s).
418
419 // This test therefore has 6 nodes, with 2 nodes having each type of
420 // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
421 // actual close time.
422
423 ConsensusParms const parms{};
424 Sim sim;
425
426 PeerGroup groupA = sim.createGroup(2);
427 PeerGroup groupB = sim.createGroup(2);
428 PeerGroup groupC = sim.createGroup(2);
429 PeerGroup network = groupA + groupB + groupC;
430
431 network.trust(network);
432 network.connect(
433 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
434
435 // Run consensus without skew until we have a short close time
436 // resolution
437 Peer* firstPeer = *groupA.begin();
438 while (firstPeer->lastClosedLedger.closeTimeResolution() >=
439 parms.proposeFRESHNESS)
440 sim.run(1);
441
442 // Introduce a shift on the time of 2/3 of peers
443 for (Peer* peer : groupA)
444 peer->clockSkew = parms.proposeFRESHNESS / 2;
445 for (Peer* peer : groupB)
446 peer->clockSkew = parms.proposeFRESHNESS;
447
448 sim.run(1);
449
450 // All nodes agreed to disagree on the close time
451 if (BEAST_EXPECT(sim.synchronized()))
452 {
453 for (Peer* peer : network)
454 BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
455 }
456 }
457
458 void
460 {
461 using namespace csf;
462 using namespace std::chrono;
463 testcase("wrong LCL");
464
465 // Specialized test to exercise a temporary fork in which some peers
466 // are working on an incorrect prior ledger.
467
468 ConsensusParms const parms{};
469
470 // Vary the time it takes to process validations to exercise detecting
471 // the wrong LCL at different phases of consensus
472 for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
473 {
474 // Consider 10 peers:
475 // 0 1 2 3 4 5 6 7 8 9
476 // minority majorityA majorityB
477 //
478 // Nodes 0-1 trust nodes 0-4
479 // Nodes 2-9 trust nodes 2-9
480 //
481 // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
482 // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
483 // nodes will instead accept the ledger with tx 1.
484
485 // Nodes 0-1 will detect this mismatch during a subsequent round
486 // since nodes 2-4 will validate a different ledger.
487
488 // Nodes 0-1 will acquire the proper ledger from the network and
489 // resume consensus and eventually generate the dominant network
490 // ledger.
491
492 // This topology can potentially fork with the above trust relations
493 // but that is intended for this test.
494
495 Sim sim;
496
497 PeerGroup minority = sim.createGroup(2);
498 PeerGroup majorityA = sim.createGroup(3);
499 PeerGroup majorityB = sim.createGroup(5);
500
501 PeerGroup majority = majorityA + majorityB;
502 PeerGroup network = minority + majority;
503
504 SimDuration delay =
505 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
506 minority.trustAndConnect(minority + majorityA, delay);
507 majority.trustAndConnect(majority, delay);
508
509 CollectByNode<JumpCollector> jumps;
510 sim.collectors.add(jumps);
511
512 BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
513
514 // initial round to set prior state
515 sim.run(1);
516
517 // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
518 // tx 1
519 for (Peer* peer : network)
520 peer->delays.recvValidation = validationDelay;
521 for (Peer* peer : (minority + majorityA))
522 peer->openTxs.insert(Tx{0});
523 for (Peer* peer : majorityB)
524 peer->openTxs.insert(Tx{1});
525
526 // Run for additional rounds
527 // With no validation delay, only 2 more rounds are needed.
528 // 1. Round to generate different ledgers
529 // 2. Round to detect different prior ledgers (but still generate
530 // wrong ones) and recover within that round since wrong LCL
531 // is detected before we close
532 //
533 // With a validation delay of ledgerMIN_CLOSE, we need 3 more
534 // rounds.
535 // 1. Round to generate different ledgers
536 // 2. Round to detect different prior ledgers (but still generate
537 // wrong ones) but end up declaring consensus on wrong LCL (but
538 // with the right transaction set!). This is because we detect
539 // the wrong LCL after we have closed the ledger, so we declare
540 // consensus based solely on our peer proposals. But we haven't
541 // had time to acquire the right ledger.
542 // 3. Round to correct
543 sim.run(3);
544
545 // The network never actually forks, since node 0-1 never see a
546 // quorum of validations to fully validate the incorrect chain.
547
548 // However, for a non zero-validation delay, the network is not
549 // synchronized because nodes 0 and 1 are running one ledger behind
550 if (BEAST_EXPECT(sim.branches() == 1))
551 {
552 for (Peer const* peer : majority)
553 {
554 // No jumps for majority nodes
555 BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
556 BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
557 }
558 for (Peer const* peer : minority)
559 {
560 auto& peerJumps = jumps[peer->id];
561 // last closed ledger jump between chains
562 {
563 if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
564 {
565 JumpCollector::Jump const& jump =
566 peerJumps.closeJumps.front();
567 // Jump is to a different chain
568 BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
569 BEAST_EXPECT(!jump.to.isAncestor(jump.from));
570 }
571 }
572 // fully validated jump forward in same chain
573 {
574 if (BEAST_EXPECT(
575 peerJumps.fullyValidatedJumps.size() == 1))
576 {
577 JumpCollector::Jump const& jump =
578 peerJumps.fullyValidatedJumps.front();
579 // Jump is to a different chain with same seq
580 BEAST_EXPECT(jump.from.seq() < jump.to.seq());
581 BEAST_EXPECT(jump.to.isAncestor(jump.from));
582 }
583 }
584 }
585 }
586 }
587
588 {
589 // Additional test engineered to switch LCL during the establish
590 // phase. This was added to trigger a scenario that previously
591 // crashed, in which switchLCL switched from establish to open
592 // phase, but still processed the establish phase logic.
593
594 // Loner node will accept an initial ledger A, but all other nodes
595 // accept ledger B a bit later. By delaying the time it takes
596 // to process a validation, loner node will detect the wrongLCL
597 // after it is already in the establish phase of the next round.
598
599 Sim sim;
600 PeerGroup loner = sim.createGroup(1);
601 PeerGroup friends = sim.createGroup(3);
602 loner.trust(loner + friends);
603
604 PeerGroup others = sim.createGroup(6);
605 PeerGroup clique = friends + others;
606 clique.trust(clique);
607
608 PeerGroup network = loner + clique;
609 network.connect(
610 network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
611
612 // initial round to set prior state
613 sim.run(1);
614 for (Peer* peer : (loner + friends))
615 peer->openTxs.insert(Tx(0));
616 for (Peer* peer : others)
617 peer->openTxs.insert(Tx(1));
618
619 // Delay validation processing
620 for (Peer* peer : network)
621 peer->delays.recvValidation = parms.ledgerGRANULARITY;
622
623 // additional rounds to generate wrongLCL and recover
624 sim.run(2);
625
626 // Check all peers recovered
627 for (Peer* p : network)
628 BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
629 }
630 }
631
632 void
634 {
635 using namespace csf;
636 using namespace std::chrono;
637 testcase("consensus close time rounding");
638
639 // This is a specialized test engineered to yield ledgers with different
640 // close times even though the peers believe they had close time
641 // consensus on the ledger.
642 ConsensusParms parms;
643
644 Sim sim;
645
646 // This requires a group of 4 fast and 2 slow peers to create a
647 // situation in which a subset of peers requires seeing additional
648 // proposals to declare consensus.
649 PeerGroup slow = sim.createGroup(2);
650 PeerGroup fast = sim.createGroup(4);
651 PeerGroup network = fast + slow;
652
653 // Connected trust graph
654 network.trust(network);
655
656 // Fast and slow network connections
657 fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
658 slow.connect(
659 network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
660
661 // Run to the ledger *prior* to decreasing the resolution
663
664 // In order to create the discrepency, we want a case where if
665 // X = effCloseTime(closeTime, resolution, parentCloseTime)
666 // X != effCloseTime(X, resolution, parentCloseTime)
667 //
668 // That is, the effective close time is not a fixed point. This can
669 // happen if X = parentCloseTime + 1, but a subsequent rounding goes
670 // to the next highest multiple of resolution.
671
672 // So we want to find an offset (now + offset) % 30s = 15
673 // (now + offset) % 20s = 15
674 // This way, the next ledger will close and round up Due to the
675 // network delay settings, the round of consensus will take 5s, so
676 // the next ledger's close time will
677
678 NetClock::duration when = network[0]->now().time_since_epoch();
679
680 // Check we are before the 30s to 20s transition
681 NetClock::duration resolution =
682 network[0]->lastClosedLedger.closeTimeResolution();
683 BEAST_EXPECT(resolution == NetClock::duration{30s});
684
685 while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
686 ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
687 when += 1s;
688 // Advance the clock without consensus running (IS THIS WHAT
689 // PREVENTS IT IN PRACTICE?)
690 sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
691
692 // Run one more ledger with 30s resolution
693 sim.run(1);
694 if (BEAST_EXPECT(sim.synchronized()))
695 {
696 // close time should be ahead of clock time since we engineered
697 // the close time to round up
698 for (Peer* peer : network)
699 {
700 BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
701 BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
702 }
703 }
704
705 // All peers submit their own ID as a transaction
706 for (Peer* peer : network)
707 peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
708
709 // Run 1 more round, this time it will have a decreased
710 // resolution of 20 seconds.
711
712 // The network delays are engineered so that the slow peers
713 // initially have the wrong tx hash, but they see a majority
714 // of agreement from their peers and declare consensus
715 //
716 // The trick is that everyone starts with a raw close time of
717 // 84681s
718 // Which has
719 // effCloseTime(86481s, 20s, 86490s) = 86491s
720 // However, when the slow peers update their position, they change
721 // the close time to 86451s. The fast peers declare consensus with
722 // the 86481s as their position still.
723 //
724 // When accepted the ledger
725 // - fast peers use eff(86481s) -> 86491s as the close time
726 // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
727
728 sim.run(1);
729
730 BEAST_EXPECT(sim.synchronized());
731 }
732
733 void
735 {
736 using namespace csf;
737 using namespace std::chrono;
738 testcase("fork");
739
740 std::uint32_t numPeers = 10;
741 // Vary overlap between two UNLs
742 for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
743 {
744 ConsensusParms const parms{};
745 Sim sim;
746
747 std::uint32_t numA = (numPeers - overlap) / 2;
748 std::uint32_t numB = numPeers - numA - overlap;
749
750 PeerGroup aOnly = sim.createGroup(numA);
751 PeerGroup bOnly = sim.createGroup(numB);
752 PeerGroup commonOnly = sim.createGroup(overlap);
753
754 PeerGroup a = aOnly + commonOnly;
755 PeerGroup b = bOnly + commonOnly;
756
757 PeerGroup network = a + b;
758
759 SimDuration delay =
760 round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
761 a.trustAndConnect(a, delay);
762 b.trustAndConnect(b, delay);
763
764 // Initial round to set prior state
765 sim.run(1);
766 for (Peer* peer : network)
767 {
768 // Nodes have only seen transactions from their neighbors
769 peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
770 for (Peer* to : sim.trustGraph.trustedPeers(peer))
771 peer->openTxs.insert(
772 Tx{static_cast<std::uint32_t>(to->id)});
773 }
774 sim.run(1);
775
776 // Fork should not happen for 40% or greater overlap
777 // Since the overlapped nodes have a UNL that is the union of the
778 // two cliques, the maximum sized UNL list is the number of peers
779 if (overlap > 0.4 * numPeers)
780 BEAST_EXPECT(sim.synchronized());
781 else
782 {
783 // Even if we do fork, there shouldn't be more than 3 ledgers
784 // One for cliqueA, one for cliqueB and one for nodes in both
785 BEAST_EXPECT(sim.branches() <= 3);
786 }
787 }
788 }
789
790 void
792 {
793 using namespace csf;
794 using namespace std::chrono;
795 testcase("hub network");
796
797 // Simulate a set of 5 validators that aren't directly connected but
798 // rely on a single hub node for communication
799
800 ConsensusParms const parms{};
801 Sim sim;
802 PeerGroup validators = sim.createGroup(5);
803 PeerGroup center = sim.createGroup(1);
804 validators.trust(validators);
805 center.trust(validators);
806
807 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
808 validators.connect(center, delay);
809
810 center[0]->runAsValidator = false;
811
812 // prep round to set initial state.
813 sim.run(1);
814
815 // everyone submits their own ID as a TX and relay it to peers
816 for (Peer* p : validators)
817 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
818
819 sim.run(1);
820
821 // All peers are in sync
822 BEAST_EXPECT(sim.synchronized());
823 }
824
825 // Helper collector for testPreferredByBranch
826 // Invasively disconnects network at bad times to cause splits
828 {
833 bool reconnected = false;
834
836 csf::PeerGroup& net,
838 csf::PeerGroup& split,
840 : network(net), groupCfast(c), groupCsplit(split), delay(d)
841 {
842 }
843
844 template <class E>
845 void
847 {
848 }
849
850 void
852 {
853 using namespace std::chrono;
854 // As soon as the fastC node fully validates C, disconnect
855 // ALL c nodes from the network. The fast C node needs to disconnect
856 // as well to prevent it from relaying the validations it did see
857 if (who == groupCfast[0]->id &&
858 e.ledger.seq() == csf::Ledger::Seq{2})
859 {
860 network.disconnect(groupCsplit);
861 network.disconnect(groupCfast);
862 }
863 }
864
865 void
867 {
868 // As soon as anyone generates a child of B or C, reconnect the
869 // network so those validations make it through
870 if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
871 {
872 reconnected = true;
873 network.connect(groupCsplit, delay);
874 }
875 }
876 };
877
878 void
880 {
881 using namespace csf;
882 using namespace std::chrono;
883 testcase("preferred by branch");
884
885 // Simulate network splits that are prevented from forking when using
886 // preferred ledger by trie. This is a contrived example that involves
887 // excessive network splits, but demonstrates the safety improvement
888 // from the preferred ledger by trie approach.
889
890 // Consider 10 validating nodes that comprise a single common UNL
891 // Ledger history:
892 // 1: A
893 // _/ \_
894 // 2: B C
895 // _/ _/ \_
896 // 3: D C' |||||||| (8 different ledgers)
897
898 // - All nodes generate the common ledger A
899 // - 2 nodes generate B and 8 nodes generate C
900 // - Only 1 of the C nodes sees all the C validations and fully
901 // validates C. The rest of the C nodes split at just the right time
902 // such that they never see any C validations but their own.
903 // - The C nodes continue and generate 8 different child ledgers.
904 // - Meanwhile, the D nodes only saw 1 validation for C and 2
905 // validations
906 // for B.
907 // - The network reconnects and the validations for generation 3 ledgers
908 // are observed (D and the 8 C's)
909 // - In the old approach, 2 votes for D outweights 1 vote for each C'
910 // so the network would avalanche towards D and fully validate it
911 // EVEN though C was fully validated by one node
912 // - In the new approach, 2 votes for D are not enough to outweight the
913 // 8 implicit votes for C, so nodes will avalanche to C instead
914
915 ConsensusParms const parms{};
916 Sim sim;
917
918 // Goes A->B->D
919 PeerGroup groupABD = sim.createGroup(2);
920 // Single node that initially fully validates C before the split
921 PeerGroup groupCfast = sim.createGroup(1);
922 // Generates C, but fails to fully validate before the split
923 PeerGroup groupCsplit = sim.createGroup(7);
924
925 PeerGroup groupNotFastC = groupABD + groupCsplit;
926 PeerGroup network = groupABD + groupCsplit + groupCfast;
927
928 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
929 SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
930
931 network.trust(network);
932 // C must have a shorter delay to see all the validations before the
933 // other nodes
934 network.connect(groupCfast, fDelay);
935 // The rest of the network is connected at the same speed
936 groupNotFastC.connect(groupNotFastC, delay);
937
938 Disruptor dc(network, groupCfast, groupCsplit, delay);
939 sim.collectors.add(dc);
940
941 // Consensus round to generate ledger A
942 sim.run(1);
943 BEAST_EXPECT(sim.synchronized());
944
945 // Next round generates B and C
946 // To force B, we inject an extra transaction in to those nodes
947 for (Peer* peer : groupABD)
948 {
949 peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
950 }
951 // The Disruptor will ensure that nodes disconnect before the C
952 // validations make it to all but the fastC node
953 sim.run(1);
954
955 // We are no longer in sync, but have not yet forked:
956 // 9 nodes consider A the last fully validated ledger and fastC sees C
957 BEAST_EXPECT(!sim.synchronized());
958 BEAST_EXPECT(sim.branches() == 1);
959
960 // Run another round to generate the 8 different C' ledgers
961 for (Peer* p : network)
962 p->submit(Tx(static_cast<std::uint32_t>(p->id)));
963 sim.run(1);
964
965 // Still not forked
966 BEAST_EXPECT(!sim.synchronized());
967 BEAST_EXPECT(sim.branches() == 1);
968
969 // Disruptor will reconnect all but the fastC node
970 sim.run(1);
971
972 if (BEAST_EXPECT(sim.branches() == 1))
973 {
974 BEAST_EXPECT(sim.synchronized());
975 }
976 else // old approach caused a fork
977 {
978 BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
979 BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
980 }
981 }
982
983 // Helper collector for testPauseForLaggards
984 // This will remove the ledgerAccept delay used to
985 // initially create the slow vs. fast validator groups.
987 {
989
991 {
992 }
993
994 template <class E>
995 void
997 {
998 }
999
1000 void
1002 {
1003 for (csf::Peer* p : g)
1004 {
1005 if (p->id == who)
1006 p->delays.ledgerAccept = std::chrono::seconds{0};
1007 }
1008 }
1009 };
1010
1011 void
1013 {
1014 using namespace csf;
1015 using namespace std::chrono;
1016 testcase("pause for laggards");
1017
1018 // Test that validators that jump ahead of the network slow
1019 // down.
1020
1021 // We engineer the following validated ledger history scenario:
1022 //
1023 // / --> B1 --> C1 --> ... -> G1 "ahead"
1024 // A
1025 // \ --> B2 --> C2 "behind"
1026 //
1027 // After validating a common ledger A, a set of "behind" validators
1028 // briefly run slower and validate the lower chain of ledgers.
1029 // The "ahead" validators run normal speed and run ahead validating the
1030 // upper chain of ledgers.
1031 //
1032 // Due to the uncommited support definition of the preferred branch
1033 // protocol, even if the "behind" validators are a majority, the "ahead"
1034 // validators cannot jump to the proper branch until the "behind"
1035 // validators catch up to the same sequence number. For this test to
1036 // succeed, the ahead validators need to briefly slow down consensus.
1037
1038 ConsensusParms const parms{};
1039 Sim sim;
1040 SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
1041
1042 PeerGroup behind = sim.createGroup(3);
1043 PeerGroup ahead = sim.createGroup(2);
1044 PeerGroup network = ahead + behind;
1045
1046 hash_set<Peer::NodeKey_t> trustedKeys;
1047 for (Peer* p : network)
1048 trustedKeys.insert(p->key);
1049 for (Peer* p : network)
1050 p->trustedKeys = trustedKeys;
1051
1052 network.trustAndConnect(network, delay);
1053
1054 // Initial seed round to set prior state
1055 sim.run(1);
1056
1057 // Have the "behind" group initially take a really long time to
1058 // accept a ledger after ending deliberation
1059 for (Peer* p : behind)
1060 p->delays.ledgerAccept = 20s;
1061
1062 // Use the collector to revert the delay after the single
1063 // slow ledger is generated
1064 UndoDelay undoDelay{behind};
1065 sim.collectors.add(undoDelay);
1066
1067#if 0
1068 // Have all beast::journal output printed to stdout
1069 for (Peer* p : network)
1070 p->sink.threshold(beast::severities::kAll);
1071
1072 // Print ledger accept and fully validated events to stdout
1073 StreamCollector sc{std::cout};
1074 sim.collectors.add(sc);
1075#endif
1076 // Run the simulation for 100 seconds of simulation time with
1077 std::chrono::nanoseconds const simDuration = 100s;
1078
1079 // Simulate clients submitting 1 tx every 5 seconds to a random
1080 // validator
1081 Rate const rate{1, 5s};
1082 auto peerSelector = makeSelector(
1083 network.begin(),
1084 network.end(),
1085 std::vector<double>(network.size(), 1.),
1086 sim.rng);
1087 auto txSubmitter = makeSubmitter(
1088 ConstantDistribution{rate.inv()},
1089 sim.scheduler.now(),
1090 sim.scheduler.now() + simDuration,
1091 peerSelector,
1092 sim.scheduler,
1093 sim.rng);
1094
1095 // Run simulation
1096 sim.run(simDuration);
1097
1098 // Verify that the network recovered
1099 BEAST_EXPECT(sim.synchronized());
1100 }
1101
1102 void
1104 {
1105 testcase("disputes");
1106
1107 using namespace csf;
1108
1109 // Test dispute objects directly
1110 using Dispute = DisputedTx<Tx, PeerID>;
1111
1112 Tx const txTrue{99};
1113 Tx const txFalse{98};
1114 Tx const txFollowingTrue{97};
1115 Tx const txFollowingFalse{96};
1116 int const numPeers = 100;
1118 std::size_t peersUnchanged = 0;
1119
1121 auto j = logs->journal("Test");
1123
1124 // Three cases:
1125 // 1 proposing, initial vote yes
1126 // 2 proposing, initial vote no
1127 // 3 not proposing, initial vote doesn't matter after the first update,
1128 // use yes
1129 {
1130 Dispute proposingTrue{txTrue.id(), true, numPeers, journal_};
1131 Dispute proposingFalse{txFalse.id(), false, numPeers, journal_};
1132 Dispute followingTrue{
1133 txFollowingTrue.id(), true, numPeers, journal_};
1134 Dispute followingFalse{
1135 txFollowingFalse.id(), false, numPeers, journal_};
1136 BEAST_EXPECT(proposingTrue.ID() == 99);
1137 BEAST_EXPECT(proposingFalse.ID() == 98);
1138 BEAST_EXPECT(followingTrue.ID() == 97);
1139 BEAST_EXPECT(followingFalse.ID() == 96);
1140
1141 // Create an even split in the peer votes
1142 for (int i = 0; i < numPeers; ++i)
1143 {
1144 BEAST_EXPECT(proposingTrue.setVote(PeerID(i), i < 50));
1145 BEAST_EXPECT(proposingFalse.setVote(PeerID(i), i < 50));
1146 BEAST_EXPECT(followingTrue.setVote(PeerID(i), i < 50));
1147 BEAST_EXPECT(followingFalse.setVote(PeerID(i), i < 50));
1148 }
1149 // Switch the middle vote to match mine
1150 BEAST_EXPECT(proposingTrue.setVote(PeerID(50), true));
1151 BEAST_EXPECT(proposingFalse.setVote(PeerID(49), false));
1152 BEAST_EXPECT(followingTrue.setVote(PeerID(50), true));
1153 BEAST_EXPECT(followingFalse.setVote(PeerID(49), false));
1154
1155 // no changes yet
1156 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1157 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1158 BEAST_EXPECT(followingTrue.getOurVote() == true);
1159 BEAST_EXPECT(followingFalse.getOurVote() == false);
1160 BEAST_EXPECT(
1161 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1162 BEAST_EXPECT(
1163 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1164 BEAST_EXPECT(
1165 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1166 BEAST_EXPECT(
1167 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1168 BEAST_EXPECT(clog->str() == "");
1169
1170 // I'm in the majority, my vote should not change
1171 BEAST_EXPECT(!proposingTrue.updateVote(5, true, p));
1172 BEAST_EXPECT(!proposingFalse.updateVote(5, true, p));
1173 BEAST_EXPECT(!followingTrue.updateVote(5, false, p));
1174 BEAST_EXPECT(!followingFalse.updateVote(5, false, p));
1175
1176 BEAST_EXPECT(!proposingTrue.updateVote(10, true, p));
1177 BEAST_EXPECT(!proposingFalse.updateVote(10, true, p));
1178 BEAST_EXPECT(!followingTrue.updateVote(10, false, p));
1179 BEAST_EXPECT(!followingFalse.updateVote(10, false, p));
1180
1181 peersUnchanged = 2;
1182 BEAST_EXPECT(
1183 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1184 BEAST_EXPECT(
1185 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1186 BEAST_EXPECT(
1187 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1188 BEAST_EXPECT(
1189 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1190 BEAST_EXPECT(clog->str() == "");
1191
1192 // Right now, the vote is 51%. The requirement is about to jump to
1193 // 65%
1194 BEAST_EXPECT(proposingTrue.updateVote(55, true, p));
1195 BEAST_EXPECT(!proposingFalse.updateVote(55, true, p));
1196 BEAST_EXPECT(!followingTrue.updateVote(55, false, p));
1197 BEAST_EXPECT(!followingFalse.updateVote(55, false, p));
1198
1199 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1200 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1201 BEAST_EXPECT(followingTrue.getOurVote() == true);
1202 BEAST_EXPECT(followingFalse.getOurVote() == false);
1203 // 16 validators change their vote to match my original vote
1204 for (int i = 0; i < 16; ++i)
1205 {
1206 auto pTrue = PeerID(numPeers - i - 1);
1207 auto pFalse = PeerID(i);
1208 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1209 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1210 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1211 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1212 }
1213 // The vote should now be 66%, threshold is 65%
1214 BEAST_EXPECT(proposingTrue.updateVote(60, true, p));
1215 BEAST_EXPECT(!proposingFalse.updateVote(60, true, p));
1216 BEAST_EXPECT(!followingTrue.updateVote(60, false, p));
1217 BEAST_EXPECT(!followingFalse.updateVote(60, false, p));
1218
1219 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1220 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1221 BEAST_EXPECT(followingTrue.getOurVote() == true);
1222 BEAST_EXPECT(followingFalse.getOurVote() == false);
1223
1224 // Threshold jumps to 70%
1225 BEAST_EXPECT(proposingTrue.updateVote(86, true, p));
1226 BEAST_EXPECT(!proposingFalse.updateVote(86, true, p));
1227 BEAST_EXPECT(!followingTrue.updateVote(86, false, p));
1228 BEAST_EXPECT(!followingFalse.updateVote(86, false, p));
1229
1230 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1231 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1232 BEAST_EXPECT(followingTrue.getOurVote() == true);
1233 BEAST_EXPECT(followingFalse.getOurVote() == false);
1234
1235 // 5 more validators change their vote to match my original vote
1236 for (int i = 16; i < 21; ++i)
1237 {
1238 auto pTrue = PeerID(numPeers - i - 1);
1239 auto pFalse = PeerID(i);
1240 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1241 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1242 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1243 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1244 }
1245
1246 // The vote should now be 71%, threshold is 70%
1247 BEAST_EXPECT(proposingTrue.updateVote(90, true, p));
1248 BEAST_EXPECT(!proposingFalse.updateVote(90, true, p));
1249 BEAST_EXPECT(!followingTrue.updateVote(90, false, p));
1250 BEAST_EXPECT(!followingFalse.updateVote(90, false, p));
1251
1252 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1253 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1254 BEAST_EXPECT(followingTrue.getOurVote() == true);
1255 BEAST_EXPECT(followingFalse.getOurVote() == false);
1256
1257 // The vote should now be 71%, threshold is 70%
1258 BEAST_EXPECT(!proposingTrue.updateVote(150, true, p));
1259 BEAST_EXPECT(!proposingFalse.updateVote(150, true, p));
1260 BEAST_EXPECT(!followingTrue.updateVote(150, false, p));
1261 BEAST_EXPECT(!followingFalse.updateVote(150, false, p));
1262
1263 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1264 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1265 BEAST_EXPECT(followingTrue.getOurVote() == true);
1266 BEAST_EXPECT(followingFalse.getOurVote() == false);
1267
1268 // The vote should now be 71%, threshold is 70%
1269 BEAST_EXPECT(!proposingTrue.updateVote(190, true, p));
1270 BEAST_EXPECT(!proposingFalse.updateVote(190, true, p));
1271 BEAST_EXPECT(!followingTrue.updateVote(190, false, p));
1272 BEAST_EXPECT(!followingFalse.updateVote(190, false, p));
1273
1274 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1275 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1276 BEAST_EXPECT(followingTrue.getOurVote() == true);
1277 BEAST_EXPECT(followingFalse.getOurVote() == false);
1278
1279 peersUnchanged = 3;
1280 BEAST_EXPECT(
1281 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1282 BEAST_EXPECT(
1283 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1284 BEAST_EXPECT(
1285 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1286 BEAST_EXPECT(
1287 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1288 BEAST_EXPECT(clog->str() == "");
1289
1290 // Threshold jumps to 95%
1291 BEAST_EXPECT(proposingTrue.updateVote(220, true, p));
1292 BEAST_EXPECT(!proposingFalse.updateVote(220, true, p));
1293 BEAST_EXPECT(!followingTrue.updateVote(220, false, p));
1294 BEAST_EXPECT(!followingFalse.updateVote(220, false, p));
1295
1296 BEAST_EXPECT(proposingTrue.getOurVote() == false);
1297 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1298 BEAST_EXPECT(followingTrue.getOurVote() == true);
1299 BEAST_EXPECT(followingFalse.getOurVote() == false);
1300
1301 // 25 more validators change their vote to match my original vote
1302 for (int i = 21; i < 46; ++i)
1303 {
1304 auto pTrue = PeerID(numPeers - i - 1);
1305 auto pFalse = PeerID(i);
1306 BEAST_EXPECT(proposingTrue.setVote(pTrue, true));
1307 BEAST_EXPECT(proposingFalse.setVote(pFalse, false));
1308 BEAST_EXPECT(followingTrue.setVote(pTrue, true));
1309 BEAST_EXPECT(followingFalse.setVote(pFalse, false));
1310 }
1311
1312 // The vote should now be 96%, threshold is 95%
1313 BEAST_EXPECT(proposingTrue.updateVote(250, true, p));
1314 BEAST_EXPECT(!proposingFalse.updateVote(250, true, p));
1315 BEAST_EXPECT(!followingTrue.updateVote(250, false, p));
1316 BEAST_EXPECT(!followingFalse.updateVote(250, false, p));
1317
1318 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1319 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1320 BEAST_EXPECT(followingTrue.getOurVote() == true);
1321 BEAST_EXPECT(followingFalse.getOurVote() == false);
1322
1323 for (peersUnchanged = 0; peersUnchanged < 6; ++peersUnchanged)
1324 {
1325 BEAST_EXPECT(
1326 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1327 BEAST_EXPECT(
1328 !proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1329 BEAST_EXPECT(
1330 !followingTrue.stalled(p, false, peersUnchanged, j, clog));
1331 BEAST_EXPECT(
1332 !followingFalse.stalled(p, false, peersUnchanged, j, clog));
1333 BEAST_EXPECT(clog->str() == "");
1334 }
1335
1336 auto expectStalled = [this, &clog](
1337 int txid,
1338 bool ourVote,
1339 int ourTime,
1340 int peerTime,
1341 int support,
1342 std::uint32_t line) {
1343 using namespace std::string_literals;
1344
1345 auto const s = clog->str();
1346 expect(s.find("stalled"), s, __FILE__, line);
1347 expect(
1348 s.starts_with("Transaction "s + std::to_string(txid)),
1349 s,
1350 __FILE__,
1351 line);
1352 expect(
1353 s.find("voting "s + (ourVote ? "YES" : "NO")) != s.npos,
1354 s,
1355 __FILE__,
1356 line);
1357 expect(
1358 s.find("for "s + std::to_string(ourTime) + " rounds."s) !=
1359 s.npos,
1360 s,
1361 __FILE__,
1362 line);
1363 expect(
1364 s.find(
1365 "votes in "s + std::to_string(peerTime) + " rounds.") !=
1366 s.npos,
1367 s,
1368 __FILE__,
1369 line);
1370 expect(
1371 s.ends_with(
1372 "has "s + std::to_string(support) + "% support. "s),
1373 s,
1374 __FILE__,
1375 line);
1377 };
1378
1379 for (int i = 0; i < 1; ++i)
1380 {
1381 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1382 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1383 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1384 BEAST_EXPECT(
1385 !followingFalse.updateVote(250 + 10 * i, false, p));
1386
1387 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1388 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1389 BEAST_EXPECT(followingTrue.getOurVote() == true);
1390 BEAST_EXPECT(followingFalse.getOurVote() == false);
1391
1392 // true vote has changed recently, so not stalled
1393 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1394 BEAST_EXPECT(clog->str() == "");
1395 // remaining votes have been unchanged in so long that we only
1396 // need to hit the second round at 95% to be stalled, regardless
1397 // of peers
1398 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1399 expectStalled(98, false, 11, 0, 2, __LINE__);
1400 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1401 expectStalled(97, true, 11, 0, 97, __LINE__);
1402 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1403 expectStalled(96, false, 11, 0, 3, __LINE__);
1404
1405 // true vote has changed recently, so not stalled
1406 BEAST_EXPECT(
1407 !proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1408 BEAST_EXPECTS(clog->str() == "", clog->str());
1409 // remaining votes have been unchanged in so long that we only
1410 // need to hit the second round at 95% to be stalled, regardless
1411 // of peers
1412 BEAST_EXPECT(
1413 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1414 expectStalled(98, false, 11, 6, 2, __LINE__);
1415 BEAST_EXPECT(
1416 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1417 expectStalled(97, true, 11, 6, 97, __LINE__);
1418 BEAST_EXPECT(
1419 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1420 expectStalled(96, false, 11, 6, 3, __LINE__);
1421 }
1422 for (int i = 1; i < 3; ++i)
1423 {
1424 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1425 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1426 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1427 BEAST_EXPECT(
1428 !followingFalse.updateVote(250 + 10 * i, false, p));
1429
1430 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1431 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1432 BEAST_EXPECT(followingTrue.getOurVote() == true);
1433 BEAST_EXPECT(followingFalse.getOurVote() == false);
1434
1435 // true vote changed 2 rounds ago, and peers are changing, so
1436 // not stalled
1437 BEAST_EXPECT(!proposingTrue.stalled(p, true, 0, j, clog));
1438 BEAST_EXPECTS(clog->str() == "", clog->str());
1439 // still stalled
1440 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1441 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1442 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1443 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1444 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1445 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1446
1447 // true vote changed 2 rounds ago, and peers are NOT changing,
1448 // so stalled
1449 BEAST_EXPECT(
1450 proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1451 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1452 // still stalled
1453 BEAST_EXPECT(
1454 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1455 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1456 BEAST_EXPECT(
1457 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1458 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1459 BEAST_EXPECT(
1460 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1461 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1462 }
1463 for (int i = 3; i < 5; ++i)
1464 {
1465 BEAST_EXPECT(!proposingTrue.updateVote(250 + 10 * i, true, p));
1466 BEAST_EXPECT(!proposingFalse.updateVote(250 + 10 * i, true, p));
1467 BEAST_EXPECT(!followingTrue.updateVote(250 + 10 * i, false, p));
1468 BEAST_EXPECT(
1469 !followingFalse.updateVote(250 + 10 * i, false, p));
1470
1471 BEAST_EXPECT(proposingTrue.getOurVote() == true);
1472 BEAST_EXPECT(proposingFalse.getOurVote() == false);
1473 BEAST_EXPECT(followingTrue.getOurVote() == true);
1474 BEAST_EXPECT(followingFalse.getOurVote() == false);
1475
1476 BEAST_EXPECT(proposingTrue.stalled(p, true, 0, j, clog));
1477 expectStalled(99, true, 1 + i, 0, 97, __LINE__);
1478 BEAST_EXPECT(proposingFalse.stalled(p, true, 0, j, clog));
1479 expectStalled(98, false, 11 + i, 0, 2, __LINE__);
1480 BEAST_EXPECT(followingTrue.stalled(p, false, 0, j, clog));
1481 expectStalled(97, true, 11 + i, 0, 97, __LINE__);
1482 BEAST_EXPECT(followingFalse.stalled(p, false, 0, j, clog));
1483 expectStalled(96, false, 11 + i, 0, 3, __LINE__);
1484
1485 BEAST_EXPECT(
1486 proposingTrue.stalled(p, true, peersUnchanged, j, clog));
1487 expectStalled(99, true, 1 + i, 6, 97, __LINE__);
1488 BEAST_EXPECT(
1489 proposingFalse.stalled(p, true, peersUnchanged, j, clog));
1490 expectStalled(98, false, 11 + i, 6, 2, __LINE__);
1491 BEAST_EXPECT(
1492 followingTrue.stalled(p, false, peersUnchanged, j, clog));
1493 expectStalled(97, true, 11 + i, 6, 97, __LINE__);
1494 BEAST_EXPECT(
1495 followingFalse.stalled(p, false, peersUnchanged, j, clog));
1496 expectStalled(96, false, 11 + i, 6, 3, __LINE__);
1497 }
1498 }
1499 }
1500
    void
    run() override
    {
        // Unit checks of the standalone consensus helpers first...
        testShouldCloseLedger();
        testCheckConsensus();

        // ...then the csf simulation-based scenarios.
        testStandalone();
        testPeersAgree();
        testSlowPeers();
        testCloseTimeDisagree();
        testWrongLCL();
        testConsensusCloseTimeRounding();
        testFork();
        testHubNetwork();
        testPreferredByBranch();
        testPauseForLaggards();
        testDisputes();
    }
1519};
1520
1521BEAST_DEFINE_TESTSUITE(Consensus, consensus, ripple);
1522} // namespace test
1523} // namespace ripple
A testsuite class.
Definition suite.h:52
testcase_t testcase
Memberspace for declaring test cases.
Definition suite.h:152
Generic implementation of consensus algorithm.
Definition Consensus.h:279
A transaction discovered to be in dispute during consensus.
Definition DisputedTx.h:30
Represents a peer connection in the overlay.
virtual id_t id() const =0
void run() override
Runs the suite.
A group of simulation Peers.
Definition PeerGroup.h:23
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition PeerGroup.h:167
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition PeerGroup.h:147
T insert(T... args)
T is_same_v
typename SimClock::duration SimDuration
Definition SimTime.h:17
typename SimClock::time_point SimTime
Definition SimTime.h:18
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, bool stalled, ConsensusParms const &parms, bool proposing, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determine whether the network reached consensus and whether we joined.
@ Expired
Consensus time limit has hard-expired.
@ MovedOn
The network has consensus without us.
@ Yes
We have consensus along with the network.
@ No
We do not have consensus.
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j, std::unique_ptr< std::stringstream > const &clog)
Determines whether the current ledger should close at this time.
Definition Consensus.cpp:8
auto constexpr increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
STL namespace.
Consensus algorithm parameters.
std::chrono::milliseconds const ledgerGRANULARITY
How often we check state or change positions.
Represents a transfer rate.
Definition Rate.h:21
void on(csf::PeerID, csf::SimTime, E const &)
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
void on(csf::PeerID, csf::SimTime, E const &)
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Peer accepted consensus results.
Definition events.h:102
Peer fully validated a new ledger.
Definition events.h:121
Ledger ledger
The new fully validated ledger.
Definition events.h:123
A single peer in the simulation.
T to_string(T... args)