rippled
Consensus_test.cpp
1 //------------------------------------------------------------------------------
2 /*
3  This file is part of rippled: https://github.com/ripple/rippled
4  Copyright (c) 2012-2016 Ripple Labs Inc.
5 
6  Permission to use, copy, modify, and/or distribute this software for any
7  purpose with or without fee is hereby granted, provided that the above
8  copyright notice and this permission notice appear in all copies.
9 
10  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 //==============================================================================
19 #include <ripple/beast/clock/manual_clock.h>
20 #include <ripple/beast/unit_test.h>
21 #include <ripple/consensus/Consensus.h>
22 #include <ripple/consensus/ConsensusProposal.h>
23 #include <test/csf.h>
24 #include <test/unit_test/SuiteJournal.h>
25 #include <utility>
26 
27 namespace ripple {
28 namespace test {
29 
30 class Consensus_test : public beast::unit_test::suite
31 {
33 
34 public:
35  Consensus_test() : journal_("Consensus_test", *this)
36  {
37  }
38 
39  void
41  {
42  using namespace std::chrono_literals;
43 
44  // Use default parameters
45  ConsensusParms const p{};
46 
47  // Bizarre times forcibly close
48  BEAST_EXPECT(shouldCloseLedger(
49  true, 10, 10, 10, -10s, 10s, 1s, 1s, p, journal_));
50  BEAST_EXPECT(shouldCloseLedger(
51  true, 10, 10, 10, 100h, 10s, 1s, 1s, p, journal_));
52  BEAST_EXPECT(shouldCloseLedger(
53  true, 10, 10, 10, 10s, 100h, 1s, 1s, p, journal_));
54 
55  // Rest of network has closed
56  BEAST_EXPECT(
57  shouldCloseLedger(true, 10, 3, 5, 10s, 10s, 10s, 10s, p, journal_));
58 
59  // No transactions means wait until end of internval
60  BEAST_EXPECT(
61  !shouldCloseLedger(false, 10, 0, 0, 1s, 1s, 1s, 10s, p, journal_));
62  BEAST_EXPECT(
63  shouldCloseLedger(false, 10, 0, 0, 1s, 10s, 1s, 10s, p, journal_));
64 
65  // Enforce minimum ledger open time
66  BEAST_EXPECT(
67  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 1s, 10s, p, journal_));
68 
69  // Don't go too much faster than last time
70  BEAST_EXPECT(
71  !shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 3s, 10s, p, journal_));
72 
73  BEAST_EXPECT(
74  shouldCloseLedger(true, 10, 0, 0, 10s, 10s, 10s, 10s, p, journal_));
75  }
76 
77  void
79  {
80  using namespace std::chrono_literals;
81 
82  // Use default parameterss
83  ConsensusParms const p{};
84 
85  // Not enough time has elapsed
86  BEAST_EXPECT(
88  checkConsensus(10, 2, 2, 0, 3s, 2s, p, true, journal_));
89 
90  // If not enough peers have propsed, ensure
91  // more time for proposals
92  BEAST_EXPECT(
94  checkConsensus(10, 2, 2, 0, 3s, 4s, p, true, journal_));
95 
96  // Enough time has elapsed and we all agree
97  BEAST_EXPECT(
99  checkConsensus(10, 2, 2, 0, 3s, 10s, p, true, journal_));
100 
101  // Enough time has elapsed and we don't yet agree
102  BEAST_EXPECT(
104  checkConsensus(10, 2, 1, 0, 3s, 10s, p, true, journal_));
105 
106  // Our peers have moved on
107  // Enough time has elapsed and we all agree
108  BEAST_EXPECT(
110  checkConsensus(10, 2, 1, 8, 3s, 10s, p, true, journal_));
111 
112  // No peers makes it easy to agree
113  BEAST_EXPECT(
115  checkConsensus(0, 0, 0, 0, 3s, 10s, p, true, journal_));
116  }
117 
118  void
120  {
121  using namespace std::chrono_literals;
122  using namespace csf;
123 
124  Sim s;
125  PeerGroup peers = s.createGroup(1);
126  Peer* peer = peers[0];
127  peer->targetLedgers = 1;
128  peer->start();
129  peer->submit(Tx{1});
130 
131  s.scheduler.step();
132 
133  // Inspect that the proper ledger was created
134  auto const& lcl = peer->lastClosedLedger;
135  BEAST_EXPECT(peer->prevLedgerID() == lcl.id());
136  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
137  BEAST_EXPECT(lcl.txs().size() == 1);
138  BEAST_EXPECT(lcl.txs().find(Tx{1}) != lcl.txs().end());
139  BEAST_EXPECT(peer->prevProposers == 0);
140  }
141 
142  void
144  {
145  using namespace csf;
146  using namespace std::chrono;
147 
148  ConsensusParms const parms{};
149  Sim sim;
150  PeerGroup peers = sim.createGroup(5);
151 
152  // Connected trust and network graphs with single fixed delay
153  peers.trustAndConnect(
154  peers, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
155 
156  // everyone submits their own ID as a TX
157  for (Peer* p : peers)
158  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
159 
160  sim.run(1);
161 
162  // All peers are in sync
163  if (BEAST_EXPECT(sim.synchronized()))
164  {
165  for (Peer const* peer : peers)
166  {
167  auto const& lcl = peer->lastClosedLedger;
168  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
169  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
170  // All peers proposed
171  BEAST_EXPECT(peer->prevProposers == peers.size() - 1);
172  // All transactions were accepted
173  for (std::uint32_t i = 0; i < peers.size(); ++i)
174  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
175  }
176  }
177  }
178 
179  void
181  {
182  using namespace csf;
183  using namespace std::chrono;
184 
185  // Several tests of a complete trust graph with a subset of peers
186  // that have significantly longer network delays to the rest of the
187  // network
188 
189  // Test when a slow peer doesn't delay a consensus quorum (4/5 agree)
190  {
191  ConsensusParms const parms{};
192  Sim sim;
193  PeerGroup slow = sim.createGroup(1);
194  PeerGroup fast = sim.createGroup(4);
195  PeerGroup network = fast + slow;
196 
197  // Fully connected trust graph
198  network.trust(network);
199 
200  // Fast and slow network connections
201  fast.connect(
202  fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
203 
204  slow.connect(
205  network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
206 
207  // All peers submit their own ID as a transaction
208  for (Peer* peer : network)
209  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
210 
211  sim.run(1);
212 
213  // Verify all peers have same LCL but are missing transaction 0
214  // All peers are in sync even with a slower peer 0
215  if (BEAST_EXPECT(sim.synchronized()))
216  {
217  for (Peer* peer : network)
218  {
219  auto const& lcl = peer->lastClosedLedger;
220  BEAST_EXPECT(lcl.id() == peer->prevLedgerID());
221  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
222 
223  BEAST_EXPECT(peer->prevProposers == network.size() - 1);
224  BEAST_EXPECT(
225  peer->prevRoundTime == network[0]->prevRoundTime);
226 
227  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
228  for (std::uint32_t i = 2; i < network.size(); ++i)
229  BEAST_EXPECT(lcl.txs().find(Tx{i}) != lcl.txs().end());
230 
231  // Tx 0 didn't make it
232  BEAST_EXPECT(
233  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
234  }
235  }
236  }
237 
238  // Test when the slow peers delay a consensus quorum (4/6 agree)
239  {
240  // Run two tests
241  // 1. The slow peers are participating in consensus
242  // 2. The slow peers are just observing
243 
244  for (auto isParticipant : {true, false})
245  {
246  ConsensusParms const parms{};
247 
248  Sim sim;
249  PeerGroup slow = sim.createGroup(2);
250  PeerGroup fast = sim.createGroup(4);
251  PeerGroup network = fast + slow;
252 
253  // Connected trust graph
254  network.trust(network);
255 
256  // Fast and slow network connections
257  fast.connect(
258  fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
259 
260  slow.connect(
261  network,
262  round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
263 
264  for (Peer* peer : slow)
265  peer->runAsValidator = isParticipant;
266 
267  // All peers submit their own ID as a transaction and relay it
268  // to peers
269  for (Peer* peer : network)
270  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
271 
272  sim.run(1);
273 
274  if (BEAST_EXPECT(sim.synchronized()))
275  {
276  // Verify all peers have same LCL but are missing
277  // transaction 0,1 which was not received by all peers
278  // before the ledger closed
279  for (Peer* peer : network)
280  {
281  // Closed ledger has all but transaction 0,1
282  auto const& lcl = peer->lastClosedLedger;
283  BEAST_EXPECT(lcl.seq() == Ledger::Seq{1});
284  BEAST_EXPECT(lcl.txs().find(Tx{0}) == lcl.txs().end());
285  BEAST_EXPECT(lcl.txs().find(Tx{1}) == lcl.txs().end());
286  for (std::uint32_t i = slow.size(); i < network.size();
287  ++i)
288  BEAST_EXPECT(
289  lcl.txs().find(Tx{i}) != lcl.txs().end());
290 
291  // Tx 0-1 didn't make it
292  BEAST_EXPECT(
293  peer->openTxs.find(Tx{0}) != peer->openTxs.end());
294  BEAST_EXPECT(
295  peer->openTxs.find(Tx{1}) != peer->openTxs.end());
296  }
297 
298  Peer const* slowPeer = slow[0];
299  if (isParticipant)
300  BEAST_EXPECT(
301  slowPeer->prevProposers == network.size() - 1);
302  else
303  BEAST_EXPECT(slowPeer->prevProposers == fast.size());
304 
305  for (Peer* peer : fast)
306  {
307  // Due to the network link delay settings
308  // Peer 0 initially proposes {0}
309  // Peer 1 initially proposes {1}
310  // Peers 2-5 initially propose {2,3,4,5}
311  // Since peers 2-5 agree, 4/6 > the initial 50% needed
312  // to include a disputed transaction, so Peer 0/1 switch
313  // to agree with those peers. Peer 0/1 then closes with
314  // an 80% quorum of agreeing positions (5/6) match.
315  //
316  // Peers 2-5 do not change position, since tx 0 or tx 1
317  // have less than the 50% initial threshold. They also
318  // cannot declare consensus, since 4/6 agreeing
319  // positions are < 80% threshold. They therefore need an
320  // additional timerEntry call to see the updated
321  // positions from Peer 0 & 1.
322 
323  if (isParticipant)
324  {
325  BEAST_EXPECT(
326  peer->prevProposers == network.size() - 1);
327  BEAST_EXPECT(
328  peer->prevRoundTime > slowPeer->prevRoundTime);
329  }
330  else
331  {
332  BEAST_EXPECT(
333  peer->prevProposers == fast.size() - 1);
334  // so all peers should have closed together
335  BEAST_EXPECT(
336  peer->prevRoundTime == slowPeer->prevRoundTime);
337  }
338  }
339  }
340  }
341  }
342  }
343 
344  void
346  {
347  using namespace csf;
348  using namespace std::chrono;
349 
350  // This is a very specialized test to get ledgers to disagree on
351  // the close time. It unfortunately assumes knowledge about current
352  // timing constants. This is a necessary evil to get coverage up
353  // pending more extensive refactorings of timing constants.
354 
355  // In order to agree-to-disagree on the close time, there must be no
356  // clear majority of nodes agreeing on a close time. This test
357  // sets a relative offset to the peers internal clocks so that they
358  // send proposals with differing times.
359 
360  // However, agreement is on the effective close time, not the
361  // exact close time. The minimum closeTimeResolution is given by
362  // ledgerPossibleTimeResolutions[0], which is currently 10s. This means
363  // the skews need to be at least 10 seconds to have different effective
364  // close times.
365 
366  // Complicating this matter is that nodes will ignore proposals
367  // with times more than proposeFRESHNESS =20s in the past. So at
368  // the minimum granularity, we have at most 3 types of skews
369  // (0s,10s,20s).
370 
371  // This test therefore has 6 nodes, with 2 nodes having each type of
372  // skew. Then no majority (1/3 < 1/2) of nodes will agree on an
373  // actual close time.
374 
375  ConsensusParms const parms{};
376  Sim sim;
377 
378  PeerGroup groupA = sim.createGroup(2);
379  PeerGroup groupB = sim.createGroup(2);
380  PeerGroup groupC = sim.createGroup(2);
381  PeerGroup network = groupA + groupB + groupC;
382 
383  network.trust(network);
384  network.connect(
385  network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
386 
387  // Run consensus without skew until we have a short close time
388  // resolution
389  Peer* firstPeer = *groupA.begin();
390  while (firstPeer->lastClosedLedger.closeTimeResolution() >=
391  parms.proposeFRESHNESS)
392  sim.run(1);
393 
394  // Introduce a shift on the time of 2/3 of peers
395  for (Peer* peer : groupA)
396  peer->clockSkew = parms.proposeFRESHNESS / 2;
397  for (Peer* peer : groupB)
398  peer->clockSkew = parms.proposeFRESHNESS;
399 
400  sim.run(1);
401 
402  // All nodes agreed to disagree on the close time
403  if (BEAST_EXPECT(sim.synchronized()))
404  {
405  for (Peer* peer : network)
406  BEAST_EXPECT(!peer->lastClosedLedger.closeAgree());
407  }
408  }
409 
410  void
412  {
413  using namespace csf;
414  using namespace std::chrono;
415  // Specialized test to exercise a temporary fork in which some peers
416  // are working on an incorrect prior ledger.
417 
418  ConsensusParms const parms{};
419 
420  // Vary the time it takes to process validations to exercise detecting
421  // the wrong LCL at different phases of consensus
422  for (auto validationDelay : {0ms, parms.ledgerMIN_CLOSE})
423  {
424  // Consider 10 peers:
425  // 0 1 2 3 4 5 6 7 8 9
426  // minority majorityA majorityB
427  //
428  // Nodes 0-1 trust nodes 0-4
429  // Nodes 2-9 trust nodes 2-9
430  //
431  // By submitting tx 0 to nodes 0-4 and tx 1 to nodes 5-9,
432  // nodes 0-1 will generate the wrong LCL (with tx 0). The remaining
433  // nodes will instead accept the ledger with tx 1.
434 
435  // Nodes 0-1 will detect this mismatch during a subsequent round
436  // since nodes 2-4 will validate a different ledger.
437 
438  // Nodes 0-1 will acquire the proper ledger from the network and
439  // resume consensus and eventually generate the dominant network
440  // ledger.
441 
442  // This topology can potentially fork with the above trust relations
443  // but that is intended for this test.
444 
445  Sim sim;
446 
447  PeerGroup minority = sim.createGroup(2);
448  PeerGroup majorityA = sim.createGroup(3);
449  PeerGroup majorityB = sim.createGroup(5);
450 
451  PeerGroup majority = majorityA + majorityB;
452  PeerGroup network = minority + majority;
453 
454  SimDuration delay =
455  round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
456  minority.trustAndConnect(minority + majorityA, delay);
457  majority.trustAndConnect(majority, delay);
458 
459  CollectByNode<JumpCollector> jumps;
460  sim.collectors.add(jumps);
461 
462  BEAST_EXPECT(sim.trustGraph.canFork(parms.minCONSENSUS_PCT / 100.));
463 
464  // initial round to set prior state
465  sim.run(1);
466 
467  // Nodes in smaller UNL have seen tx 0, nodes in other unl have seen
468  // tx 1
469  for (Peer* peer : network)
470  peer->delays.recvValidation = validationDelay;
471  for (Peer* peer : (minority + majorityA))
472  peer->openTxs.insert(Tx{0});
473  for (Peer* peer : majorityB)
474  peer->openTxs.insert(Tx{1});
475 
476  // Run for additional rounds
477  // With no validation delay, only 2 more rounds are needed.
478  // 1. Round to generate different ledgers
479  // 2. Round to detect different prior ledgers (but still generate
480  // wrong ones) and recover within that round since wrong LCL
481  // is detected before we close
482  //
483  // With a validation delay of ledgerMIN_CLOSE, we need 3 more
484  // rounds.
485  // 1. Round to generate different ledgers
486  // 2. Round to detect different prior ledgers (but still generate
487  // wrong ones) but end up declaring consensus on wrong LCL (but
488  // with the right transaction set!). This is because we detect
489  // the wrong LCL after we have closed the ledger, so we declare
490  // consensus based solely on our peer proposals. But we haven't
491  // had time to acquire the right ledger.
492  // 3. Round to correct
493  sim.run(3);
494 
495  // The network never actually forks, since node 0-1 never see a
496  // quorum of validations to fully validate the incorrect chain.
497 
498  // However, for a non zero-validation delay, the network is not
499  // synchronized because nodes 0 and 1 are running one ledger behind
500  if (BEAST_EXPECT(sim.branches() == 1))
501  {
502  for (Peer const* peer : majority)
503  {
504  // No jumps for majority nodes
505  BEAST_EXPECT(jumps[peer->id].closeJumps.empty());
506  BEAST_EXPECT(jumps[peer->id].fullyValidatedJumps.empty());
507  }
508  for (Peer const* peer : minority)
509  {
510  auto& peerJumps = jumps[peer->id];
511  // last closed ledger jump between chains
512  {
513  if (BEAST_EXPECT(peerJumps.closeJumps.size() == 1))
514  {
515  JumpCollector::Jump const& jump =
516  peerJumps.closeJumps.front();
517  // Jump is to a different chain
518  BEAST_EXPECT(jump.from.seq() <= jump.to.seq());
519  BEAST_EXPECT(!jump.to.isAncestor(jump.from));
520  }
521  }
522  // fully validated jump forward in same chain
523  {
524  if (BEAST_EXPECT(
525  peerJumps.fullyValidatedJumps.size() == 1))
526  {
527  JumpCollector::Jump const& jump =
528  peerJumps.fullyValidatedJumps.front();
529  // Jump is to a different chain with same seq
530  BEAST_EXPECT(jump.from.seq() < jump.to.seq());
531  BEAST_EXPECT(jump.to.isAncestor(jump.from));
532  }
533  }
534  }
535  }
536  }
537 
538  {
539  // Additional test engineered to switch LCL during the establish
540  // phase. This was added to trigger a scenario that previously
541  // crashed, in which switchLCL switched from establish to open
542  // phase, but still processed the establish phase logic.
543 
544  // Loner node will accept an initial ledger A, but all other nodes
545  // accept ledger B a bit later. By delaying the time it takes
546  // to process a validation, loner node will detect the wrongLCL
547  // after it is already in the establish phase of the next round.
548 
549  Sim sim;
550  PeerGroup loner = sim.createGroup(1);
551  PeerGroup friends = sim.createGroup(3);
552  loner.trust(loner + friends);
553 
554  PeerGroup others = sim.createGroup(6);
555  PeerGroup clique = friends + others;
556  clique.trust(clique);
557 
558  PeerGroup network = loner + clique;
559  network.connect(
560  network, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
561 
562  // initial round to set prior state
563  sim.run(1);
564  for (Peer* peer : (loner + friends))
565  peer->openTxs.insert(Tx(0));
566  for (Peer* peer : others)
567  peer->openTxs.insert(Tx(1));
568 
569  // Delay validation processing
570  for (Peer* peer : network)
571  peer->delays.recvValidation = parms.ledgerGRANULARITY;
572 
573  // additional rounds to generate wrongLCL and recover
574  sim.run(2);
575 
576  // Check all peers recovered
577  for (Peer* p : network)
578  BEAST_EXPECT(p->prevLedgerID() == network[0]->prevLedgerID());
579  }
580  }
581 
582  void
584  {
585  using namespace csf;
586  using namespace std::chrono;
587 
588  // This is a specialized test engineered to yield ledgers with different
589  // close times even though the peers believe they had close time
590  // consensus on the ledger.
591  ConsensusParms parms;
592 
593  Sim sim;
594 
595  // This requires a group of 4 fast and 2 slow peers to create a
596  // situation in which a subset of peers requires seeing additional
597  // proposals to declare consensus.
598  PeerGroup slow = sim.createGroup(2);
599  PeerGroup fast = sim.createGroup(4);
600  PeerGroup network = fast + slow;
601 
602  for (Peer* peer : network)
603  peer->consensusParms = parms;
604 
605  // Connected trust graph
606  network.trust(network);
607 
608  // Fast and slow network connections
609  fast.connect(fast, round<milliseconds>(0.2 * parms.ledgerGRANULARITY));
610  slow.connect(
611  network, round<milliseconds>(1.1 * parms.ledgerGRANULARITY));
612 
613  // Run to the ledger *prior* to decreasing the resolution
615 
616  // In order to create the discrepency, we want a case where if
617  // X = effCloseTime(closeTime, resolution, parentCloseTime)
618  // X != effCloseTime(X, resolution, parentCloseTime)
619  //
620  // That is, the effective close time is not a fixed point. This can
621  // happen if X = parentCloseTime + 1, but a subsequent rounding goes
622  // to the next highest multiple of resolution.
623 
624  // So we want to find an offset (now + offset) % 30s = 15
625  // (now + offset) % 20s = 15
626  // This way, the next ledger will close and round up Due to the
627  // network delay settings, the round of consensus will take 5s, so
628  // the next ledger's close time will
629 
630  NetClock::duration when = network[0]->now().time_since_epoch();
631 
632  // Check we are before the 30s to 20s transition
633  NetClock::duration resolution =
634  network[0]->lastClosedLedger.closeTimeResolution();
635  BEAST_EXPECT(resolution == NetClock::duration{30s});
636 
637  while (((when % NetClock::duration{30s}) != NetClock::duration{15s}) ||
638  ((when % NetClock::duration{20s}) != NetClock::duration{15s}))
639  when += 1s;
640  // Advance the clock without consensus running (IS THIS WHAT
641  // PREVENTS IT IN PRACTICE?)
642  sim.scheduler.step_for(NetClock::time_point{when} - network[0]->now());
643 
644  // Run one more ledger with 30s resolution
645  sim.run(1);
646  if (BEAST_EXPECT(sim.synchronized()))
647  {
648  // close time should be ahead of clock time since we engineered
649  // the close time to round up
650  for (Peer* peer : network)
651  {
652  BEAST_EXPECT(peer->lastClosedLedger.closeTime() > peer->now());
653  BEAST_EXPECT(peer->lastClosedLedger.closeAgree());
654  }
655  }
656 
657  // All peers submit their own ID as a transaction
658  for (Peer* peer : network)
659  peer->submit(Tx{static_cast<std::uint32_t>(peer->id)});
660 
661  // Run 1 more round, this time it will have a decreased
662  // resolution of 20 seconds.
663 
664  // The network delays are engineered so that the slow peers
665  // initially have the wrong tx hash, but they see a majority
666  // of agreement from their peers and declare consensus
667  //
668  // The trick is that everyone starts with a raw close time of
669  // 84681s
670  // Which has
671  // effCloseTime(86481s, 20s, 86490s) = 86491s
672  // However, when the slow peers update their position, they change
673  // the close time to 86451s. The fast peers declare consensus with
674  // the 86481s as their position still.
675  //
676  // When accepted the ledger
677  // - fast peers use eff(86481s) -> 86491s as the close time
678  // - slow peers use eff(eff(86481s)) -> eff(86491s) -> 86500s!
679 
680  sim.run(1);
681 
682  BEAST_EXPECT(sim.synchronized());
683  }
684 
685  void
687  {
688  using namespace csf;
689  using namespace std::chrono;
690 
691  std::uint32_t numPeers = 10;
692  // Vary overlap between two UNLs
693  for (std::uint32_t overlap = 0; overlap <= numPeers; ++overlap)
694  {
695  ConsensusParms const parms{};
696  Sim sim;
697 
698  std::uint32_t numA = (numPeers - overlap) / 2;
699  std::uint32_t numB = numPeers - numA - overlap;
700 
701  PeerGroup aOnly = sim.createGroup(numA);
702  PeerGroup bOnly = sim.createGroup(numB);
703  PeerGroup commonOnly = sim.createGroup(overlap);
704 
705  PeerGroup a = aOnly + commonOnly;
706  PeerGroup b = bOnly + commonOnly;
707 
708  PeerGroup network = a + b;
709 
710  SimDuration delay =
711  round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
712  a.trustAndConnect(a, delay);
713  b.trustAndConnect(b, delay);
714 
715  // Initial round to set prior state
716  sim.run(1);
717  for (Peer* peer : network)
718  {
719  // Nodes have only seen transactions from their neighbors
720  peer->openTxs.insert(Tx{static_cast<std::uint32_t>(peer->id)});
721  for (Peer* to : sim.trustGraph.trustedPeers(peer))
722  peer->openTxs.insert(
723  Tx{static_cast<std::uint32_t>(to->id)});
724  }
725  sim.run(1);
726 
727  // Fork should not happen for 40% or greater overlap
728  // Since the overlapped nodes have a UNL that is the union of the
729  // two cliques, the maximum sized UNL list is the number of peers
730  if (overlap > 0.4 * numPeers)
731  BEAST_EXPECT(sim.synchronized());
732  else
733  {
734  // Even if we do fork, there shouldn't be more than 3 ledgers
735  // One for cliqueA, one for cliqueB and one for nodes in both
736  BEAST_EXPECT(sim.branches() <= 3);
737  }
738  }
739  }
740 
741  void
743  {
744  using namespace csf;
745  using namespace std::chrono;
746 
747  // Simulate a set of 5 validators that aren't directly connected but
748  // rely on a single hub node for communication
749 
750  ConsensusParms const parms{};
751  Sim sim;
752  PeerGroup validators = sim.createGroup(5);
753  PeerGroup center = sim.createGroup(1);
754  validators.trust(validators);
755  center.trust(validators);
756 
757  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
758  validators.connect(center, delay);
759 
760  center[0]->runAsValidator = false;
761 
762  // prep round to set initial state.
763  sim.run(1);
764 
765  // everyone submits their own ID as a TX and relay it to peers
766  for (Peer* p : validators)
767  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
768 
769  sim.run(1);
770 
771  // All peers are in sync
772  BEAST_EXPECT(sim.synchronized());
773  }
774 
775  // Helper collector for testPreferredByBranch
776  // Invasively disconnects network at bad times to cause splits
777  struct Disruptor
778  {
783  bool reconnected = false;
784 
786  csf::PeerGroup& net,
787  csf::PeerGroup& c,
788  csf::PeerGroup& split,
790  : network(net), groupCfast(c), groupCsplit(split), delay(d)
791  {
792  }
793 
794  template <class E>
795  void
797  {
798  }
799 
800  void
802  {
803  using namespace std::chrono;
804  // As soon as the the fastC node fully validates C, disconnect
805  // ALL c nodes from the network. The fast C node needs to disconnect
806  // as well to prevent it from relaying the validations it did see
807  if (who == groupCfast[0]->id &&
808  e.ledger.seq() == csf::Ledger::Seq{2})
809  {
810  network.disconnect(groupCsplit);
811  network.disconnect(groupCfast);
812  }
813  }
814 
815  void
817  {
818  // As soon as anyone generates a child of B or C, reconnect the
819  // network so those validations make it through
820  if (!reconnected && e.ledger.seq() == csf::Ledger::Seq{3})
821  {
822  reconnected = true;
823  network.connect(groupCsplit, delay);
824  }
825  }
826  };
827 
828  void
830  {
831  using namespace csf;
832  using namespace std::chrono;
833 
834  // Simulate network splits that are prevented from forking when using
835  // preferred ledger by trie. This is a contrived example that involves
836  // excessive network splits, but demonstrates the safety improvement
837  // from the preferred ledger by trie approach.
838 
839  // Consider 10 validating nodes that comprise a single common UNL
840  // Ledger history:
841  // 1: A
842  // _/ \_
843  // 2: B C
844  // _/ _/ \_
845  // 3: D C' |||||||| (8 different ledgers)
846 
847  // - All nodes generate the common ledger A
848  // - 2 nodes generate B and 8 nodes generate C
849  // - Only 1 of the C nodes sees all the C validations and fully
850  // validates C. The rest of the C nodes split at just the right time
851  // such that they never see any C validations but their own.
852  // - The C nodes continue and generate 8 different child ledgers.
853  // - Meanwhile, the D nodes only saw 1 validation for C and 2
854  // validations
855  // for B.
856  // - The network reconnects and the validations for generation 3 ledgers
857  // are observed (D and the 8 C's)
858  // - In the old approach, 2 votes for D outweights 1 vote for each C'
859  // so the network would avalanche towards D and fully validate it
860  // EVEN though C was fully validated by one node
861  // - In the new approach, 2 votes for D are not enough to outweight the
862  // 8 implicit votes for C, so nodes will avalanche to C instead
863 
864  ConsensusParms const parms{};
865  Sim sim;
866 
867  // Goes A->B->D
868  PeerGroup groupABD = sim.createGroup(2);
869  // Single node that initially fully validates C before the split
870  PeerGroup groupCfast = sim.createGroup(1);
871  // Generates C, but fails to fully validate before the split
872  PeerGroup groupCsplit = sim.createGroup(7);
873 
874  PeerGroup groupNotFastC = groupABD + groupCsplit;
875  PeerGroup network = groupABD + groupCsplit + groupCfast;
876 
877  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
878  SimDuration fDelay = round<milliseconds>(0.1 * parms.ledgerGRANULARITY);
879 
880  network.trust(network);
881  // C must have a shorter delay to see all the validations before the
882  // other nodes
883  network.connect(groupCfast, fDelay);
884  // The rest of the network is connected at the same speed
885  groupNotFastC.connect(groupNotFastC, delay);
886 
887  Disruptor dc(network, groupCfast, groupCsplit, delay);
888  sim.collectors.add(dc);
889 
890  // Consensus round to generate ledger A
891  sim.run(1);
892  BEAST_EXPECT(sim.synchronized());
893 
894  // Next round generates B and C
895  // To force B, we inject an extra transaction in to those nodes
896  for (Peer* peer : groupABD)
897  {
898  peer->txInjections.emplace(peer->lastClosedLedger.seq(), Tx{42});
899  }
900  // The Disruptor will ensure that nodes disconnect before the C
901  // validations make it to all but the fastC node
902  sim.run(1);
903 
904  // We are no longer in sync, but have not yet forked:
905  // 9 nodes consider A the last fully validated ledger and fastC sees C
906  BEAST_EXPECT(!sim.synchronized());
907  BEAST_EXPECT(sim.branches() == 1);
908 
909  // Run another round to generate the 8 different C' ledgers
910  for (Peer* p : network)
911  p->submit(Tx(static_cast<std::uint32_t>(p->id)));
912  sim.run(1);
913 
914  // Still not forked
915  BEAST_EXPECT(!sim.synchronized());
916  BEAST_EXPECT(sim.branches() == 1);
917 
918  // Disruptor will reconnect all but the fastC node
919  sim.run(1);
920 
921  if (BEAST_EXPECT(sim.branches() == 1))
922  {
923  BEAST_EXPECT(sim.synchronized());
924  }
925  else // old approach caused a fork
926  {
927  BEAST_EXPECT(sim.branches(groupNotFastC) == 1);
928  BEAST_EXPECT(sim.synchronized(groupNotFastC) == 1);
929  }
930  }
931 
932  // Helper collector for testPauseForLaggards
933  // This will remove the ledgerAccept delay used to
934  // initially create the slow vs. fast validator groups.
935  struct UndoDelay
936  {
938 
940  {
941  }
942 
943  template <class E>
944  void
946  {
947  }
948 
949  void
951  {
952  for (csf::Peer* p : g)
953  {
954  if (p->id == who)
955  p->delays.ledgerAccept = std::chrono::seconds{0};
956  }
957  }
958  };
959 
960  void
962  {
963  using namespace csf;
964  using namespace std::chrono;
965 
966  // Test that validators that jump ahead of the network slow
967  // down.
968 
969  // We engineer the following validated ledger history scenario:
970  //
971  // / --> B1 --> C1 --> ... -> G1 "ahead"
972  // A
973  // \ --> B2 --> C2 "behind"
974  //
975  // After validating a common ledger A, a set of "behind" validators
976  // briefly run slower and validate the lower chain of ledgers.
977  // The "ahead" validators run normal speed and run ahead validating the
978  // upper chain of ledgers.
979  //
980  // Due to the uncommited support definition of the preferred branch
981  // protocol, even if the "behind" validators are a majority, the "ahead"
982  // validators cannot jump to the proper branch until the "behind"
983  // validators catch up to the same sequence number. For this test to
984  // succeed, the ahead validators need to briefly slow down consensus.
985 
986  ConsensusParms const parms{};
987  Sim sim;
988  SimDuration delay = round<milliseconds>(0.2 * parms.ledgerGRANULARITY);
989 
990  PeerGroup behind = sim.createGroup(3);
991  PeerGroup ahead = sim.createGroup(2);
992  PeerGroup network = ahead + behind;
993 
994  hash_set<Peer::NodeKey_t> trustedKeys;
995  for (Peer* p : network)
996  trustedKeys.insert(p->key);
997  for (Peer* p : network)
998  p->trustedKeys = trustedKeys;
999 
1000  network.trustAndConnect(network, delay);
1001 
1002  // Initial seed round to set prior state
1003  sim.run(1);
1004 
1005  // Have the "behind" group initially take a really long time to
1006  // accept a ledger after ending deliberation
1007  for (Peer* p : behind)
1008  p->delays.ledgerAccept = 20s;
1009 
1010  // Use the collector to revert the delay after the single
1011  // slow ledger is generated
1012  UndoDelay undoDelay{behind};
1013  sim.collectors.add(undoDelay);
1014 
1015 #if 0
1016  // Have all beast::journal output printed to stdout
1017  for (Peer* p : network)
1018  p->sink.threshold(beast::severities::kAll);
1019 
1020  // Print ledger accept and fully validated events to stdout
1021  StreamCollector sc{std::cout};
1022  sim.collectors.add(sc);
1023 #endif
1024  // Run the simulation for 100 seconds of simulation time with
1025  std::chrono::nanoseconds const simDuration = 100s;
1026 
1027  // Simulate clients submitting 1 tx every 5 seconds to a random
1028  // validator
1029  Rate const rate{1, 5s};
1030  auto peerSelector = makeSelector(
1031  network.begin(),
1032  network.end(),
1033  std::vector<double>(network.size(), 1.),
1034  sim.rng);
1035  auto txSubmitter = makeSubmitter(
1036  ConstantDistribution{rate.inv()},
1037  sim.scheduler.now(),
1038  sim.scheduler.now() + simDuration,
1039  peerSelector,
1040  sim.scheduler,
1041  sim.rng);
1042 
1043  // Run simulation
1044  sim.run(simDuration);
1045 
1046  // Verify that the network recovered
1047  BEAST_EXPECT(sim.synchronized());
1048  }
1049 
1050  void
1051  run() override
1052  {
1055 
1056  testStandalone();
1057  testPeersAgree();
1058  testSlowPeers();
1060  testWrongLCL();
1062  testFork();
1063  testHubNetwork();
1066  }
1067 };
1068 
1070 } // namespace test
1071 } // namespace ripple
ripple::test::csf::SimTime
typename SimClock::time_point SimTime
Definition: SimTime.h:36
ripple::test::Consensus_test::testHubNetwork
void testHubNetwork()
Definition: Consensus_test.cpp:742
ripple::checkConsensus
ConsensusState checkConsensus(std::size_t prevProposers, std::size_t currentProposers, std::size_t currentAgree, std::size_t currentFinished, std::chrono::milliseconds previousAgreeTime, std::chrono::milliseconds currentAgreeTime, ConsensusParms const &parms, bool proposing, beast::Journal j)
Determine whether the network reached consensus and whether we joined.
Definition: Consensus.cpp:108
ripple::test::Consensus_test::testSlowPeers
void testSlowPeers()
Definition: Consensus_test.cpp:180
ripple::test::Consensus_test::Disruptor
Definition: Consensus_test.cpp:777
utility
ripple::test::Consensus_test::testPeersAgree
void testPeersAgree()
Definition: Consensus_test.cpp:143
ripple::Rate
Represents a transfer rate.
Definition: Rate.h:37
ripple::shouldCloseLedger
bool shouldCloseLedger(bool anyTransactions, std::size_t prevProposers, std::size_t proposersClosed, std::size_t proposersValidated, std::chrono::milliseconds prevRoundTime, std::chrono::milliseconds timeSincePrevClose, std::chrono::milliseconds openTime, std::chrono::milliseconds idleInterval, ConsensusParms const &parms, beast::Journal j)
Determines whether the current ledger should close at this time.
Definition: Consensus.cpp:26
std::unordered_set
STL class.
ripple::test::Consensus_test::testConsensusCloseTimeRounding
void testConsensusCloseTimeRounding()
Definition: Consensus_test.cpp:583
std::vector
STL class.
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:816
ripple::test::Consensus_test::Disruptor::Disruptor
Disruptor(csf::PeerGroup &net, csf::PeerGroup &c, csf::PeerGroup &split, csf::SimDuration d)
Definition: Consensus_test.cpp:785
ripple::Consensus
Generic implementation of consensus algorithm.
Definition: Consensus.h:284
ripple::ConsensusState::Yes
@ Yes
We have consensus along with the network.
ripple::test::Consensus_test::Disruptor::delay
csf::SimDuration delay
Definition: Consensus_test.cpp:782
std::chrono::duration
beast::severities::kAll
@ kAll
Definition: Journal.h:32
ripple::test::csf::FullyValidateLedger
Peer fully validated a new ledger.
Definition: events.h:137
ripple::ConsensusParms::ledgerGRANULARITY
std::chrono::milliseconds ledgerGRANULARITY
How often we check state or change positions.
Definition: ConsensusParms.h:95
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID who, csf::SimTime, csf::FullyValidateLedger const &e)
Definition: Consensus_test.cpp:801
ripple::test::csf::Ledger::seq
Seq seq() const
Definition: ledgers.h:173
ripple::test::Consensus_test::Disruptor::groupCfast
csf::PeerGroup & groupCfast
Definition: Consensus_test.cpp:780
ripple::test::csf::FullyValidateLedger::ledger
Ledger ledger
The new fully validated ledger.
Definition: events.h:140
ripple::test::csf::AcceptLedger
Peer accepted consensus results.
Definition: events.h:118
ripple::test::csf::PeerGroup::connect
void connect(PeerGroup const &o, SimDuration delay)
Establish network connection.
Definition: PeerGroup.h:164
std::cout
ripple::test::Consensus_test::testCheckConsensus
void testCheckConsensus()
Definition: Consensus_test.cpp:78
ripple::test::csf::AcceptLedger::ledger
Ledger ledger
Definition: events.h:121
std::chrono::time_point
ripple::test::Consensus_test::testStandalone
void testStandalone()
Definition: Consensus_test.cpp:119
ripple::test::Consensus_test::Disruptor::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:796
std::uint32_t
ripple::ConsensusState::No
@ No
We do not have consensus.
ripple::ConsensusState::MovedOn
@ MovedOn
The network has consensus without us.
ripple::test::Consensus_test::run
void run() override
Definition: Consensus_test.cpp:1051
ripple::test::Consensus_test::testShouldCloseLedger
void testShouldCloseLedger()
Definition: Consensus_test.cpp:40
ripple::test::SuiteJournal
Definition: SuiteJournal.h:88
ripple::test::csf::PeerGroup::disconnect
void disconnect(PeerGroup const &o)
Destroy network connection.
Definition: PeerGroup.h:184
ripple::test::Consensus_test::testPreferredByBranch
void testPreferredByBranch()
Definition: Consensus_test.cpp:829
ripple::test::csf::PeerGroup
A group of simulation Peers.
Definition: PeerGroup.h:39
ripple::test::Consensus_test::testFork
void testFork()
Definition: Consensus_test.cpp:686
ripple::test::csf::Peer
A single peer in the simulation.
Definition: test/csf/Peer.h:54
ripple::test::Consensus_test::journal_
SuiteJournal journal_
Definition: Consensus_test.cpp:32
ripple
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: RCLCensorshipDetector.h:29
ripple::test::Consensus_test::UndoDelay::g
csf::PeerGroup & g
Definition: Consensus_test.cpp:937
ripple::test::Consensus_test::UndoDelay
Definition: Consensus_test.cpp:935
ripple::ConsensusParms
Consensus algorithm parameters.
Definition: ConsensusParms.h:33
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID, csf::SimTime, E const &)
Definition: Consensus_test.cpp:945
std::unordered_set::insert
T insert(T... args)
ripple::test::Consensus_test::testWrongLCL
void testWrongLCL()
Definition: Consensus_test.cpp:411
ripple::increaseLedgerTimeResolutionEvery
constexpr auto increaseLedgerTimeResolutionEvery
How often we increase the close time resolution (in numbers of ledgers)
Definition: LedgerTiming.h:50
ripple::test::Consensus_test::Disruptor::network
csf::PeerGroup & network
Definition: Consensus_test.cpp:779
ripple::test::Consensus_test::UndoDelay::on
void on(csf::PeerID who, csf::SimTime, csf::AcceptLedger const &e)
Definition: Consensus_test.cpp:950
ripple::Peer::id
virtual id_t id() const =0
ripple::tagged_integer< std::uint32_t, PeerIDTag >
ripple::test::csf::SimDuration
typename SimClock::duration SimDuration
Definition: SimTime.h:35
ripple::test::Consensus_test::UndoDelay::UndoDelay
UndoDelay(csf::PeerGroup &a)
Definition: Consensus_test.cpp:939
ripple::test::Consensus_test::Disruptor::groupCsplit
csf::PeerGroup & groupCsplit
Definition: Consensus_test.cpp:781
ripple::test::Consensus_test::testPauseForLaggards
void testPauseForLaggards()
Definition: Consensus_test.cpp:961
ripple::test::Consensus_test::Consensus_test
Consensus_test()
Definition: Consensus_test.cpp:35
ripple::test::Consensus_test
Definition: Consensus_test.cpp:30
ripple::test::Consensus_test::testCloseTimeDisagree
void testCloseTimeDisagree()
Definition: Consensus_test.cpp:345
ripple::Peer
Represents a peer connection in the overlay.
Definition: ripple/overlay/Peer.h:45
ripple::test::jtx::rate
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
ripple::test::BEAST_DEFINE_TESTSUITE
BEAST_DEFINE_TESTSUITE(DeliverMin, app, ripple)
std::chrono