Line data Source code
1 : // Copyright (c) 2020-2022 The PIVX Core developers
2 : // Distributed under the MIT software license, see the accompanying
3 : // file COPYING or https://www.opensource.org/licenses/mit-license.php.
4 :
5 : #include "masternode-sync.h"
6 :
7 : #include "llmq/quorums_blockprocessor.h"
8 : #include "llmq/quorums_chainlocks.h"
9 : #include "llmq/quorums_dkgsessionmgr.h"
10 : #include "llmq/quorums_signing.h"
11 : #include "llmq/quorums_signing_shares.h"
12 : #include "masternodeman.h" // for mnodeman
13 : #include "net_processing.h" // for Misbehaving
14 : #include "netmessagemaker.h"
15 : #include "spork.h" // for sporkManager
16 : #include "streams.h" // for CDataStream
17 : #include "tiertwo/tiertwo_sync_state.h"
18 :
19 :
20 : // Update in-flight message status if needed
21 3907 : bool CMasternodeSync::UpdatePeerSyncState(const NodeId& id, const char* msg, const int nextSyncStatus)
22 : {
23 3907 : auto it = peersSyncState.find(id);
24 3907 : if (it != peersSyncState.end()) {
25 2053 : auto& peerData = it->second; // take a reference, not a copy, so the update below persists
26 1929 : auto msgMapIt = peerData.mapMsgData.find(msg);
27 1929 : if (msgMapIt != peerData.mapMsgData.end()) {
28 : // exists, let's update the received status and the sync state.
29 :
30 : // future: these booleans will not be needed once the peer syncState status gets implemented.
31 1805 : msgMapIt->second.second = true;
32 1805 : LogPrintf("%s: %s message updated peer sync state\n", __func__, msgMapIt->first);
33 :
34 : // Only update sync status if we really need it. Otherwise, it's just good redundancy to verify data several times.
35 1805 : if (g_tiertwo_sync_state.GetSyncPhase() < nextSyncStatus) {
36 : // todo: this should only happen if more than N peers have sent the data.
37 : // move overall tier two sync state to the next one if needed.
38 500 : LogPrintf("%s: moving to next asset %d\n", __func__, nextSyncStatus);
39 500 : g_tiertwo_sync_state.SetCurrentSyncPhase(nextSyncStatus);
40 : }
41 1805 : return true;
42 : }
43 : }
44 : return false;
45 : }
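
The pair updated above is the per-peer bookkeeping the rest of this file relies on: each peer maps a message type to (last request time, response received). A minimal sketch of the structure implied by those accesses, with field names taken from the code; this is reconstructed for illustration, not copied from the actual header:

    // Sketch of the state UpdatePeerSyncState() walks: one entry per peer,
    // each mapping a NetMsgType constant to (last request time, answered?).
    // Keying on const char* works only because the NetMsgType message-name
    // constants are unique objects, so pointer comparison is sufficient.
    struct TierTwoPeerData {
        std::map<const char*, std::pair<int64_t, bool>> mapMsgData;
    };
    std::map<NodeId, TierTwoPeerData> peersSyncState;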
46 :
47 88026 : bool CMasternodeSync::MessageDispatcher(CNode* pfrom, std::string& strCommand, CDataStream& vRecv)
48 : {
49 88026 : if (strCommand == NetMsgType::GETSPORKS) {
50 : // send sporks
51 1415 : sporkManager.ProcessGetSporks(pfrom, strCommand, vRecv);
52 1415 : return true;
53 : }
54 :
55 86611 : if (strCommand == NetMsgType::QFCOMMITMENT) {
56 : // Only process qfc if v6.0.0 is enforced.
57 96 : if (!deterministicMNManager->IsDIP3Enforced()) return true; // nothing to do.
58 96 : int retMisbehavingScore{0};
59 96 : llmq::quorumBlockProcessor->ProcessMessage(pfrom, vRecv, retMisbehavingScore);
60 96 : if (retMisbehavingScore > 0) {
61 0 : WITH_LOCK(cs_main, Misbehaving(pfrom->GetId(), retMisbehavingScore));
62 : }
63 96 : return true;
64 : }
65 :
66 86515 : if (strCommand == NetMsgType::QCONTRIB
67 86394 : || strCommand == NetMsgType::QCOMPLAINT
68 86368 : || strCommand == NetMsgType::QJUSTIFICATION
69 172875 : || strCommand == NetMsgType::QPCOMMITMENT) {
70 259 : if (!llmq::quorumDKGSessionManager->ProcessMessage(pfrom, strCommand, vRecv)) {
71 0 : WITH_LOCK(cs_main, Misbehaving(pfrom->GetId(), 100));
72 : }
73 259 : return true;
74 : }
75 86256 : if (strCommand == NetMsgType::QSIGSHARESINV || strCommand == NetMsgType::QGETSIGSHARES || strCommand == NetMsgType::QBSIGSHARES || strCommand == NetMsgType::QSIGSESANN || strCommand == NetMsgType::QSIGSHARE) {
76 710 : llmq::quorumSigSharesManager->ProcessMessage(pfrom, strCommand, vRecv, *g_connman);
77 710 : return true;
78 : }
79 85546 : if (strCommand == NetMsgType::QSIGREC) {
80 1721 : llmq::quorumSigningManager->ProcessMessage(pfrom, strCommand, vRecv, *g_connman);
81 1721 : return true;
82 : }
83 :
84 83825 : if (strCommand == NetMsgType::CLSIG) {
85 725 : llmq::chainLocksHandler->ProcessMessage(pfrom, strCommand, vRecv, *g_connman);
86 : }
87 :
88 83825 : if (strCommand == NetMsgType::GETMNLIST) {
89 : // Get Masternode list or specific entry
90 1690 : CTxIn vin;
91 845 : vRecv >> vin;
92 845 : int banScore = mnodeman.ProcessGetMNList(pfrom, vin);
93 845 : if (banScore > 0) {
94 0 : LOCK(cs_main);
95 0 : Misbehaving(pfrom->GetId(), banScore);
96 : }
97 845 : return true;
98 : }
99 :
100 82980 : if (strCommand == NetMsgType::SPORK) {
101 : // As there is no completion message, SPORK_INVALID is used as the final message for now.
102 : // This is just a hack and should be replaced with a dedicated message; guard it until the
103 : // protocol gets deployed on mainnet, and add compatibility with the previous protocol as well.
104 4334 : CSporkMessage spork;
105 2167 : vRecv >> spork;
106 2167 : int banScore = sporkManager.ProcessSporkMsg(spork);
107 2167 : if (banScore > 0) {
108 0 : LOCK(cs_main);
109 0 : Misbehaving(pfrom->GetId(), banScore);
110 0 : return true;
111 : }
112 : // All good; update in-flight message status if needed
113 2167 : if (!UpdatePeerSyncState(pfrom->GetId(), NetMsgType::GETSPORKS, GetNextAsset(MASTERNODE_SYNC_SPORKS))) {
114 : // This can happen when the message thread requests the sporks on its own.
115 : // So, for now, just update the peer status and move it to the next state when the end message arrives.
116 2079 : if (spork.nSporkID == SPORK_INVALID) {
117 1288 : if (g_tiertwo_sync_state.GetSyncPhase() < MASTERNODE_SYNC_LIST) {
118 : // future note: use internal cs for RequestedMasternodeAssets.
119 259 : g_tiertwo_sync_state.SetCurrentSyncPhase(MASTERNODE_SYNC_LIST);
120 : }
121 : }
122 : }
123 2167 : return true;
124 : }
125 :
126 80813 : if (strCommand == NetMsgType::SYNCSTATUSCOUNT) {
127 : // Nothing to do.
128 25063 : if (g_tiertwo_sync_state.GetSyncPhase() >= MASTERNODE_SYNC_FINISHED) return true;
129 :
130 : // Sync status count
131 1754 : int nItemID;
132 1754 : int nCount;
133 1754 : vRecv >> nItemID >> nCount;
134 :
135 : // Update stats
136 1754 : ProcessSyncStatusMsg(nItemID, nCount);
137 :
138 : // this means we will receive no further communication on the first sync
139 1754 : switch (nItemID) {
140 841 : case MASTERNODE_SYNC_LIST: {
141 841 : UpdatePeerSyncState(pfrom->GetId(), NetMsgType::GETMNLIST, GetNextAsset(nItemID));
142 841 : return true;
143 : }
144 756 : case MASTERNODE_SYNC_MNW: {
145 756 : UpdatePeerSyncState(pfrom->GetId(), NetMsgType::GETMNWINNERS, GetNextAsset(nItemID));
146 756 : return true;
147 : }
148 143 : case MASTERNODE_SYNC_BUDGET_PROP: {
149 : // TODO: This could be a MASTERNODE_SYNC_BUDGET_FIN as well; we should possibly decouple the budget finalization sync
150 : // from MASTERNODE_SYNC_BUDGET_PROP (both are under the BUDGETVOTESYNC message)
151 143 : UpdatePeerSyncState(pfrom->GetId(), NetMsgType::BUDGETVOTESYNC, GetNextAsset(nItemID));
152 143 : return true;
153 : }
154 14 : case MASTERNODE_SYNC_BUDGET_FIN: {
155 : // No need to handle this one; it is handled by the proposals sync message for now.
156 14 : return true;
157 : }
158 : }
159 : }
160 :
161 : return false;
162 : }
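
GetNextAsset() is called throughout the dispatcher but defined outside this listing. A plausible sketch of the progression it encodes, inferred only from the ordering visible in this file (SPORKS -> LIST -> MNW -> BUDGET -> FINISHED); treat the exact mapping as an assumption:

    // Illustrative sketch of the phase progression the dispatcher above asks
    // for; the real GetNextAsset() lives elsewhere in the sync code.
    static int GetNextAssetSketch(int itemID)
    {
        switch (itemID) {
            case MASTERNODE_SYNC_SPORKS: return MASTERNODE_SYNC_LIST;
            case MASTERNODE_SYNC_LIST:   return MASTERNODE_SYNC_MNW;
            case MASTERNODE_SYNC_MNW:    return MASTERNODE_SYNC_BUDGET;
            case MASTERNODE_SYNC_BUDGET: return MASTERNODE_SYNC_FINISHED;
            default:                     return MASTERNODE_SYNC_FINISHED;
        }
    }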
163 :
164 : template <typename... Args>
165 2504 : void CMasternodeSync::PushMessage(CNode* pnode, const char* msg, Args&&... args)
166 : {
167 2504 : g_connman->PushMessage(pnode, CNetMsgMaker(pnode->GetSendVersion()).Make(msg, std::forward<Args>(args)...));
168 2504 : }
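
PushMessage() is a thin variadic wrapper: it perfectly forwards its arguments into CNetMsgMaker::Make(), serializing them at the peer's negotiated send version. A usage sketch, with `pnode` standing in for a connected peer:

    // Serialize a GETMNLIST request carrying an empty CTxIn and push it to
    // the peer; any trailing arguments are forwarded into the serializer.
    PushMessage(pnode, NetMsgType::GETMNLIST, CTxIn());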
169 :
170 : template <typename... Args>
171 2592 : void CMasternodeSync::RequestDataTo(CNode* pnode, const char* msg, bool forceRequest, Args&&... args)
172 : {
173 2592 : const auto& it = peersSyncState.find(pnode->GetId());
174 2592 : bool exist = it != peersSyncState.end();
175 2592 : if (!exist || forceRequest) {
176 : // Erase it if this is a forced request
177 952 : if (exist) {
178 59 : peersSyncState.at(pnode->GetId()).mapMsgData.erase(msg);
179 : }
180 : // send the message
181 952 : PushMessage(pnode, msg, std::forward<Args>(args)...);
182 :
183 : // Add data to the tier two peers sync state
184 952 : TierTwoPeerData peerData;
185 952 : peerData.mapMsgData.emplace(msg, std::make_pair(GetTime(), false));
186 952 : peersSyncState.emplace(pnode->GetId(), peerData);
187 : } else {
188 : // Check if we have sent the message or not
189 1640 : TierTwoPeerData& peerData = it->second;
190 1640 : const auto& msgMapIt = peerData.mapMsgData.find(msg);
191 :
192 1640 : if (msgMapIt == peerData.mapMsgData.end()) {
193 : // message doesn't exist, push it and add it to the map.
194 1552 : PushMessage(pnode, msg, std::forward<Args>(args)...);
195 1552 : peerData.mapMsgData.emplace(msg, std::make_pair(GetTime(), false));
196 : } else {
197 : // Message already sent; next step is to check whether it was answered or not
198 : // and, if needed, request it again after a certain amount of time.
199 :
200 : // Check if the node answered the message or not
201 88 : if (!msgMapIt->second.second) {
202 88 : int64_t lastRequestTime = msgMapIt->second.first;
203 88 : if (lastRequestTime + 600 < GetTime()) {
204 : // ten minutes have passed, let's ask for it again.
205 59 : RequestDataTo(pnode, msg, true, std::forward<Args>(args)...);
206 : }
207 : }
208 :
209 : }
210 : }
211 2592 : }
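
The net effect of RequestDataTo() is a small request/retry protocol per (peer, message): the first call sends the request and records it, repeated calls are suppressed while an answer is pending, and an unanswered request is re-sent after 600 seconds through the forceRequest recursion. A condensed timeline sketch (the timings are illustrative):

    // t = 0:    first call -> message pushed, mapMsgData[msg] = (now, false)
    RequestDataTo(pnode, NetMsgType::GETSPORKS, false);
    // t < 600s: response still pending -> call is a no-op
    RequestDataTo(pnode, NetMsgType::GETSPORKS, false);
    // t > 600s: stale entry erased, request re-sent with forceRequest = true
    RequestDataTo(pnode, NetMsgType::GETSPORKS, false);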
212 :
213 2533 : void CMasternodeSync::SyncRegtest(CNode* pnode)
214 : {
215 : // skip mn list and winners sync if legacy mn are obsolete
216 2533 : int syncPhase = g_tiertwo_sync_state.GetSyncPhase();
217 2533 : if (deterministicMNManager->LegacyMNObsolete() &&
218 3 : (syncPhase == MASTERNODE_SYNC_LIST || syncPhase == MASTERNODE_SYNC_MNW)) {
219 2 : g_tiertwo_sync_state.SetCurrentSyncPhase(MASTERNODE_SYNC_BUDGET);
220 2 : syncPhase = g_tiertwo_sync_state.GetSyncPhase();
221 : }
222 :
223 : // Initial sync: verify that the other peer answered all of the messages successfully
224 2533 : if (syncPhase == MASTERNODE_SYNC_SPORKS) {
225 180 : RequestDataTo(pnode, NetMsgType::GETSPORKS, false);
226 2353 : } else if (syncPhase == MASTERNODE_SYNC_LIST) {
227 1726 : RequestDataTo(pnode, NetMsgType::GETMNLIST, false, CTxIn());
228 1490 : } else if (syncPhase == MASTERNODE_SYNC_MNW) {
229 764 : RequestDataTo(pnode, NetMsgType::GETMNWINNERS, false, mnodeman.CountEnabled());
230 726 : } else if (syncPhase == MASTERNODE_SYNC_BUDGET) {
231 : // sync masternode votes
232 1452 : RequestDataTo(pnode, NetMsgType::BUDGETVOTESYNC, false, uint256());
233 0 : } else if (syncPhase == MASTERNODE_SYNC_FINISHED) {
234 0 : LogPrintf("REGTEST SYNC FINISHED!\n");
235 : }
236 2533 : }
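
SyncRegtest() advances at most one phase per call, so something must invoke it periodically for every connected peer until MASTERNODE_SYNC_FINISHED is reached. A hypothetical driver sketch; the actual scheduling lives in the sync thread outside this listing, and ForEachNode() follows the CConnman interface:

    // Poll every peer once per tick until the overall tier-two state is done.
    if (g_tiertwo_sync_state.GetSyncPhase() < MASTERNODE_SYNC_FINISHED) {
        g_connman->ForEachNode([this](CNode* pnode) {
            SyncRegtest(pnode);
        });
    }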
237 :