/__w/smoldot/smoldot/repo/full-node/src/consensus_service.rs
Line | Count | Source (jump to first uncovered line) |
1 | | // Smoldot |
2 | | // Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. |
3 | | // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 |
4 | | |
5 | | // This program is free software: you can redistribute it and/or modify |
6 | | // it under the terms of the GNU General Public License as published by |
7 | | // the Free Software Foundation, either version 3 of the License, or |
8 | | // (at your option) any later version. |
9 | | |
10 | | // This program is distributed in the hope that it will be useful, |
11 | | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | | // GNU General Public License for more details. |
14 | | |
15 | | // You should have received a copy of the GNU General Public License |
16 | | // along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | | |
18 | | //! Background synchronization service. |
19 | | //! |
20 | | //! The [`ConsensusService`] manages a background task dedicated to synchronizing the chain with |
21 | | //! the network and authoring blocks. |
22 | | //! Importantly, its design is oriented towards the particular use case of the full node. |
23 | | |
24 | | // TODO: doc |
25 | | // TODO: re-review this once finished |
26 | | |
27 | | use crate::{database_thread, jaeger_service, network_service, LogCallback, LogLevel}; |
28 | | |
29 | | use core::num::NonZeroU32; |
30 | | use futures_channel::{mpsc, oneshot}; |
31 | | use futures_lite::FutureExt as _; |
32 | | use futures_util::{ |
33 | | future, |
34 | | stream::{self, FuturesUnordered}, |
35 | | SinkExt as _, StreamExt as _, |
36 | | }; |
37 | | use hashbrown::HashSet; |
38 | | use rand::seq::IteratorRandom; |
39 | | use smol::lock::Mutex; |
40 | | use smoldot::{ |
41 | | author, |
42 | | database::full_sqlite, |
43 | | executor::{self, host, runtime_call}, |
44 | | header, |
45 | | identity::keystore, |
46 | | informant::HashDisplay, |
47 | | libp2p, |
48 | | network::{self, codec::BlockData}, |
49 | | sync::all, |
50 | | trie, |
51 | | verify::body_only, |
52 | | }; |
53 | | use std::{ |
54 | | array, |
55 | | borrow::Cow, |
56 | | cmp, |
57 | | future::Future, |
58 | | iter, |
59 | | num::{NonZeroU64, NonZeroUsize}, |
60 | | pin::Pin, |
61 | | sync::Arc, |
62 | | time::{Duration, Instant, SystemTime}, |
63 | | }; |
64 | | |
65 | | /// Configuration for a [`ConsensusService`]. |
66 | | pub struct Config { |
67 | | /// Closure that spawns background tasks. |
68 | | pub tasks_executor: Box<dyn FnMut(future::BoxFuture<'static, ()>) + Send>, |
69 | | |
70 | | /// Function called in order to notify of something. |
71 | | pub log_callback: Arc<dyn LogCallback + Send + Sync>, |
72 | | |
73 | | /// Database to use to read and write information about the chain. |
74 | | pub database: Arc<database_thread::DatabaseThread>, |
75 | | |
76 | | /// Number of bytes of the block number in the networking protocol. |
77 | | pub block_number_bytes: usize, |
78 | | |
79 | | /// Hash of the genesis block. |
80 | | /// |
81 | | /// > **Note**: At the time of writing of this comment, the value in this field is used only |
82 | | /// > to compare against a known genesis hash and print a warning. |
83 | | pub genesis_block_hash: [u8; 32], |
84 | | |
85 | | /// Stores of key to use for all block-production-related purposes. |
86 | | pub keystore: Arc<keystore::Keystore>, |
87 | | |
88 | | /// Access to the network, and identifier of the chain to sync from the point of view of the |
89 | | /// network service. |
90 | | pub network_service: ( |
91 | | Arc<network_service::NetworkService>, |
92 | | network_service::ChainId, |
93 | | ), |
94 | | |
95 | | /// Receiver for events coming from the network, as returned by |
96 | | /// [`network_service::NetworkService::new`]. |
97 | | pub network_events_receiver: stream::BoxStream<'static, network_service::Event>, |
98 | | |
99 | | /// Service to use to report traces. |
100 | | pub jaeger_service: Arc<jaeger_service::JaegerService>, |
101 | | |
102 | | /// A node has the authorization to author a block during a slot. |
103 | | /// |
104 | | /// In order for the network to perform well, a block should be authored and propagated |
105 | | /// throughout the peer-to-peer network before the end of the slot. In order for this to |
106 | | /// happen, the block creation process itself should end a few seconds before the end of the |
107 | | /// slot. This threshold after which the block creation should end is determined by this value. |
108 | | /// |
109 | | /// The moment in the slot when the authoring ends is determined by |
110 | | /// `slot_duration * slot_duration_author_ratio / u16::MAX`. |
111 | | /// For example, passing `u16::MAX` means that the entire slot is used. Passing |
112 | | /// `u16::MAX / 2` means that half of the slot is used. |
113 | | /// |
114 | | /// A typical value is `43691_u16`, representing 2/3 of a slot. |
115 | | /// |
116 | | /// Note that this value doesn't determine the moment when creating the block has ended, but |
117 | | /// the moment when creating the block should start its final phase. |
118 | | pub slot_duration_author_ratio: u16, |
119 | | } |
120 | | |
121 | | /// Identifier for a blocks request to be performed. |
122 | | #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] |
123 | | pub struct BlocksRequestId(usize); |
124 | | |
125 | | /// Summary of the state of the [`ConsensusService`]. |
126 | | #[derive(Debug, Clone)] |
127 | | pub struct SyncState { |
128 | | pub best_block_number: u64, |
129 | | pub best_block_hash: [u8; 32], |
130 | | pub finalized_block_number: u64, |
131 | | pub finalized_block_hash: [u8; 32], |
132 | | } |
133 | | |
134 | | /// Background task that verifies blocks and emits requests. |
135 | | pub struct ConsensusService { |
136 | | /// Used to communicate with the background task. Also used for the background task to detect |
137 | | /// a shutdown. |
138 | | to_background_tx: Mutex<mpsc::Sender<ToBackground>>, |
139 | | |
140 | | /// See [`Config::block_number_bytes`]. |
141 | | block_number_bytes: usize, |
142 | | } |
143 | | |
144 | | enum ToBackground { |
145 | | SubscribeAll { |
146 | | buffer_size: usize, |
147 | | // TODO: unused field |
148 | | _max_finalized_pinned_blocks: NonZeroUsize, |
149 | | result_tx: oneshot::Sender<SubscribeAll>, |
150 | | }, |
151 | | GetSyncState { |
152 | | result_tx: oneshot::Sender<SyncState>, |
153 | | }, |
154 | | Unpin { |
155 | | // TODO: unused field |
156 | | _subscription_id: SubscriptionId, |
157 | | // TODO: unused field |
158 | | _block_hash: [u8; 32], |
159 | | /// Sends back `()` if the unpinning was successful or the subscription no longer exists. |
160 | | /// The sender is silently destroyed if the block hash was invalid. |
161 | | result_tx: oneshot::Sender<()>, |
162 | | }, |
163 | | IsMajorSyncingHint { |
164 | | result_tx: oneshot::Sender<bool>, |
165 | | }, |
166 | | } |
167 | | |
168 | | /// Potential error when calling [`ConsensusService::new`]. |
169 | 0 | #[derive(Debug, derive_more::Display)] Unexecuted instantiation: _RNvXsc_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB5_9InitErrorNtNtCsaYZPK01V26L_4core3fmt7Display3fmt Unexecuted instantiation: _RNvXsc_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB5_9InitErrorNtNtCsaYZPK01V26L_4core3fmt7Display3fmt |
170 | | pub enum InitError { |
171 | | /// Database is corrupted. |
172 | | DatabaseCorruption(full_sqlite::CorruptedError), |
173 | | /// Error parsing the header of a block in the database. |
174 | | InvalidHeader(header::Error), |
175 | | /// `:code` key is missing from the finalized block storage. |
176 | | FinalizedCodeMissing, |
177 | | /// Error parsing the `:heappages` of the finalized block. |
178 | | FinalizedHeapPagesInvalid(executor::InvalidHeapPagesError), |
179 | | /// Error initializing the runtime of the finalized block. |
180 | | FinalizedRuntimeInit(executor::host::NewErr), |
181 | | } |
182 | | |
183 | | impl ConsensusService { |
184 | | /// Initializes the [`ConsensusService`] with the given configuration. |
185 | 21 | pub async fn new(mut config: Config) -> Result<Arc<Self>, InitError> { _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService3new Line | Count | Source | 185 | 21 | pub async fn new(mut config: Config) -> Result<Arc<Self>, InitError> { |
Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService3new |
186 | | // Perform the initial access to the database to load a bunch of information. |
187 | | let ( |
188 | 21 | finalized_block_number, |
189 | 21 | finalized_heap_pages, |
190 | 21 | finalized_code, |
191 | 21 | best_block_hash, |
192 | 21 | best_block_number, |
193 | 21 | finalized_chain_information, |
194 | 21 | ) = config |
195 | 21 | .database |
196 | 21 | .with_database({ |
197 | 21 | let block_number_bytes = config.block_number_bytes; |
198 | 21 | move |database| { |
199 | 21 | // If the previous run of the full node crashed, the database will contain |
200 | 21 | // blocks that are no longer useful in any way. We purge them all here. |
201 | 21 | database |
202 | 21 | .purge_finality_orphans() |
203 | 21 | .map_err(InitError::DatabaseCorruption)?0 ; |
204 | | |
205 | 21 | let finalized_block_hash = database |
206 | 21 | .finalized_block_hash() |
207 | 21 | .map_err(InitError::DatabaseCorruption)?0 ; |
208 | 21 | let finalized_block_number = header::decode( |
209 | 21 | &database |
210 | 21 | .block_scale_encoded_header(&finalized_block_hash) |
211 | 21 | .map_err(InitError::DatabaseCorruption)?0 |
212 | 21 | .unwrap(), // A panic here would indicate a bug in the database code. |
213 | 21 | block_number_bytes, |
214 | 21 | ) |
215 | 21 | .map_err(InitError::InvalidHeader)?0 |
216 | | .number; |
217 | 21 | let best_block_hash = database.best_block_hash().unwrap(); |
218 | 21 | let best_block_number = header::decode( |
219 | 21 | &database |
220 | 21 | .block_scale_encoded_header(&best_block_hash) |
221 | 21 | .map_err(InitError::DatabaseCorruption)?0 |
222 | 21 | .unwrap(), // A panic here would indicate a bug in the database code. |
223 | 21 | block_number_bytes, |
224 | 21 | ) |
225 | 21 | .map_err(InitError::InvalidHeader)?0 |
226 | | .number; |
227 | 21 | let finalized_chain_information = |
228 | 21 | match database.to_chain_information(&finalized_block_hash) { |
229 | 21 | Ok(info) => info, |
230 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { |
231 | 0 | return Err(InitError::DatabaseCorruption(err)) |
232 | | } |
233 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) |
234 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), |
235 | | }; |
236 | 21 | let finalized_code = match database.block_storage_get( |
237 | 21 | &finalized_block_hash, |
238 | 21 | iter::empty::<iter::Empty<_>>(), |
239 | 21 | trie::bytes_to_nibbles(b":code".iter().copied()).map(u8::from), |
240 | 21 | ) { |
241 | 21 | Ok(Some((code, _))) => code, |
242 | 0 | Ok(None) => return Err(InitError::FinalizedCodeMissing), |
243 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { |
244 | 0 | return Err(InitError::DatabaseCorruption(err)) |
245 | | } |
246 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) |
247 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), |
248 | | }; |
249 | 21 | let finalized_heap_pages = match database.block_storage_get( |
250 | 21 | &finalized_block_hash, |
251 | 21 | iter::empty::<iter::Empty<_>>(), |
252 | 21 | trie::bytes_to_nibbles(b":heappages".iter().copied()).map(u8::from), |
253 | 21 | ) { |
254 | 0 | Ok(Some((hp, _))) => Some(hp), |
255 | 21 | Ok(None) => None, |
256 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { |
257 | 0 | return Err(InitError::DatabaseCorruption(err)) |
258 | | } |
259 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) |
260 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), |
261 | | }; |
262 | 21 | Ok(( |
263 | 21 | finalized_block_number, |
264 | 21 | finalized_heap_pages, |
265 | 21 | finalized_code, |
266 | 21 | best_block_hash, |
267 | 21 | best_block_number, |
268 | 21 | finalized_chain_information, |
269 | 21 | )) |
270 | 21 | } _RNCNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_16ConsensusService3new00CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 198 | 2 | move |database| { | 199 | 2 | // If the previous run of the full node crashed, the database will contain | 200 | 2 | // blocks that are no longer useful in any way. We purge them all here. | 201 | 2 | database | 202 | 2 | .purge_finality_orphans() | 203 | 2 | .map_err(InitError::DatabaseCorruption)?0 ; | 204 | | | 205 | 2 | let finalized_block_hash = database | 206 | 2 | .finalized_block_hash() | 207 | 2 | .map_err(InitError::DatabaseCorruption)?0 ; | 208 | 2 | let finalized_block_number = header::decode( | 209 | 2 | &database | 210 | 2 | .block_scale_encoded_header(&finalized_block_hash) | 211 | 2 | .map_err(InitError::DatabaseCorruption)?0 | 212 | 2 | .unwrap(), // A panic here would indicate a bug in the database code. | 213 | 2 | block_number_bytes, | 214 | 2 | ) | 215 | 2 | .map_err(InitError::InvalidHeader)?0 | 216 | | .number; | 217 | 2 | let best_block_hash = database.best_block_hash().unwrap(); | 218 | 2 | let best_block_number = header::decode( | 219 | 2 | &database | 220 | 2 | .block_scale_encoded_header(&best_block_hash) | 221 | 2 | .map_err(InitError::DatabaseCorruption)?0 | 222 | 2 | .unwrap(), // A panic here would indicate a bug in the database code. 
| 223 | 2 | block_number_bytes, | 224 | 2 | ) | 225 | 2 | .map_err(InitError::InvalidHeader)?0 | 226 | | .number; | 227 | 2 | let finalized_chain_information = | 228 | 2 | match database.to_chain_information(&finalized_block_hash) { | 229 | 2 | Ok(info) => info, | 230 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 231 | 0 | return Err(InitError::DatabaseCorruption(err)) | 232 | | } | 233 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 234 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 235 | | }; | 236 | 2 | let finalized_code = match database.block_storage_get( | 237 | 2 | &finalized_block_hash, | 238 | 2 | iter::empty::<iter::Empty<_>>(), | 239 | 2 | trie::bytes_to_nibbles(b":code".iter().copied()).map(u8::from), | 240 | 2 | ) { | 241 | 2 | Ok(Some((code, _))) => code, | 242 | 0 | Ok(None) => return Err(InitError::FinalizedCodeMissing), | 243 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 244 | 0 | return Err(InitError::DatabaseCorruption(err)) | 245 | | } | 246 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 247 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 248 | | }; | 249 | 2 | let finalized_heap_pages = match database.block_storage_get( | 250 | 2 | &finalized_block_hash, | 251 | 2 | iter::empty::<iter::Empty<_>>(), | 252 | 2 | trie::bytes_to_nibbles(b":heappages".iter().copied()).map(u8::from), | 253 | 2 | ) { | 254 | 0 | Ok(Some((hp, _))) => Some(hp), | 255 | 2 | Ok(None) => None, | 256 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 257 | 0 | return Err(InitError::DatabaseCorruption(err)) | 258 | | } | 259 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 260 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 261 | | }; | 262 | 2 | Ok(( | 263 | 2 | finalized_block_number, | 264 | 2 | finalized_heap_pages, | 265 | 2 | finalized_code, | 266 | 2 | best_block_hash, | 267 | 
2 | best_block_number, | 268 | 2 | finalized_chain_information, | 269 | 2 | )) | 270 | 2 | } |
Unexecuted instantiation: _RNCNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_16ConsensusService3new00B8_ Unexecuted instantiation: _RNCNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_16ConsensusService3new00CscDgN54JpMGG_6author _RNCNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_16ConsensusService3new00CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 198 | 19 | move |database| { | 199 | 19 | // If the previous run of the full node crashed, the database will contain | 200 | 19 | // blocks that are no longer useful in any way. We purge them all here. | 201 | 19 | database | 202 | 19 | .purge_finality_orphans() | 203 | 19 | .map_err(InitError::DatabaseCorruption)?0 ; | 204 | | | 205 | 19 | let finalized_block_hash = database | 206 | 19 | .finalized_block_hash() | 207 | 19 | .map_err(InitError::DatabaseCorruption)?0 ; | 208 | 19 | let finalized_block_number = header::decode( | 209 | 19 | &database | 210 | 19 | .block_scale_encoded_header(&finalized_block_hash) | 211 | 19 | .map_err(InitError::DatabaseCorruption)?0 | 212 | 19 | .unwrap(), // A panic here would indicate a bug in the database code. | 213 | 19 | block_number_bytes, | 214 | 19 | ) | 215 | 19 | .map_err(InitError::InvalidHeader)?0 | 216 | | .number; | 217 | 19 | let best_block_hash = database.best_block_hash().unwrap(); | 218 | 19 | let best_block_number = header::decode( | 219 | 19 | &database | 220 | 19 | .block_scale_encoded_header(&best_block_hash) | 221 | 19 | .map_err(InitError::DatabaseCorruption)?0 | 222 | 19 | .unwrap(), // A panic here would indicate a bug in the database code. 
| 223 | 19 | block_number_bytes, | 224 | 19 | ) | 225 | 19 | .map_err(InitError::InvalidHeader)?0 | 226 | | .number; | 227 | 19 | let finalized_chain_information = | 228 | 19 | match database.to_chain_information(&finalized_block_hash) { | 229 | 19 | Ok(info) => info, | 230 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 231 | 0 | return Err(InitError::DatabaseCorruption(err)) | 232 | | } | 233 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 234 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 235 | | }; | 236 | 19 | let finalized_code = match database.block_storage_get( | 237 | 19 | &finalized_block_hash, | 238 | 19 | iter::empty::<iter::Empty<_>>(), | 239 | 19 | trie::bytes_to_nibbles(b":code".iter().copied()).map(u8::from), | 240 | 19 | ) { | 241 | 19 | Ok(Some((code, _))) => code, | 242 | 0 | Ok(None) => return Err(InitError::FinalizedCodeMissing), | 243 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 244 | 0 | return Err(InitError::DatabaseCorruption(err)) | 245 | | } | 246 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 247 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 248 | | }; | 249 | 19 | let finalized_heap_pages = match database.block_storage_get( | 250 | 19 | &finalized_block_hash, | 251 | 19 | iter::empty::<iter::Empty<_>>(), | 252 | 19 | trie::bytes_to_nibbles(b":heappages".iter().copied()).map(u8::from), | 253 | 19 | ) { | 254 | 0 | Ok(Some((hp, _))) => Some(hp), | 255 | 19 | Ok(None) => None, | 256 | 0 | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 257 | 0 | return Err(InitError::DatabaseCorruption(err)) | 258 | | } | 259 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 260 | 0 | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 261 | | }; | 262 | 19 | Ok(( | 263 | 19 | finalized_block_number, | 264 | 19 | finalized_heap_pages, | 265 | 19 | finalized_code, | 266 | 19 | 
best_block_hash, | 267 | 19 | best_block_number, | 268 | 19 | finalized_chain_information, | 269 | 19 | )) | 270 | 19 | } |
Unexecuted instantiation: _RNCNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB6_16ConsensusService3new00B8_ |
271 | 21 | }) |
272 | 21 | .await?0 ; |
273 | | |
274 | | // The Kusama chain contains a fork hardcoded in the official Polkadot client. |
275 | | // See <https://github.com/paritytech/polkadot/blob/93f45f996a3d5592a57eba02f91f2fc2bc5a07cf/node/service/src/grandpa_support.rs#L111-L216> |
276 | | // Because we don't want to support this in smoldot, a warning is printed instead if we |
277 | | // recognize Kusama. |
278 | | // See also <https://github.com/paritytech/smoldot/issues/1866>. |
279 | 21 | if config.genesis_block_hash |
280 | 21 | == [ |
281 | 21 | 176, 168, 212, 147, 40, 92, 45, 247, 50, 144, 223, 183, 230, 31, 135, 15, 23, 180, |
282 | 21 | 24, 1, 25, 122, 20, 156, 169, 54, 84, 73, 158, 163, 218, 254, |
283 | 21 | ] |
284 | 0 | && finalized_block_number <= 1500988 |
285 | 0 | { |
286 | 0 | config.log_callback.log( |
287 | 0 | LogLevel::Warn, |
288 | 0 | "The Kusama chain is known to be borked at block #1491596. The official Polkadot \ |
289 | 0 | client works around this issue by hardcoding a fork in its source code. Smoldot \ |
290 | 0 | does not support this hardcoded fork and will thus fail to sync past this block." |
291 | 0 | .to_string(), |
292 | 0 | ); |
293 | 21 | } |
294 | | |
295 | 21 | let mut sync = all::AllSync::new(all::Config { |
296 | 21 | chain_information: finalized_chain_information, |
297 | 21 | block_number_bytes: config.block_number_bytes, |
298 | 21 | allow_unknown_consensus_engines: false, |
299 | 21 | sources_capacity: 32, |
300 | 21 | blocks_capacity: { |
301 | 21 | // This is the maximum number of blocks between two consecutive justifications. |
302 | 21 | 1024 |
303 | 21 | }, |
304 | 21 | max_disjoint_headers: 1024, |
305 | 21 | max_requests_per_block: NonZeroU32::new(3).unwrap(), |
306 | 21 | download_ahead_blocks: { |
307 | 21 | // Assuming a verification speed of 1k blocks/sec and a 99th download time |
308 | 21 | // percentile of two second, the number of blocks to download ahead of time |
309 | 21 | // in order to not block is 2000. |
310 | 21 | // In practice, however, the verification speed and download speed depend on |
311 | 21 | // the chain and the machine of the user. |
312 | 21 | NonZeroU32::new(2000).unwrap() |
313 | 21 | }, |
314 | 21 | download_bodies: true, |
315 | 21 | // We ask for all the chain-information-related storage proofs and call proofs to be |
316 | 21 | // downloaded during the warp syncing in order to guarantee that the necessary |
317 | 21 | // information will be found in the database at the next reload. |
318 | 21 | download_all_chain_information_storage_proofs: true, |
319 | 21 | code_trie_node_hint: None, |
320 | 21 | }); |
321 | | |
322 | 21 | let finalized_runtime = { |
323 | | // Builds the runtime of the finalized block. |
324 | | // Assumed to always be valid, otherwise the block wouldn't have been |
325 | | // saved in the database, hence the large number of unwraps here. |
326 | 21 | let heap_pages = executor::storage_heap_pages_to_value(finalized_heap_pages.as_deref()) |
327 | 21 | .map_err(InitError::FinalizedHeapPagesInvalid)?0 ; |
328 | 21 | executor::host::HostVmPrototype::new(executor::host::Config { |
329 | 21 | module: finalized_code, |
330 | 21 | heap_pages, |
331 | 21 | exec_hint: executor::vm::ExecHint::ValidateAndCompile, // TODO: probably should be decided by the optimisticsync |
332 | 21 | allow_unresolved_imports: false, |
333 | 21 | }) |
334 | 21 | .map_err(InitError::FinalizedRuntimeInit)?0 |
335 | | }; |
336 | | |
337 | 21 | let block_author_sync_source = sync |
338 | 21 | .prepare_add_source(best_block_number, best_block_hash) |
339 | 21 | .add_source(None, NonFinalizedBlock::NotVerified); |
340 | 21 | |
341 | 21 | let (to_background_tx, to_background_rx) = mpsc::channel(4); |
342 | 21 | |
343 | 21 | let background_sync = SyncBackground { |
344 | 21 | sync, |
345 | 21 | block_author_sync_source, |
346 | 21 | block_authoring: None, |
347 | 21 | authored_block: None, |
348 | 21 | slot_duration_author_ratio: config.slot_duration_author_ratio, |
349 | 21 | keystore: config.keystore, |
350 | 21 | finalized_runtime: Arc::new(finalized_runtime), |
351 | 21 | network_service: config.network_service.0, |
352 | 21 | network_chain_id: config.network_service.1, |
353 | 21 | network_local_chain_update_needed: true, |
354 | 21 | pending_block_announce: None, |
355 | 21 | to_background_rx, |
356 | 21 | blocks_notifications: Vec::with_capacity(8), |
357 | 21 | pending_notification: None, |
358 | 21 | from_network_service: config.network_events_receiver, |
359 | 21 | database: config.database, |
360 | 21 | database_catch_up_download: DatabaseCatchUpDownload::NoDownloadInProgress, |
361 | 21 | database_catch_up_download_block_verification: |
362 | 21 | DatabaseCatchUpDownloadBlockVerification::None, |
363 | 21 | peers_source_id_map: Default::default(), |
364 | 21 | sub_tasks: FuturesUnordered::new(), |
365 | 21 | log_callback: config.log_callback, |
366 | 21 | jaeger_service: config.jaeger_service, |
367 | 21 | }; |
368 | 21 | |
369 | 21 | (config.tasks_executor)(Box::pin(background_sync.run())); |
370 | 21 | |
371 | 21 | Ok(Arc::new(ConsensusService { |
372 | 21 | block_number_bytes: config.block_number_bytes, |
373 | 21 | to_background_tx: Mutex::new(to_background_tx), |
374 | 21 | })) |
375 | 21 | } _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService3new0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 185 | 2 | pub async fn new(mut config: Config) -> Result<Arc<Self>, InitError> { | 186 | | // Perform the initial access to the database to load a bunch of information. | 187 | | let ( | 188 | 2 | finalized_block_number, | 189 | 2 | finalized_heap_pages, | 190 | 2 | finalized_code, | 191 | 2 | best_block_hash, | 192 | 2 | best_block_number, | 193 | 2 | finalized_chain_information, | 194 | 2 | ) = config | 195 | 2 | .database | 196 | 2 | .with_database({ | 197 | 2 | let block_number_bytes = config.block_number_bytes; | 198 | 2 | move |database| { | 199 | | // If the previous run of the full node crashed, the database will contain | 200 | | // blocks that are no longer useful in any way. We purge them all here. | 201 | | database | 202 | | .purge_finality_orphans() | 203 | | .map_err(InitError::DatabaseCorruption)?; | 204 | | | 205 | | let finalized_block_hash = database | 206 | | .finalized_block_hash() | 207 | | .map_err(InitError::DatabaseCorruption)?; | 208 | | let finalized_block_number = header::decode( | 209 | | &database | 210 | | .block_scale_encoded_header(&finalized_block_hash) | 211 | | .map_err(InitError::DatabaseCorruption)? | 212 | | .unwrap(), // A panic here would indicate a bug in the database code. | 213 | | block_number_bytes, | 214 | | ) | 215 | | .map_err(InitError::InvalidHeader)? | 216 | | .number; | 217 | | let best_block_hash = database.best_block_hash().unwrap(); | 218 | | let best_block_number = header::decode( | 219 | | &database | 220 | | .block_scale_encoded_header(&best_block_hash) | 221 | | .map_err(InitError::DatabaseCorruption)? | 222 | | .unwrap(), // A panic here would indicate a bug in the database code. | 223 | | block_number_bytes, | 224 | | ) | 225 | | .map_err(InitError::InvalidHeader)? 
| 226 | | .number; | 227 | | let finalized_chain_information = | 228 | | match database.to_chain_information(&finalized_block_hash) { | 229 | | Ok(info) => info, | 230 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 231 | | return Err(InitError::DatabaseCorruption(err)) | 232 | | } | 233 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 234 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 235 | | }; | 236 | | let finalized_code = match database.block_storage_get( | 237 | | &finalized_block_hash, | 238 | | iter::empty::<iter::Empty<_>>(), | 239 | | trie::bytes_to_nibbles(b":code".iter().copied()).map(u8::from), | 240 | | ) { | 241 | | Ok(Some((code, _))) => code, | 242 | | Ok(None) => return Err(InitError::FinalizedCodeMissing), | 243 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 244 | | return Err(InitError::DatabaseCorruption(err)) | 245 | | } | 246 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 247 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 248 | | }; | 249 | | let finalized_heap_pages = match database.block_storage_get( | 250 | | &finalized_block_hash, | 251 | | iter::empty::<iter::Empty<_>>(), | 252 | | trie::bytes_to_nibbles(b":heappages".iter().copied()).map(u8::from), | 253 | | ) { | 254 | | Ok(Some((hp, _))) => Some(hp), | 255 | | Ok(None) => None, | 256 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 257 | | return Err(InitError::DatabaseCorruption(err)) | 258 | | } | 259 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 260 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 261 | | }; | 262 | | Ok(( | 263 | | finalized_block_number, | 264 | | finalized_heap_pages, | 265 | | finalized_code, | 266 | | best_block_hash, | 267 | | best_block_number, | 268 | | finalized_chain_information, | 269 | | )) | 270 | 2 | } | 271 | 2 | }) | 272 | 2 | .await?0 ; | 273 | | | 274 | | // The 
Kusama chain contains a fork hardcoded in the official Polkadot client. | 275 | | // See <https://github.com/paritytech/polkadot/blob/93f45f996a3d5592a57eba02f91f2fc2bc5a07cf/node/service/src/grandpa_support.rs#L111-L216> | 276 | | // Because we don't want to support this in smoldot, a warning is printed instead if we | 277 | | // recognize Kusama. | 278 | | // See also <https://github.com/paritytech/smoldot/issues/1866>. | 279 | 2 | if config.genesis_block_hash | 280 | 2 | == [ | 281 | 2 | 176, 168, 212, 147, 40, 92, 45, 247, 50, 144, 223, 183, 230, 31, 135, 15, 23, 180, | 282 | 2 | 24, 1, 25, 122, 20, 156, 169, 54, 84, 73, 158, 163, 218, 254, | 283 | 2 | ] | 284 | 0 | && finalized_block_number <= 1500988 | 285 | 0 | { | 286 | 0 | config.log_callback.log( | 287 | 0 | LogLevel::Warn, | 288 | 0 | "The Kusama chain is known to be borked at block #1491596. The official Polkadot \ | 289 | 0 | client works around this issue by hardcoding a fork in its source code. Smoldot \ | 290 | 0 | does not support this hardcoded fork and will thus fail to sync past this block." | 291 | 0 | .to_string(), | 292 | 0 | ); | 293 | 2 | } | 294 | | | 295 | 2 | let mut sync = all::AllSync::new(all::Config { | 296 | 2 | chain_information: finalized_chain_information, | 297 | 2 | block_number_bytes: config.block_number_bytes, | 298 | 2 | allow_unknown_consensus_engines: false, | 299 | 2 | sources_capacity: 32, | 300 | 2 | blocks_capacity: { | 301 | 2 | // This is the maximum number of blocks between two consecutive justifications. | 302 | 2 | 1024 | 303 | 2 | }, | 304 | 2 | max_disjoint_headers: 1024, | 305 | 2 | max_requests_per_block: NonZeroU32::new(3).unwrap(), | 306 | 2 | download_ahead_blocks: { | 307 | 2 | // Assuming a verification speed of 1k blocks/sec and a 99th download time | 308 | 2 | // percentile of two second, the number of blocks to download ahead of time | 309 | 2 | // in order to not block is 2000. 
| 310 | 2 | // In practice, however, the verification speed and download speed depend on | 311 | 2 | // the chain and the machine of the user. | 312 | 2 | NonZeroU32::new(2000).unwrap() | 313 | 2 | }, | 314 | 2 | download_bodies: true, | 315 | 2 | // We ask for all the chain-information-related storage proofs and call proofs to be | 316 | 2 | // downloaded during the warp syncing in order to guarantee that the necessary | 317 | 2 | // information will be found in the database at the next reload. | 318 | 2 | download_all_chain_information_storage_proofs: true, | 319 | 2 | code_trie_node_hint: None, | 320 | 2 | }); | 321 | | | 322 | 2 | let finalized_runtime = { | 323 | | // Builds the runtime of the finalized block. | 324 | | // Assumed to always be valid, otherwise the block wouldn't have been | 325 | | // saved in the database, hence the large number of unwraps here. | 326 | 2 | let heap_pages = executor::storage_heap_pages_to_value(finalized_heap_pages.as_deref()) | 327 | 2 | .map_err(InitError::FinalizedHeapPagesInvalid)?0 ; | 328 | 2 | executor::host::HostVmPrototype::new(executor::host::Config { | 329 | 2 | module: finalized_code, | 330 | 2 | heap_pages, | 331 | 2 | exec_hint: executor::vm::ExecHint::ValidateAndCompile, // TODO: probably should be decided by the optimisticsync | 332 | 2 | allow_unresolved_imports: false, | 333 | 2 | }) | 334 | 2 | .map_err(InitError::FinalizedRuntimeInit)?0 | 335 | | }; | 336 | | | 337 | 2 | let block_author_sync_source = sync | 338 | 2 | .prepare_add_source(best_block_number, best_block_hash) | 339 | 2 | .add_source(None, NonFinalizedBlock::NotVerified); | 340 | 2 | | 341 | 2 | let (to_background_tx, to_background_rx) = mpsc::channel(4); | 342 | 2 | | 343 | 2 | let background_sync = SyncBackground { | 344 | 2 | sync, | 345 | 2 | block_author_sync_source, | 346 | 2 | block_authoring: None, | 347 | 2 | authored_block: None, | 348 | 2 | slot_duration_author_ratio: config.slot_duration_author_ratio, | 349 | 2 | keystore: 
config.keystore, | 350 | 2 | finalized_runtime: Arc::new(finalized_runtime), | 351 | 2 | network_service: config.network_service.0, | 352 | 2 | network_chain_id: config.network_service.1, | 353 | 2 | network_local_chain_update_needed: true, | 354 | 2 | pending_block_announce: None, | 355 | 2 | to_background_rx, | 356 | 2 | blocks_notifications: Vec::with_capacity(8), | 357 | 2 | pending_notification: None, | 358 | 2 | from_network_service: config.network_events_receiver, | 359 | 2 | database: config.database, | 360 | 2 | database_catch_up_download: DatabaseCatchUpDownload::NoDownloadInProgress, | 361 | 2 | database_catch_up_download_block_verification: | 362 | 2 | DatabaseCatchUpDownloadBlockVerification::None, | 363 | 2 | peers_source_id_map: Default::default(), | 364 | 2 | sub_tasks: FuturesUnordered::new(), | 365 | 2 | log_callback: config.log_callback, | 366 | 2 | jaeger_service: config.jaeger_service, | 367 | 2 | }; | 368 | 2 | | 369 | 2 | (config.tasks_executor)(Box::pin(background_sync.run())); | 370 | 2 | | 371 | 2 | Ok(Arc::new(ConsensusService { | 372 | 2 | block_number_bytes: config.block_number_bytes, | 373 | 2 | to_background_tx: Mutex::new(to_background_tx), | 374 | 2 | })) | 375 | 2 | } |
Unexecuted instantiation: _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService3new0B6_ Unexecuted instantiation: _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService3new0CscDgN54JpMGG_6author _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService3new0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 185 | 19 | pub async fn new(mut config: Config) -> Result<Arc<Self>, InitError> { | 186 | | // Perform the initial access to the database to load a bunch of information. | 187 | | let ( | 188 | 19 | finalized_block_number, | 189 | 19 | finalized_heap_pages, | 190 | 19 | finalized_code, | 191 | 19 | best_block_hash, | 192 | 19 | best_block_number, | 193 | 19 | finalized_chain_information, | 194 | 19 | ) = config | 195 | 19 | .database | 196 | 19 | .with_database({ | 197 | 19 | let block_number_bytes = config.block_number_bytes; | 198 | 19 | move |database| { | 199 | | // If the previous run of the full node crashed, the database will contain | 200 | | // blocks that are no longer useful in any way. We purge them all here. | 201 | | database | 202 | | .purge_finality_orphans() | 203 | | .map_err(InitError::DatabaseCorruption)?; | 204 | | | 205 | | let finalized_block_hash = database | 206 | | .finalized_block_hash() | 207 | | .map_err(InitError::DatabaseCorruption)?; | 208 | | let finalized_block_number = header::decode( | 209 | | &database | 210 | | .block_scale_encoded_header(&finalized_block_hash) | 211 | | .map_err(InitError::DatabaseCorruption)? | 212 | | .unwrap(), // A panic here would indicate a bug in the database code. | 213 | | block_number_bytes, | 214 | | ) | 215 | | .map_err(InitError::InvalidHeader)? 
| 216 | | .number; | 217 | | let best_block_hash = database.best_block_hash().unwrap(); | 218 | | let best_block_number = header::decode( | 219 | | &database | 220 | | .block_scale_encoded_header(&best_block_hash) | 221 | | .map_err(InitError::DatabaseCorruption)? | 222 | | .unwrap(), // A panic here would indicate a bug in the database code. | 223 | | block_number_bytes, | 224 | | ) | 225 | | .map_err(InitError::InvalidHeader)? | 226 | | .number; | 227 | | let finalized_chain_information = | 228 | | match database.to_chain_information(&finalized_block_hash) { | 229 | | Ok(info) => info, | 230 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 231 | | return Err(InitError::DatabaseCorruption(err)) | 232 | | } | 233 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 234 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 235 | | }; | 236 | | let finalized_code = match database.block_storage_get( | 237 | | &finalized_block_hash, | 238 | | iter::empty::<iter::Empty<_>>(), | 239 | | trie::bytes_to_nibbles(b":code".iter().copied()).map(u8::from), | 240 | | ) { | 241 | | Ok(Some((code, _))) => code, | 242 | | Ok(None) => return Err(InitError::FinalizedCodeMissing), | 243 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 244 | | return Err(InitError::DatabaseCorruption(err)) | 245 | | } | 246 | | Err(full_sqlite::StorageAccessError::IncompleteStorage) | 247 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 248 | | }; | 249 | | let finalized_heap_pages = match database.block_storage_get( | 250 | | &finalized_block_hash, | 251 | | iter::empty::<iter::Empty<_>>(), | 252 | | trie::bytes_to_nibbles(b":heappages".iter().copied()).map(u8::from), | 253 | | ) { | 254 | | Ok(Some((hp, _))) => Some(hp), | 255 | | Ok(None) => None, | 256 | | Err(full_sqlite::StorageAccessError::Corrupted(err)) => { | 257 | | return Err(InitError::DatabaseCorruption(err)) | 258 | | } | 259 | | 
Err(full_sqlite::StorageAccessError::IncompleteStorage) | 260 | | | Err(full_sqlite::StorageAccessError::UnknownBlock) => unreachable!(), | 261 | | }; | 262 | | Ok(( | 263 | | finalized_block_number, | 264 | | finalized_heap_pages, | 265 | | finalized_code, | 266 | | best_block_hash, | 267 | | best_block_number, | 268 | | finalized_chain_information, | 269 | | )) | 270 | 19 | } | 271 | 19 | }) | 272 | 19 | .await?0 ; | 273 | | | 274 | | // The Kusama chain contains a fork hardcoded in the official Polkadot client. | 275 | | // See <https://github.com/paritytech/polkadot/blob/93f45f996a3d5592a57eba02f91f2fc2bc5a07cf/node/service/src/grandpa_support.rs#L111-L216> | 276 | | // Because we don't want to support this in smoldot, a warning is printed instead if we | 277 | | // recognize Kusama. | 278 | | // See also <https://github.com/paritytech/smoldot/issues/1866>. | 279 | 19 | if config.genesis_block_hash | 280 | 19 | == [ | 281 | 19 | 176, 168, 212, 147, 40, 92, 45, 247, 50, 144, 223, 183, 230, 31, 135, 15, 23, 180, | 282 | 19 | 24, 1, 25, 122, 20, 156, 169, 54, 84, 73, 158, 163, 218, 254, | 283 | 19 | ] | 284 | 0 | && finalized_block_number <= 1500988 | 285 | 0 | { | 286 | 0 | config.log_callback.log( | 287 | 0 | LogLevel::Warn, | 288 | 0 | "The Kusama chain is known to be borked at block #1491596. The official Polkadot \ | 289 | 0 | client works around this issue by hardcoding a fork in its source code. Smoldot \ | 290 | 0 | does not support this hardcoded fork and will thus fail to sync past this block." | 291 | 0 | .to_string(), | 292 | 0 | ); | 293 | 19 | } | 294 | | | 295 | 19 | let mut sync = all::AllSync::new(all::Config { | 296 | 19 | chain_information: finalized_chain_information, | 297 | 19 | block_number_bytes: config.block_number_bytes, | 298 | 19 | allow_unknown_consensus_engines: false, | 299 | 19 | sources_capacity: 32, | 300 | 19 | blocks_capacity: { | 301 | 19 | // This is the maximum number of blocks between two consecutive justifications. 
| 302 | 19 | 1024 | 303 | 19 | }, | 304 | 19 | max_disjoint_headers: 1024, | 305 | 19 | max_requests_per_block: NonZeroU32::new(3).unwrap(), | 306 | 19 | download_ahead_blocks: { | 307 | 19 | // Assuming a verification speed of 1k blocks/sec and a 99th download time | 308 | 19 | // percentile of two second, the number of blocks to download ahead of time | 309 | 19 | // in order to not block is 2000. | 310 | 19 | // In practice, however, the verification speed and download speed depend on | 311 | 19 | // the chain and the machine of the user. | 312 | 19 | NonZeroU32::new(2000).unwrap() | 313 | 19 | }, | 314 | 19 | download_bodies: true, | 315 | 19 | // We ask for all the chain-information-related storage proofs and call proofs to be | 316 | 19 | // downloaded during the warp syncing in order to guarantee that the necessary | 317 | 19 | // information will be found in the database at the next reload. | 318 | 19 | download_all_chain_information_storage_proofs: true, | 319 | 19 | code_trie_node_hint: None, | 320 | 19 | }); | 321 | | | 322 | 19 | let finalized_runtime = { | 323 | | // Builds the runtime of the finalized block. | 324 | | // Assumed to always be valid, otherwise the block wouldn't have been | 325 | | // saved in the database, hence the large number of unwraps here. 
| 326 | 19 | let heap_pages = executor::storage_heap_pages_to_value(finalized_heap_pages.as_deref()) | 327 | 19 | .map_err(InitError::FinalizedHeapPagesInvalid)?0 ; | 328 | 19 | executor::host::HostVmPrototype::new(executor::host::Config { | 329 | 19 | module: finalized_code, | 330 | 19 | heap_pages, | 331 | 19 | exec_hint: executor::vm::ExecHint::ValidateAndCompile, // TODO: probably should be decided by the optimisticsync | 332 | 19 | allow_unresolved_imports: false, | 333 | 19 | }) | 334 | 19 | .map_err(InitError::FinalizedRuntimeInit)?0 | 335 | | }; | 336 | | | 337 | 19 | let block_author_sync_source = sync | 338 | 19 | .prepare_add_source(best_block_number, best_block_hash) | 339 | 19 | .add_source(None, NonFinalizedBlock::NotVerified); | 340 | 19 | | 341 | 19 | let (to_background_tx, to_background_rx) = mpsc::channel(4); | 342 | 19 | | 343 | 19 | let background_sync = SyncBackground { | 344 | 19 | sync, | 345 | 19 | block_author_sync_source, | 346 | 19 | block_authoring: None, | 347 | 19 | authored_block: None, | 348 | 19 | slot_duration_author_ratio: config.slot_duration_author_ratio, | 349 | 19 | keystore: config.keystore, | 350 | 19 | finalized_runtime: Arc::new(finalized_runtime), | 351 | 19 | network_service: config.network_service.0, | 352 | 19 | network_chain_id: config.network_service.1, | 353 | 19 | network_local_chain_update_needed: true, | 354 | 19 | pending_block_announce: None, | 355 | 19 | to_background_rx, | 356 | 19 | blocks_notifications: Vec::with_capacity(8), | 357 | 19 | pending_notification: None, | 358 | 19 | from_network_service: config.network_events_receiver, | 359 | 19 | database: config.database, | 360 | 19 | database_catch_up_download: DatabaseCatchUpDownload::NoDownloadInProgress, | 361 | 19 | database_catch_up_download_block_verification: | 362 | 19 | DatabaseCatchUpDownloadBlockVerification::None, | 363 | 19 | peers_source_id_map: Default::default(), | 364 | 19 | sub_tasks: FuturesUnordered::new(), | 365 | 19 | log_callback: 
config.log_callback, | 366 | 19 | jaeger_service: config.jaeger_service, | 367 | 19 | }; | 368 | 19 | | 369 | 19 | (config.tasks_executor)(Box::pin(background_sync.run())); | 370 | 19 | | 371 | 19 | Ok(Arc::new(ConsensusService { | 372 | 19 | block_number_bytes: config.block_number_bytes, | 373 | 19 | to_background_tx: Mutex::new(to_background_tx), | 374 | 19 | })) | 375 | 19 | } |
Unexecuted instantiation: _RNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService3new0B6_ |
376 | | |
377 | | /// Returns the value that was provided through [`Config::block_number_bytes`]. |
378 | 2 | pub fn block_number_bytes(&self) -> usize { |
379 | 2 | self.block_number_bytes |
380 | 2 | } _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService18block_number_bytes Line | Count | Source | 378 | 2 | pub fn block_number_bytes(&self) -> usize { | 379 | 2 | self.block_number_bytes | 380 | 2 | } |
Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService18block_number_bytes |
381 | | |
382 | | /// Returns a summary of the state of the service. |
383 | | /// |
384 | | /// > **Important**: This doesn't represent the content of the database. |
385 | | // TODO: maybe remove this in favour of the database; seems like a better idea |
386 | 0 | pub async fn sync_state(&self) -> SyncState { Unexecuted instantiation: _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService10sync_state Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService10sync_state |
387 | 0 | let (result_tx, result_rx) = oneshot::channel(); |
388 | 0 | let _ = self |
389 | 0 | .to_background_tx |
390 | 0 | .lock() |
391 | 0 | .await |
392 | 0 | .send(ToBackground::GetSyncState { result_tx }) |
393 | 0 | .await; |
394 | 0 | result_rx.await.unwrap() |
395 | 0 | } Unexecuted instantiation: _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService10sync_state0B6_ Unexecuted instantiation: _RNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService10sync_state0B6_ |
396 | | |
397 | | /// Subscribes to the state of the chain: the current state and the new blocks. |
398 | | /// |
399 | | /// Only up to `buffer_size` notifications are buffered in the channel. If the channel is full |
400 | | /// when a new notification is attempted to be pushed, the channel gets closed. |
401 | | /// |
402 | | /// A maximum number of finalized or non-canonical (i.e. not part of the finalized chain) |
403 | | /// pinned blocks must be passed, indicating the maximum number of blocks that are finalized |
404 | | /// or non-canonical that the consensus service will pin at the same time for this |
405 | | /// subscription. If this maximum is reached, the channel will get closed. In situations |
406 | | /// where the subscriber is guaranteed to always properly unpin blocks, a value of |
407 | | /// `usize::MAX` can be passed in order to ignore this maximum. |
408 | | /// |
409 | | /// All the blocks being reported are guaranteed to be present in the database associated to |
410 | | /// this [`ConsensusService`]. |
411 | | /// |
412 | | /// See [`SubscribeAll`] for information about the return value. |
413 | | /// |
414 | | /// While this function is asynchronous, it is guaranteed to finish relatively quickly. Only |
415 | | /// CPU operations are performed. |
416 | 0 | pub async fn subscribe_all( |
417 | 0 | &self, |
418 | 0 | buffer_size: usize, |
419 | 0 | max_finalized_pinned_blocks: NonZeroUsize, |
420 | 0 | ) -> SubscribeAll { Unexecuted instantiation: _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService13subscribe_all Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService13subscribe_all |
421 | 0 | let (result_tx, result_rx) = oneshot::channel(); |
422 | 0 | let _ = self |
423 | 0 | .to_background_tx |
424 | 0 | .lock() |
425 | 0 | .await |
426 | 0 | .send(ToBackground::SubscribeAll { |
427 | 0 | buffer_size, |
428 | 0 | _max_finalized_pinned_blocks: max_finalized_pinned_blocks, |
429 | 0 | result_tx, |
430 | 0 | }) |
431 | 0 | .await; |
432 | 0 | result_rx.await.unwrap() |
433 | 0 | } Unexecuted instantiation: _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService13subscribe_all0B6_ Unexecuted instantiation: _RNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService13subscribe_all0B6_ |
434 | | |
435 | | /// Unpins a block that was reported as part of a subscription. |
436 | | /// |
437 | | /// Has no effect if the [`SubscriptionId`] is not or no longer valid (as the consensus service |
438 | | /// can kill any subscription at any moment). |
439 | | /// |
440 | | /// # Panic |
441 | | /// |
442 | | /// Panics if the block hash has not been reported or has already been unpinned. |
443 | | /// |
444 | 0 | pub async fn unpin_block(&self, subscription_id: SubscriptionId, block_hash: [u8; 32]) { Unexecuted instantiation: _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService11unpin_block Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService11unpin_block |
445 | 0 | let (result_tx, result_rx) = oneshot::channel(); |
446 | 0 | let _ = self |
447 | 0 | .to_background_tx |
448 | 0 | .lock() |
449 | 0 | .await |
450 | 0 | .send(ToBackground::Unpin { |
451 | 0 | _subscription_id: subscription_id, |
452 | 0 | _block_hash: block_hash, |
453 | 0 | result_tx, |
454 | 0 | }) |
455 | 0 | .await; |
456 | 0 | result_rx.await.unwrap() |
457 | 0 | } Unexecuted instantiation: _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService11unpin_block0B6_ Unexecuted instantiation: _RNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService11unpin_block0B6_ |
458 | | |
459 | | /// Returns `true` if the syncing is currently downloading blocks at a high rate in order to |
460 | | /// catch up with the head of the chain. |
461 | | /// |
462 | | /// > **Note**: This function is used to implement the `system_health` JSON-RPC function and |
463 | | /// > is basically a hack that shouldn't be relied upon. |
464 | 1 | pub async fn is_major_syncing_hint(&self) -> bool { _RNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService21is_major_syncing_hint Line | Count | Source | 464 | 1 | pub async fn is_major_syncing_hint(&self) -> bool { |
Unexecuted instantiation: _RNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB2_16ConsensusService21is_major_syncing_hint |
465 | 1 | let (result_tx, result_rx) = oneshot::channel(); |
466 | 1 | let _ = self |
467 | 1 | .to_background_tx |
468 | 1 | .lock() |
469 | 0 | .await |
470 | 1 | .send(ToBackground::IsMajorSyncingHint { result_tx }) |
471 | 0 | .await; |
472 | 1 | result_rx.await.unwrap() |
473 | 1 | } _RNCNvMNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService21is_major_syncing_hint0B6_ Line | Count | Source | 464 | 1 | pub async fn is_major_syncing_hint(&self) -> bool { | 465 | 1 | let (result_tx, result_rx) = oneshot::channel(); | 466 | 1 | let _ = self | 467 | 1 | .to_background_tx | 468 | 1 | .lock() | 469 | 0 | .await | 470 | 1 | .send(ToBackground::IsMajorSyncingHint { result_tx }) | 471 | 0 | .await; | 472 | 1 | result_rx.await.unwrap() | 473 | 1 | } |
Unexecuted instantiation: _RNCNvMNtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_16ConsensusService21is_major_syncing_hint0B6_ |
474 | | } |
475 | | |
476 | | /// Return value of [`ConsensusService::subscribe_all`]. |
477 | | pub struct SubscribeAll { |
478 | | /// Identifier of this subscription. |
479 | | pub id: SubscriptionId, |
480 | | |
481 | | /// SCALE-encoded header of the finalized block at the time of the subscription. |
482 | | pub finalized_block_scale_encoded_header: Vec<u8>, |
483 | | |
484 | | /// Hash of the finalized block, to provide to [`ConsensusService::unpin_block`]. |
485 | | pub finalized_block_hash: [u8; 32], |
486 | | |
487 | | /// Runtime of the finalized block. |
488 | | pub finalized_block_runtime: Arc<executor::host::HostVmPrototype>, |
489 | | |
490 | | /// List of all known non-finalized blocks at the time of subscription. |
491 | | /// |
492 | | /// Only one element in this list has [`BlockNotification::is_new_best`] equal to true. |
493 | | /// |
494 | | /// The blocks are guaranteed to be ordered so that parents are always found before their |
495 | | /// children. |
496 | | pub non_finalized_blocks_ancestry_order: Vec<BlockNotification>, |
497 | | |
498 | | /// Channel onto which new blocks are sent. The channel gets closed if it is full when a new |
499 | | /// block needs to be reported. |
500 | | pub new_blocks: async_channel::Receiver<Notification>, |
501 | | } |
502 | | |
503 | | /// Identifier of a subscription returned by [`ConsensusService::subscribe_all`]. |
504 | | #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] |
505 | | pub struct SubscriptionId(u64); |
506 | | |
507 | | /// Notification about a new block or a new finalized block. |
508 | | /// |
509 | | /// See [`ConsensusService::subscribe_all`]. |
510 | | #[derive(Debug, Clone)] |
511 | | pub enum Notification { |
512 | | /// A non-finalized block has been finalized. |
513 | | Finalized { |
514 | | /// BLAKE2 hash of the blocks that have been finalized, in deceasing block number. In |
515 | | /// other words, each block in this list is the parent of the previous one. The first block |
516 | | /// in this list is the new finalized block. |
517 | | /// |
518 | | /// A block with this hash is guaranteed to have earlier been reported in a |
519 | | /// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`] |
520 | | /// or in a [`Notification::Block`]. |
521 | | finalized_blocks_newest_to_oldest: Vec<[u8; 32]>, |
522 | | |
523 | | /// Hash of the best block after the finalization. |
524 | | /// |
525 | | /// If the newly-finalized block is an ancestor of the current best block, then this field |
526 | | /// contains the hash of this current best block. Otherwise, the best block is now |
527 | | /// the non-finalized block with the given hash. |
528 | | /// |
529 | | /// A block with this hash is guaranteed to have earlier been reported in a |
530 | | /// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`] |
531 | | /// or in a [`Notification::Block`]. |
532 | | best_block_hash: [u8; 32], |
533 | | |
534 | | /// List of BLAKE2 hashes of blocks that are no longer part of the canonical chain. In |
535 | | /// unspecified order. |
536 | | pruned_blocks_hashes: Vec<[u8; 32]>, |
537 | | }, |
538 | | |
539 | | /// A new block has been added to the list of unfinalized blocks. |
540 | | Block { |
541 | | /// Information about the block. |
542 | | block: BlockNotification, |
543 | | |
544 | | /// Changes to the storage that the block has performed. |
545 | | /// |
546 | | /// Note that this field is only available when a new block is available. |
547 | | storage_changes: Arc<runtime_call::StorageChanges>, |
548 | | }, |
549 | | } |
550 | | |
551 | | /// Notification about a new block. |
552 | | /// |
553 | | /// See [`ConsensusService::subscribe_all`]. |
554 | | #[derive(Debug, Clone)] |
555 | | pub struct BlockNotification { |
556 | | /// True if this block is considered as the best block of the chain. |
557 | | pub is_new_best: bool, |
558 | | |
559 | | /// SCALE-encoded header of the block. |
560 | | pub scale_encoded_header: Vec<u8>, |
561 | | |
562 | | /// Hash of the block, to provide to [`ConsensusService::unpin_block`]. |
563 | | pub block_hash: [u8; 32], |
564 | | |
565 | | /// If the block has a different runtime compared to its parent, contains the new runtime. |
566 | | /// Contains `None` if the runtime of the block is the same as its parent's. |
567 | | pub runtime_update: Option<Arc<executor::host::HostVmPrototype>>, |
568 | | |
569 | | /// BLAKE2 hash of the header of the parent of this block. |
570 | | /// |
571 | | /// |
572 | | /// A block with this hash is guaranteed to have earlier been reported in a |
573 | | /// [`BlockNotification`], either in [`SubscribeAll::non_finalized_blocks_ancestry_order`] or |
574 | | /// in a [`Notification::Block`]. |
575 | | /// |
576 | | /// > **Note**: The header of a block contains the hash of its parent. When it comes to |
577 | | /// > consensus algorithms such as Babe or Aura, the syncing code verifies that this |
578 | | /// > hash, stored in the header, actually corresponds to a valid block. However, |
579 | | /// > when it comes to parachain consensus, no such verification is performed. |
580 | | /// > Contrary to the hash stored in the header, the value of this field is |
581 | | /// > guaranteed to refer to a block that is known by the syncing service. This |
582 | | /// > allows a subscriber of the state of the chain to precisely track the hierarchy |
583 | | /// > of blocks, without risking to run into a problem in case of a block with an |
584 | | /// > invalid header. |
585 | | pub parent_hash: [u8; 32], |
586 | | } |
587 | | |
588 | | struct SyncBackground { |
589 | | /// State machine containing the list of all the peers, all the non-finalized blocks, and all |
590 | | /// the network requests in progress. |
591 | | /// |
592 | | /// Each peer holds a struct containing either information about a networking peer, or `None` |
593 | | /// if this is the "special source" representing the local block authoring. Only one source |
594 | | /// must contain `None` and its id must be [`SyncBackground::block_author_sync_source`]. |
595 | | /// |
596 | | /// Each block holds its runtime if it has been verified. |
597 | | /// |
598 | | /// Some of the sources can represent networking peers that have already been disconnected. If |
599 | | /// that is the case, no new request is started against these sources but existing requests |
600 | | /// are allowed to finish. |
601 | | /// This "trick" is necessary in order to not cancel requests that have already been started |
602 | | /// against a peer when it disconnects and that might already have a response. |
603 | | /// |
604 | | /// Each on-going request has a corresponding background task in [`SyncBackground::sub_tasks`]. |
605 | | sync: all::AllSync<(), Option<NetworkSourceInfo>, NonFinalizedBlock>, |
606 | | |
607 | | /// Source within the [`SyncBackground::sync`] to use to import locally-authored blocks. |
608 | | block_author_sync_source: all::SourceId, |
609 | | |
610 | | /// State of the authoring. If `None`, the builder should be (re)created. If `Some`, also |
611 | | /// contains the list of public keys that were loaded from the keystore when creating the |
612 | | /// builder. |
613 | | /// |
614 | | /// The difference between a value of `None` and a value of `Some(Builder::Idle)` is that |
615 | | /// `None` indicates that we should try to author a block as soon as possible, while `Idle` |
616 | | /// means that we shouldn't try again until some event occurs (at which point this field is |
617 | | /// set to `None`). For instance, if the operation of building a block fails, the state is set |
618 | | /// to `Idle` so as to avoid trying to create a block over and over again. |
619 | | // TODO: this list of public keys is a bit hacky |
620 | | block_authoring: Option<(author::build::Builder, Vec<[u8; 32]>)>, |
621 | | |
622 | | /// See [`Config::slot_duration_author_ratio`]. |
623 | | slot_duration_author_ratio: u16, |
624 | | |
625 | | /// After a block has been authored, it is inserted here while waiting for the `sync` to |
626 | | /// import it. Contains the block height, the block hash, the SCALE-encoded block header, and |
627 | | /// the list of SCALE-encoded extrinsics of the block. |
628 | | authored_block: Option<(u64, [u8; 32], Vec<u8>, Vec<Vec<u8>>)>, |
629 | | |
630 | | /// See [`Config::keystore`]. |
631 | | keystore: Arc<keystore::Keystore>, |
632 | | |
633 | | /// Runtime of the latest finalized block. |
634 | | /// |
635 | | /// The runtime is extracted when necessary then put back it place. |
636 | | /// |
637 | | /// The `Arc` is shared with [`NonFinalizedBlock::Verified::runtime`]. |
638 | | finalized_runtime: Arc<executor::host::HostVmPrototype>, |
639 | | |
640 | | /// Used to receive messages from the frontend service, and to detect when it shuts down. |
641 | | to_background_rx: mpsc::Receiver<ToBackground>, |
642 | | |
643 | | /// List of senders to report events to when they happen. |
644 | | blocks_notifications: Vec<async_channel::Sender<Notification>>, |
645 | | |
646 | | /// Notification ready to be sent to [`SyncBackground::blocks_notifications`]. |
647 | | pending_notification: Option<Notification>, |
648 | | |
649 | | /// Service managing the connections to the networking peers. |
650 | | network_service: Arc<network_service::NetworkService>, |
651 | | |
652 | | /// Index, within the [`SyncBackground::network_service`], of the chain that this sync service |
653 | | /// is syncing from. This value must be passed as parameter when starting requests on the |
654 | | /// network service. |
655 | | network_chain_id: network_service::ChainId, |
656 | | |
657 | | /// If `true`, [`network_service::NetworkService::set_local_best_block`] should be called in |
658 | | /// the near future. |
659 | | network_local_chain_update_needed: bool, |
660 | | |
661 | | /// SCALE-encoded header, hash, and height of a block waiting to be announced to other peers. |
662 | | pending_block_announce: Option<(Vec<u8>, [u8; 32], u64)>, |
663 | | |
664 | | /// Stream of events coming from the [`SyncBackground::network_service`]. Used to know what |
665 | | /// happens on the peer-to-peer network. |
666 | | from_network_service: stream::BoxStream<'static, network_service::Event>, |
667 | | |
668 | | /// For each networking peer, the identifier of the source in [`SyncBackground::sync`]. |
669 | | /// This map is kept up-to-date with the "chain connections" of the network service. Whenever |
670 | | /// a connection is established with a peer, an entry is inserted in this map and a source is |
671 | | /// added to [`SyncBackground::sync`], and whenever a connection is closed, the map entry and |
672 | | /// source are removed. |
673 | | peers_source_id_map: hashbrown::HashMap<libp2p::PeerId, all::SourceId, fnv::FnvBuildHasher>, |
674 | | |
675 | | /// Futures that get executed by the background task. |
676 | | sub_tasks: FuturesUnordered<Pin<Box<dyn Future<Output = SubtaskFinished> + Send>>>, |
677 | | |
678 | | /// See [`Config::log_callback`]. |
679 | | log_callback: Arc<dyn LogCallback + Send + Sync>, |
680 | | |
681 | | /// See [`Config::database`]. |
682 | | database: Arc<database_thread::DatabaseThread>, |
683 | | |
684 | | /// Whether an old block or storage item from an old block is currently being downloaded or |
685 | | /// must be downloaded. |
686 | | database_catch_up_download: DatabaseCatchUpDownload, |
687 | | |
688 | | /// Whether an old block or storage item from an old block is currently being downloaded or |
689 | | /// must be downloaded. |
690 | | database_catch_up_download_block_verification: DatabaseCatchUpDownloadBlockVerification, |
691 | | |
692 | | /// How to report events about blocks. |
693 | | jaeger_service: Arc<jaeger_service::JaegerService>, |
694 | | } |
695 | | |
696 | | #[derive(Clone)] |
697 | | enum NonFinalizedBlock { |
698 | | NotVerified, |
699 | | Verified { |
700 | | /// Runtime of the block. Generally either identical to its parent's runtime, or a |
701 | | /// different one. |
702 | | /// |
703 | | /// The `Arc` is shared with [`SyncBackground::finalized_runtime`]. |
704 | | runtime: Arc<executor::host::HostVmPrototype>, |
705 | | }, |
706 | | } |
707 | | |
708 | | /// Information about a source in the sync state machine. |
709 | | #[derive(Debug, Clone)] |
710 | | struct NetworkSourceInfo { |
711 | | /// Identity of the peer according to the networking. |
712 | | peer_id: libp2p::PeerId, |
713 | | /// If `true`, this peer is considered disconnected by the network, and no new request should |
714 | | /// be started against it. |
715 | | is_disconnected: bool, |
716 | | } |
717 | | |
718 | | enum SubtaskFinished { |
719 | | BlocksRequestFinished { |
720 | | request_id: all::RequestId, |
721 | | source_id: all::SourceId, |
722 | | result: Result<Vec<BlockData>, network_service::BlocksRequestError>, |
723 | | }, |
724 | | WarpSyncRequestFinished { |
725 | | request_id: all::RequestId, |
726 | | source_id: all::SourceId, |
727 | | result: Result< |
728 | | network::service::EncodedGrandpaWarpSyncResponse, |
729 | | network_service::WarpSyncRequestError, |
730 | | >, |
731 | | }, |
732 | | StorageRequestFinished { |
733 | | request_id: all::RequestId, |
734 | | source_id: all::SourceId, |
735 | | result: Result<network::service::EncodedMerkleProof, ()>, |
736 | | }, |
737 | | CallProofRequestFinished { |
738 | | request_id: all::RequestId, |
739 | | source_id: all::SourceId, |
740 | | result: Result<network::service::EncodedMerkleProof, ()>, |
741 | | }, |
742 | | } |
743 | | |
744 | | #[derive(Debug, Clone)] |
745 | | enum DatabaseCatchUpDownload { |
746 | | /// No download currently in progress. |
747 | | NoDownloadInProgress, |
748 | | /// No download currently in progress, and we know that nothing is missing from the database. |
749 | | NothingToDownloadCache, |
750 | | /// Currently downloading. |
751 | | InProgress(all::RequestId), |
752 | | } |
753 | | |
754 | | #[derive(Debug, Clone)] |
755 | | enum DatabaseCatchUpDownloadBlockVerification { |
756 | | /// No download is required. |
757 | | None, |
758 | | /// A call proof download should be started next. |
759 | | CallProofDesired { |
760 | | block_hash: [u8; 32], |
761 | | block_number: u64, |
762 | | function_name: String, |
763 | | parameter: Vec<u8>, |
764 | | }, |
765 | | /// A storage proof download should be started next. |
766 | | CodeStorageProofDesired { |
767 | | block_hash: [u8; 32], |
768 | | block_number: u64, |
769 | | }, |
770 | | /// Currently downloading. |
771 | | InProgress(all::RequestId), |
772 | | } |
773 | | |
774 | | impl SyncBackground { |
775 | 21 | async fn run(mut self) { _RNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground3run Line | Count | Source | 775 | 21 | async fn run(mut self) { |
Unexecuted instantiation: _RNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground3run |
776 | 21 | let mut process_sync = true; |
777 | | |
778 | | loop { |
779 | | enum WakeUpReason { |
780 | | ReadyToAuthor, |
781 | | FrontendEvent(ToBackground), |
782 | | FrontendClosed, |
783 | | SendPendingNotification(Notification), |
784 | | StartNetworkRequest { |
785 | | source_id: all::SourceId, |
786 | | request: all::DesiredRequest, |
787 | | database_catch_up_type: DbCatchUpType, |
788 | | }, |
789 | | NetworkEvent(network_service::Event), |
790 | | NetworkLocalChainUpdate, |
791 | | AnnounceBlock(Vec<u8>, [u8; 32], u64), |
792 | | SubtaskFinished(SubtaskFinished), |
793 | | SyncProcess, |
794 | | } |
795 | | |
796 | | enum DbCatchUpType { |
797 | | No, |
798 | | BlockVerification, |
799 | | Database, |
800 | | } |
801 | | |
802 | 64 | let wake_up_reason: WakeUpReason = { |
803 | | // Creating the block authoring state and prepare a future that is ready when something |
804 | | // related to the block authoring is ready. |
805 | | // TODO: refactor as a separate task? |
806 | | // TODO: restore block authoring after https://github.com/smol-dot/smoldot/issues/1109 |
807 | 64 | let authoring_ready_future = { |
808 | 64 | future::pending::<WakeUpReason>() |
809 | | /*// TODO: overhead to call best_block_consensus() multiple times |
810 | | let local_authorities = { |
811 | | let namespace_filter = match self.sync.best_block_consensus() { |
812 | | chain_information::ChainInformationConsensusRef::Aura { .. } => { |
813 | | Some(keystore::KeyNamespace::Aura) |
814 | | } |
815 | | chain_information::ChainInformationConsensusRef::Babe { .. } => { |
816 | | Some(keystore::KeyNamespace::Babe) |
817 | | } |
818 | | chain_information::ChainInformationConsensusRef::Unknown => { |
819 | | // In `Unknown` mode, all keys are accepted and there is no |
820 | | // filter on the namespace, as we can't author blocks anyway. |
821 | | // TODO: is that correct? |
822 | | None |
823 | | } |
824 | | }; |
825 | | |
826 | | // Calling `keys()` on the keystore is racy, but that's considered |
827 | | // acceptable and part of the design of the node. |
828 | | self.keystore |
829 | | .keys() |
830 | | .await |
831 | | .filter(|(namespace, _)| { |
832 | | namespace_filter.map_or(true, |n| *namespace == n) |
833 | | }) |
834 | | .map(|(_, key)| key) |
835 | | .collect::<Vec<_>>() // TODO: collect overhead :-/ |
836 | | }; |
837 | | |
838 | | let block_authoring = |
839 | | match (&mut self.block_authoring, self.sync.best_block_consensus()) { |
840 | | (Some(ba), _) => Some(ba), |
841 | | ( |
842 | | block_authoring @ None, |
843 | | chain_information::ChainInformationConsensusRef::Aura { |
844 | | finalized_authorities_list, // TODO: field name not appropriate; should probably change the chain_information module |
845 | | slot_duration, |
846 | | }, |
847 | | ) => Some( |
848 | | block_authoring.insert(( |
849 | | author::build::Builder::new(author::build::Config { |
850 | | consensus: author::build::ConfigConsensus::Aura { |
851 | | current_authorities: finalized_authorities_list, |
852 | | local_authorities: local_authorities.iter(), |
853 | | now_from_unix_epoch: SystemTime::now() |
854 | | .duration_since(SystemTime::UNIX_EPOCH) |
855 | | .unwrap(), |
856 | | slot_duration, |
857 | | }, |
858 | | }), |
859 | | local_authorities, |
860 | | )), |
861 | | ), |
862 | | ( |
863 | | None, |
864 | | chain_information::ChainInformationConsensusRef::Babe { .. }, |
865 | | ) => { |
866 | | None // TODO: the block authoring doesn't support Babe at the moment |
867 | | } |
868 | | (None, _) => todo!(), |
869 | | }; |
870 | | |
871 | | match &block_authoring { |
872 | | Some((author::build::Builder::Ready(_), _)) => future::Either::Left( |
873 | | future::Either::Left(future::ready(Instant::now())), |
874 | | ), |
875 | | Some((author::build::Builder::WaitSlot(when), _)) => { |
876 | | let delay = (UNIX_EPOCH + when.when()) |
877 | | .duration_since(SystemTime::now()) |
878 | | .unwrap_or_else(|_| Duration::new(0, 0)); |
879 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( |
880 | | delay, |
881 | | ))) |
882 | | } |
883 | | None => future::Either::Left(future::Either::Right(future::pending())), |
884 | | Some((author::build::Builder::Idle, _)) => { |
885 | | // If the block authoring is idle, which happens in case of error, |
886 | | // sleep for an arbitrary duration before resetting it. |
887 | | // This prevents the authoring from trying over and over again to generate |
888 | | // a bad block. |
889 | | let delay = Duration::from_secs(2); |
890 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( |
891 | | delay, |
892 | | ))) |
893 | | } |
894 | | }*/ |
895 | | }; |
896 | | |
897 | 64 | async { |
898 | 64 | if let Some(notification0 ) = self.pending_notification.take() { |
899 | 0 | WakeUpReason::SendPendingNotification(notification) |
900 | | } else { |
901 | 64 | future::pending().await40 |
902 | | } |
903 | 0 | } _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run00CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 897 | 6 | async { | 898 | 6 | if let Some(notification0 ) = self.pending_notification.take() { | 899 | 0 | WakeUpReason::SendPendingNotification(notification) | 900 | | } else { | 901 | 6 | future::pending().await3 | 902 | | } | 903 | 0 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run00Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run00CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run00CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 897 | 58 | async { | 898 | 58 | if let Some(notification0 ) = self.pending_notification.take() { | 899 | 0 | WakeUpReason::SendPendingNotification(notification) | 900 | | } else { | 901 | 58 | future::pending().await37 | 902 | | } | 903 | 0 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run00Ba_ |
904 | 64 | .or(async move { |
905 | 64 | authoring_ready_future.await40 ; |
906 | 0 | WakeUpReason::ReadyToAuthor |
907 | 64 | }) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 904 | 6 | .or(async move { | 905 | 6 | authoring_ready_future.await3 ; | 906 | 0 | WakeUpReason::ReadyToAuthor | 907 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 904 | 58 | .or(async move { | 905 | 58 | authoring_ready_future.await37 ; | 906 | 0 | WakeUpReason::ReadyToAuthor | 907 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s_0Ba_ |
908 | 64 | .or(async { |
909 | 64 | self.to_background_rx |
910 | 64 | .next() |
911 | 40 | .await |
912 | 22 | .map_or(WakeUpReason::FrontendClosed, WakeUpReason::FrontendEvent) |
913 | 64 | }) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s0_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 908 | 6 | .or(async { | 909 | 6 | self.to_background_rx | 910 | 6 | .next() | 911 | 3 | .await | 912 | 2 | .map_or(WakeUpReason::FrontendClosed, WakeUpReason::FrontendEvent) | 913 | 2 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s0_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s0_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 908 | 58 | .or(async { | 909 | 58 | self.to_background_rx | 910 | 58 | .next() | 911 | 37 | .await | 912 | 20 | .map_or(WakeUpReason::FrontendClosed, WakeUpReason::FrontendEvent) | 913 | 20 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s0_0Ba_ |
914 | 64 | .or(async { |
915 | 64 | WakeUpReason::NetworkEvent(self.from_network_service.next().await18 .unwrap()) |
916 | 64 | }) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s1_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 914 | 6 | .or(async { | 915 | 6 | WakeUpReason::NetworkEvent(self.from_network_service.next().await1 .unwrap()) | 916 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s1_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s1_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 914 | 58 | .or(async { | 915 | 58 | WakeUpReason::NetworkEvent(self.from_network_service.next().await17 .unwrap()) | 916 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s1_0Ba_ |
917 | 64 | .or(async { |
918 | 64 | if self.network_local_chain_update_needed { |
919 | 21 | self.network_local_chain_update_needed = false; |
920 | 21 | WakeUpReason::NetworkLocalChainUpdate |
921 | | } else { |
922 | 43 | future::pending().await18 |
923 | | } |
924 | 64 | }21 ) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s2_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 917 | 6 | .or(async { | 918 | 6 | if self.network_local_chain_update_needed { | 919 | 2 | self.network_local_chain_update_needed = false; | 920 | 2 | WakeUpReason::NetworkLocalChainUpdate | 921 | | } else { | 922 | 4 | future::pending().await1 | 923 | | } | 924 | 2 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s2_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s2_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 917 | 58 | .or(async { | 918 | 58 | if self.network_local_chain_update_needed { | 919 | 19 | self.network_local_chain_update_needed = false; | 920 | 19 | WakeUpReason::NetworkLocalChainUpdate | 921 | | } else { | 922 | 39 | future::pending().await17 | 923 | | } | 924 | 19 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s2_0Ba_ |
925 | 64 | .or(async {43 |
926 | 43 | if let Some((header, hash, height0 )) = self.pending_block_announce.take() { |
927 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) |
928 | | } else { |
929 | 43 | future::pending().await18 |
930 | | } |
931 | 64 | }0 ) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s3_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 925 | 4 | .or(async { | 926 | 4 | if let Some((header, hash, height0 )) = self.pending_block_announce.take() { | 927 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) | 928 | | } else { | 929 | 4 | future::pending().await1 | 930 | | } | 931 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s3_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s3_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 925 | 39 | .or(async { | 926 | 39 | if let Some((header, hash, height0 )) = self.pending_block_announce.take() { | 927 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) | 928 | | } else { | 929 | 39 | future::pending().await17 | 930 | | } | 931 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s3_0Ba_ |
932 | 64 | .or(async {43 |
933 | 43 | let Some(subtask_finished0 ) = self.sub_tasks.next().await0 else { |
934 | 43 | future::pending().await18 |
935 | | }; |
936 | 0 | WakeUpReason::SubtaskFinished(subtask_finished) |
937 | 64 | }) _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s4_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 932 | 4 | .or(async { | 933 | 4 | let Some(subtask_finished0 ) = self.sub_tasks.next().await0 else { | 934 | 4 | future::pending().await1 | 935 | | }; | 936 | 0 | WakeUpReason::SubtaskFinished(subtask_finished) | 937 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s4_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s4_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s4_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 932 | 39 | .or(async { | 933 | 39 | let Some(subtask_finished0 ) = self.sub_tasks.next().await0 else { | 934 | 39 | future::pending().await17 | 935 | | }; | 936 | 0 | WakeUpReason::SubtaskFinished(subtask_finished) | 937 | 0 | }) |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s4_0Ba_ |
938 | 64 | .or({ |
939 | 64 | async { |
940 | 43 | // TODO: handle obsolete requests |
941 | 43 | // Ask the sync state machine whether any new network request should |
942 | 43 | // be started. |
943 | 43 | // `desired_requests()` returns, in decreasing order of priority, the |
944 | 43 | // requests that should be started in order for the syncing to proceed. We |
945 | 43 | // simply pick the first request, but enforce one ongoing request per |
946 | 43 | // source. |
947 | 43 | // TODO: desired_requests() is expensive and done at every iteration |
948 | 43 | let request_to_start = self.sync.desired_requests().find( |
949 | 43 | |(source_id, source_info, request_details)| { |
950 | 0 | if source_info |
951 | 0 | .as_ref() |
952 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBc_14SyncBackground3run0s5_000CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBc_14SyncBackground3run0s5_000Be_ Unexecuted instantiation: _RNCNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBc_14SyncBackground3run0s5_000CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBc_14SyncBackground3run0s5_000CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBc_14SyncBackground3run0s5_000Be_ |
953 | | { |
954 | | // Source is a networking source that has already been disconnected. |
955 | 0 | false |
956 | 0 | } else if *source_id != self.block_author_sync_source { |
957 | | // Remote source. |
958 | 0 | self.sync.source_num_ongoing_requests(*source_id) == 0 |
959 | | } else { |
960 | | // Locally-authored blocks source. |
961 | 0 | match (request_details, &self.authored_block) { |
962 | | ( |
963 | | all::DesiredRequest::BlocksRequest { |
964 | 0 | first_block_hash, |
965 | 0 | first_block_height, |
966 | 0 | .. |
967 | 0 | }, |
968 | 0 | Some((authored_height, authored_hash, _, _)), |
969 | 0 | ) if first_block_hash == authored_hash |
970 | 0 | && first_block_height == authored_height => |
971 | 0 | { |
972 | 0 | true |
973 | | } |
974 | 0 | _ => false, |
975 | | } |
976 | | } |
977 | 43 | }0 , Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_00Bc_ |
978 | 43 | ); |
979 | 43 | if let Some((source_id, _, request0 )) = request_to_start { |
980 | 0 | return WakeUpReason::StartNetworkRequest { |
981 | 0 | source_id, |
982 | 0 | request, |
983 | 0 | database_catch_up_type: DbCatchUpType::No, |
984 | 0 | }; |
985 | 43 | } |
986 | 43 | |
987 | 43 | match self.database_catch_up_download_block_verification.clone() { |
988 | 0 | _ if !matches!( |
989 | 43 | self.database_catch_up_download, |
990 | | DatabaseCatchUpDownload::NoDownloadInProgress |
991 | | | DatabaseCatchUpDownload::NothingToDownloadCache |
992 | 0 | ) => {} |
993 | 43 | DatabaseCatchUpDownloadBlockVerification::None => {} |
994 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(_) => {} |
995 | | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { |
996 | 0 | block_hash, |
997 | 0 | block_number, |
998 | 0 | function_name, |
999 | 0 | parameter, |
1000 | | } => { |
1001 | | // Choose which source to query. We have to use an `if` because |
1002 | | // `knows_non_finalized_block` panics if the parameter is inferior |
1003 | | // or equal to the finalized block number. |
1004 | 0 | let source_id = if block_number |
1005 | 0 | <= self.sync.finalized_block_number() |
1006 | | { |
1007 | 0 | self.sync |
1008 | 0 | .sources() |
1009 | 0 | .filter(|s| *s != self.block_author_sync_source) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s_0Bc_ |
1010 | 0 | .choose(&mut rand::thread_rng()) |
1011 | | } else { |
1012 | 0 | self.sync |
1013 | 0 | .knows_non_finalized_block(block_number, &block_hash) |
1014 | 0 | .filter(|source_id| { |
1015 | 0 | *source_id != self.block_author_sync_source |
1016 | 0 | && self.sync.source_num_ongoing_requests(*source_id) |
1017 | 0 | == 0 |
1018 | 0 | }) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s0_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s0_0Bc_ |
1019 | 0 | .choose(&mut rand::thread_rng()) |
1020 | | }; |
1021 | | |
1022 | 0 | if let Some(source_id) = source_id { |
1023 | 0 | return WakeUpReason::StartNetworkRequest { |
1024 | 0 | source_id, |
1025 | 0 | request: all::DesiredRequest::RuntimeCallMerkleProof { |
1026 | 0 | block_hash, |
1027 | 0 | function_name: function_name.into(), |
1028 | 0 | parameter_vectored: parameter.into(), |
1029 | 0 | }, |
1030 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, |
1031 | 0 | }; |
1032 | 0 | } |
1033 | | } |
1034 | | DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { |
1035 | 0 | block_hash, |
1036 | 0 | block_number, |
1037 | | } => { |
1038 | | // Choose which source to query. We have to use an `if` because |
1039 | | // `knows_non_finalized_block` panics if the parameter is inferior |
1040 | | // or equal to the finalized block number. |
1041 | 0 | let source_id = if block_number |
1042 | 0 | <= self.sync.finalized_block_number() |
1043 | | { |
1044 | 0 | self.sync |
1045 | 0 | .sources() |
1046 | 0 | .filter(|s| *s != self.block_author_sync_source) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s1_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s1_0Bc_ |
1047 | 0 | .choose(&mut rand::thread_rng()) |
1048 | | } else { |
1049 | 0 | self.sync |
1050 | 0 | .knows_non_finalized_block(block_number, &block_hash) |
1051 | 0 | .filter(|source_id| { |
1052 | 0 | *source_id != self.block_author_sync_source |
1053 | 0 | && self.sync.source_num_ongoing_requests(*source_id) |
1054 | 0 | == 0 |
1055 | 0 | }) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s2_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s2_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s2_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s2_0Bc_ |
1056 | 0 | .choose(&mut rand::thread_rng()) |
1057 | | }; |
1058 | | |
1059 | 0 | if let Some(source_id) = source_id { |
1060 | 0 | return WakeUpReason::StartNetworkRequest { |
1061 | 0 | source_id, |
1062 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { |
1063 | 0 | block_hash, |
1064 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily |
1065 | 0 | keys: vec![":code".into(), ":heappages".into()], |
1066 | 0 | }, |
1067 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, |
1068 | 0 | }; |
1069 | 0 | } |
1070 | | } |
1071 | | } |
1072 | | |
1073 | | // If the sync state machine doesn't require any additional request, ask |
1074 | | // the database whether any storage item is missing. |
1075 | 1 | if matches!( |
1076 | 43 | self.database_catch_up_download, |
1077 | | DatabaseCatchUpDownload::NoDownloadInProgress |
1078 | | ) { |
1079 | | // TODO: this has a O(n^2) complexity; in case all sources are busy, we iterate a lot |
1080 | 42 | let missing_items18 = self |
1081 | 42 | .database |
1082 | 42 | .with_database(|db| { |
1083 | 42 | db.finalized_and_above_missing_trie_nodes_unordered() |
1084 | 42 | }) _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s3_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 1082 | 4 | .with_database(|db| { | 1083 | 4 | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | 4 | }) |
Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s3_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s3_0CscDgN54JpMGG_6author _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 1082 | 38 | .with_database(|db| { | 1083 | 38 | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | 38 | }) |
Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s3_0Bc_ |
1085 | 18 | .await |
1086 | 18 | .unwrap(); |
1087 | 18 | if missing_items.is_empty() { |
1088 | 18 | self.database_catch_up_download = |
1089 | 18 | DatabaseCatchUpDownload::NothingToDownloadCache; |
1090 | 18 | }0 |
1091 | | |
1092 | 18 | for missing_item0 in missing_items |
1093 | 18 | .into_iter() |
1094 | 18 | .flat_map(|item| item.blocks.into_iter()0 ) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s4_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s4_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s4_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s4_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s4_0Bc_ |
1095 | | { |
1096 | | // Since the database and sync state machine are supposed to have the |
1097 | | // same finalized block, it is guaranteed that the missing item are |
1098 | | // in the finalized block or above. |
1099 | 0 | debug_assert!( |
1100 | 0 | missing_item.number >= self.sync.finalized_block_number() |
1101 | | ); |
1102 | | |
1103 | | // Choose which source to query. We have to use an `if` because |
1104 | | // `knows_non_finalized_block` panics if the parameter is inferior |
1105 | | // or equal to the finalized block number. |
1106 | 0 | let source_id = if missing_item.number |
1107 | 0 | <= self.sync.finalized_block_number() |
1108 | | { |
1109 | 0 | let Some(source_id) = self |
1110 | 0 | .sync |
1111 | 0 | .sources() |
1112 | 0 | .filter(|s| *s != self.block_author_sync_source) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s5_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s5_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s5_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s5_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s5_0Bc_ |
1113 | 0 | .choose(&mut rand::thread_rng()) |
1114 | | else { |
1115 | 0 | break; |
1116 | | }; |
1117 | 0 | source_id |
1118 | | } else { |
1119 | 0 | let Some(source_id) = self |
1120 | 0 | .sync |
1121 | 0 | .knows_non_finalized_block( |
1122 | 0 | missing_item.number, |
1123 | 0 | &missing_item.hash, |
1124 | 0 | ) |
1125 | 0 | .filter(|source_id| { |
1126 | 0 | *source_id != self.block_author_sync_source |
1127 | 0 | && self.sync.source_num_ongoing_requests(*source_id) |
1128 | 0 | == 0 |
1129 | 0 | }) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s6_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s6_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s6_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s6_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s6_0Bc_ |
1130 | 0 | .choose(&mut rand::thread_rng()) |
1131 | | else { |
1132 | 0 | continue; |
1133 | | }; |
1134 | 0 | source_id |
1135 | | }; |
1136 | | |
1137 | 0 | return WakeUpReason::StartNetworkRequest { |
1138 | 0 | source_id, |
1139 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { |
1140 | 0 | block_hash: missing_item.hash, |
1141 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily |
1142 | 0 | keys: vec![trie::nibbles_to_bytes_suffix_extend( |
1143 | 0 | missing_item |
1144 | 0 | .trie_node_key_nibbles |
1145 | 0 | .into_iter() |
1146 | 0 | // In order to download more than one item at a time, |
1147 | 0 | // we add some randomly-generated nibbles to the |
1148 | 0 | // requested key. The request will target the missing |
1149 | 0 | // key plus a few other random keys. |
1150 | 0 | .chain((0..32).map(|_| { |
1151 | 0 | rand::Rng::gen_range( |
1152 | 0 | &mut rand::thread_rng(), |
1153 | 0 | 0..16, |
1154 | 0 | ) |
1155 | 0 | })) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s7_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s7_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s7_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s7_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s7_0Bc_ |
1156 | 0 | .map(|n| trie::Nibble::try_from(n).unwrap()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s8_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s8_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s8_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s8_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0s5_0s8_0Bc_ |
1157 | 0 | ) |
1158 | 0 | .collect::<Vec<_>>()], |
1159 | 0 | }, |
1160 | 0 | database_catch_up_type: DbCatchUpType::Database, |
1161 | 0 | }; |
1162 | | } |
1163 | 1 | } |
1164 | | |
1165 | | // No network request to start. |
1166 | 19 | future::pending().await0 |
1167 | 64 | }0 _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s5_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 939 | 4 | async { | 940 | 4 | // TODO: handle obsolete requests | 941 | 4 | // Ask the sync state machine whether any new network request should | 942 | 4 | // be started. | 943 | 4 | // `desired_requests()` returns, in decreasing order of priority, the | 944 | 4 | // requests that should be started in order for the syncing to proceed. We | 945 | 4 | // simply pick the first request, but enforce one ongoing request per | 946 | 4 | // source. | 947 | 4 | // TODO: desired_requests() is expensive and done at every iteration | 948 | 4 | let request_to_start = self.sync.desired_requests().find( | 949 | 4 | |(source_id, source_info, request_details)| { | 950 | | if source_info | 951 | | .as_ref() | 952 | | .map_or(false, |info| info.is_disconnected) | 953 | | { | 954 | | // Source is a networking source that has already been disconnected. | 955 | | false | 956 | | } else if *source_id != self.block_author_sync_source { | 957 | | // Remote source. | 958 | | self.sync.source_num_ongoing_requests(*source_id) == 0 | 959 | | } else { | 960 | | // Locally-authored blocks source. | 961 | | match (request_details, &self.authored_block) { | 962 | | ( | 963 | | all::DesiredRequest::BlocksRequest { | 964 | | first_block_hash, | 965 | | first_block_height, | 966 | | .. 
| 967 | | }, | 968 | | Some((authored_height, authored_hash, _, _)), | 969 | | ) if first_block_hash == authored_hash | 970 | | && first_block_height == authored_height => | 971 | | { | 972 | | true | 973 | | } | 974 | | _ => false, | 975 | | } | 976 | | } | 977 | 4 | }, | 978 | 4 | ); | 979 | 4 | if let Some((source_id, _, request0 )) = request_to_start { | 980 | 0 | return WakeUpReason::StartNetworkRequest { | 981 | 0 | source_id, | 982 | 0 | request, | 983 | 0 | database_catch_up_type: DbCatchUpType::No, | 984 | 0 | }; | 985 | 4 | } | 986 | 4 | | 987 | 4 | match self.database_catch_up_download_block_verification.clone() { | 988 | 0 | _ if !matches!( | 989 | 4 | self.database_catch_up_download, | 990 | | DatabaseCatchUpDownload::NoDownloadInProgress | 991 | | | DatabaseCatchUpDownload::NothingToDownloadCache | 992 | 0 | ) => {} | 993 | 4 | DatabaseCatchUpDownloadBlockVerification::None => {} | 994 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(_) => {} | 995 | | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 996 | 0 | block_hash, | 997 | 0 | block_number, | 998 | 0 | function_name, | 999 | 0 | parameter, | 1000 | | } => { | 1001 | | // Choose which source to query. We have to use an `if` because | 1002 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1003 | | // or equal to the finalized block number. 
| 1004 | 0 | let source_id = if block_number | 1005 | 0 | <= self.sync.finalized_block_number() | 1006 | | { | 1007 | 0 | self.sync | 1008 | 0 | .sources() | 1009 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1010 | 0 | .choose(&mut rand::thread_rng()) | 1011 | | } else { | 1012 | 0 | self.sync | 1013 | 0 | .knows_non_finalized_block(block_number, &block_hash) | 1014 | 0 | .filter(|source_id| { | 1015 | | *source_id != self.block_author_sync_source | 1016 | | && self.sync.source_num_ongoing_requests(*source_id) | 1017 | | == 0 | 1018 | 0 | }) | 1019 | 0 | .choose(&mut rand::thread_rng()) | 1020 | | }; | 1021 | | | 1022 | 0 | if let Some(source_id) = source_id { | 1023 | 0 | return WakeUpReason::StartNetworkRequest { | 1024 | 0 | source_id, | 1025 | 0 | request: all::DesiredRequest::RuntimeCallMerkleProof { | 1026 | 0 | block_hash, | 1027 | 0 | function_name: function_name.into(), | 1028 | 0 | parameter_vectored: parameter.into(), | 1029 | 0 | }, | 1030 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, | 1031 | 0 | }; | 1032 | 0 | } | 1033 | | } | 1034 | | DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 1035 | 0 | block_hash, | 1036 | 0 | block_number, | 1037 | | } => { | 1038 | | // Choose which source to query. We have to use an `if` because | 1039 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1040 | | // or equal to the finalized block number. 
| 1041 | 0 | let source_id = if block_number | 1042 | 0 | <= self.sync.finalized_block_number() | 1043 | | { | 1044 | 0 | self.sync | 1045 | 0 | .sources() | 1046 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1047 | 0 | .choose(&mut rand::thread_rng()) | 1048 | | } else { | 1049 | 0 | self.sync | 1050 | 0 | .knows_non_finalized_block(block_number, &block_hash) | 1051 | 0 | .filter(|source_id| { | 1052 | | *source_id != self.block_author_sync_source | 1053 | | && self.sync.source_num_ongoing_requests(*source_id) | 1054 | | == 0 | 1055 | 0 | }) | 1056 | 0 | .choose(&mut rand::thread_rng()) | 1057 | | }; | 1058 | | | 1059 | 0 | if let Some(source_id) = source_id { | 1060 | 0 | return WakeUpReason::StartNetworkRequest { | 1061 | 0 | source_id, | 1062 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { | 1063 | 0 | block_hash, | 1064 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1065 | 0 | keys: vec![":code".into(), ":heappages".into()], | 1066 | 0 | }, | 1067 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, | 1068 | 0 | }; | 1069 | 0 | } | 1070 | | } | 1071 | | } | 1072 | | | 1073 | | // If the sync state machine doesn't require any additional request, ask | 1074 | | // the database whether any storage item is missing. 
| 1075 | 0 | if matches!( | 1076 | 4 | self.database_catch_up_download, | 1077 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1078 | | ) { | 1079 | | // TODO: this has a O(n^2) complexity; in case all sources are busy, we iterate a lot | 1080 | 4 | let missing_items1 = self | 1081 | 4 | .database | 1082 | 4 | .with_database(|db| { | 1083 | | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | 4 | }) | 1085 | 1 | .await | 1086 | 1 | .unwrap(); | 1087 | 1 | if missing_items.is_empty() { | 1088 | 1 | self.database_catch_up_download = | 1089 | 1 | DatabaseCatchUpDownload::NothingToDownloadCache; | 1090 | 1 | }0 | 1091 | | | 1092 | 1 | for missing_item0 in missing_items | 1093 | 1 | .into_iter() | 1094 | 1 | .flat_map(|item| item.blocks.into_iter()) | 1095 | | { | 1096 | | // Since the database and sync state machine are supposed to have the | 1097 | | // same finalized block, it is guaranteed that the missing item are | 1098 | | // in the finalized block or above. | 1099 | 0 | debug_assert!( | 1100 | 0 | missing_item.number >= self.sync.finalized_block_number() | 1101 | | ); | 1102 | | | 1103 | | // Choose which source to query. We have to use an `if` because | 1104 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1105 | | // or equal to the finalized block number. 
| 1106 | 0 | let source_id = if missing_item.number | 1107 | 0 | <= self.sync.finalized_block_number() | 1108 | | { | 1109 | 0 | let Some(source_id) = self | 1110 | 0 | .sync | 1111 | 0 | .sources() | 1112 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1113 | 0 | .choose(&mut rand::thread_rng()) | 1114 | | else { | 1115 | 0 | break; | 1116 | | }; | 1117 | 0 | source_id | 1118 | | } else { | 1119 | 0 | let Some(source_id) = self | 1120 | 0 | .sync | 1121 | 0 | .knows_non_finalized_block( | 1122 | 0 | missing_item.number, | 1123 | 0 | &missing_item.hash, | 1124 | 0 | ) | 1125 | 0 | .filter(|source_id| { | 1126 | | *source_id != self.block_author_sync_source | 1127 | | && self.sync.source_num_ongoing_requests(*source_id) | 1128 | | == 0 | 1129 | 0 | }) | 1130 | 0 | .choose(&mut rand::thread_rng()) | 1131 | | else { | 1132 | 0 | continue; | 1133 | | }; | 1134 | 0 | source_id | 1135 | | }; | 1136 | | | 1137 | 0 | return WakeUpReason::StartNetworkRequest { | 1138 | 0 | source_id, | 1139 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { | 1140 | 0 | block_hash: missing_item.hash, | 1141 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1142 | 0 | keys: vec![trie::nibbles_to_bytes_suffix_extend( | 1143 | 0 | missing_item | 1144 | 0 | .trie_node_key_nibbles | 1145 | 0 | .into_iter() | 1146 | 0 | // In order to download more than one item at a time, | 1147 | 0 | // we add some randomly-generated nibbles to the | 1148 | 0 | // requested key. The request will target the missing | 1149 | 0 | // key plus a few other random keys. 
| 1150 | 0 | .chain((0..32).map(|_| { | 1151 | | rand::Rng::gen_range( | 1152 | | &mut rand::thread_rng(), | 1153 | | 0..16, | 1154 | | ) | 1155 | 0 | })) | 1156 | 0 | .map(|n| trie::Nibble::try_from(n).unwrap()), | 1157 | 0 | ) | 1158 | 0 | .collect::<Vec<_>>()], | 1159 | 0 | }, | 1160 | 0 | database_catch_up_type: DbCatchUpType::Database, | 1161 | 0 | }; | 1162 | | } | 1163 | 0 | } | 1164 | | | 1165 | | // No network request to start. | 1166 | 1 | future::pending().await0 | 1167 | 0 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s5_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s5_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s5_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 939 | 39 | async { | 940 | 39 | // TODO: handle obsolete requests | 941 | 39 | // Ask the sync state machine whether any new network request should | 942 | 39 | // be started. | 943 | 39 | // `desired_requests()` returns, in decreasing order of priority, the | 944 | 39 | // requests that should be started in order for the syncing to proceed. We | 945 | 39 | // simply pick the first request, but enforce one ongoing request per | 946 | 39 | // source. | 947 | 39 | // TODO: desired_requests() is expensive and done at every iteration | 948 | 39 | let request_to_start = self.sync.desired_requests().find( | 949 | 39 | |(source_id, source_info, request_details)| { | 950 | | if source_info | 951 | | .as_ref() | 952 | | .map_or(false, |info| info.is_disconnected) | 953 | | { | 954 | | // Source is a networking source that has already been disconnected. | 955 | | false | 956 | | } else if *source_id != self.block_author_sync_source { | 957 | | // Remote source. | 958 | | self.sync.source_num_ongoing_requests(*source_id) == 0 | 959 | | } else { | 960 | | // Locally-authored blocks source. | 961 | | match (request_details, &self.authored_block) { | 962 | | ( | 963 | | all::DesiredRequest::BlocksRequest { | 964 | | first_block_hash, | 965 | | first_block_height, | 966 | | .. 
| 967 | | }, | 968 | | Some((authored_height, authored_hash, _, _)), | 969 | | ) if first_block_hash == authored_hash | 970 | | && first_block_height == authored_height => | 971 | | { | 972 | | true | 973 | | } | 974 | | _ => false, | 975 | | } | 976 | | } | 977 | 39 | }, | 978 | 39 | ); | 979 | 39 | if let Some((source_id, _, request0 )) = request_to_start { | 980 | 0 | return WakeUpReason::StartNetworkRequest { | 981 | 0 | source_id, | 982 | 0 | request, | 983 | 0 | database_catch_up_type: DbCatchUpType::No, | 984 | 0 | }; | 985 | 39 | } | 986 | 39 | | 987 | 39 | match self.database_catch_up_download_block_verification.clone() { | 988 | 0 | _ if !matches!( | 989 | 39 | self.database_catch_up_download, | 990 | | DatabaseCatchUpDownload::NoDownloadInProgress | 991 | | | DatabaseCatchUpDownload::NothingToDownloadCache | 992 | 0 | ) => {} | 993 | 39 | DatabaseCatchUpDownloadBlockVerification::None => {} | 994 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(_) => {} | 995 | | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 996 | 0 | block_hash, | 997 | 0 | block_number, | 998 | 0 | function_name, | 999 | 0 | parameter, | 1000 | | } => { | 1001 | | // Choose which source to query. We have to use an `if` because | 1002 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1003 | | // or equal to the finalized block number. 
| 1004 | 0 | let source_id = if block_number | 1005 | 0 | <= self.sync.finalized_block_number() | 1006 | | { | 1007 | 0 | self.sync | 1008 | 0 | .sources() | 1009 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1010 | 0 | .choose(&mut rand::thread_rng()) | 1011 | | } else { | 1012 | 0 | self.sync | 1013 | 0 | .knows_non_finalized_block(block_number, &block_hash) | 1014 | 0 | .filter(|source_id| { | 1015 | | *source_id != self.block_author_sync_source | 1016 | | && self.sync.source_num_ongoing_requests(*source_id) | 1017 | | == 0 | 1018 | 0 | }) | 1019 | 0 | .choose(&mut rand::thread_rng()) | 1020 | | }; | 1021 | | | 1022 | 0 | if let Some(source_id) = source_id { | 1023 | 0 | return WakeUpReason::StartNetworkRequest { | 1024 | 0 | source_id, | 1025 | 0 | request: all::DesiredRequest::RuntimeCallMerkleProof { | 1026 | 0 | block_hash, | 1027 | 0 | function_name: function_name.into(), | 1028 | 0 | parameter_vectored: parameter.into(), | 1029 | 0 | }, | 1030 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, | 1031 | 0 | }; | 1032 | 0 | } | 1033 | | } | 1034 | | DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 1035 | 0 | block_hash, | 1036 | 0 | block_number, | 1037 | | } => { | 1038 | | // Choose which source to query. We have to use an `if` because | 1039 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1040 | | // or equal to the finalized block number. 
| 1041 | 0 | let source_id = if block_number | 1042 | 0 | <= self.sync.finalized_block_number() | 1043 | | { | 1044 | 0 | self.sync | 1045 | 0 | .sources() | 1046 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1047 | 0 | .choose(&mut rand::thread_rng()) | 1048 | | } else { | 1049 | 0 | self.sync | 1050 | 0 | .knows_non_finalized_block(block_number, &block_hash) | 1051 | 0 | .filter(|source_id| { | 1052 | | *source_id != self.block_author_sync_source | 1053 | | && self.sync.source_num_ongoing_requests(*source_id) | 1054 | | == 0 | 1055 | 0 | }) | 1056 | 0 | .choose(&mut rand::thread_rng()) | 1057 | | }; | 1058 | | | 1059 | 0 | if let Some(source_id) = source_id { | 1060 | 0 | return WakeUpReason::StartNetworkRequest { | 1061 | 0 | source_id, | 1062 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { | 1063 | 0 | block_hash, | 1064 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1065 | 0 | keys: vec![":code".into(), ":heappages".into()], | 1066 | 0 | }, | 1067 | 0 | database_catch_up_type: DbCatchUpType::BlockVerification, | 1068 | 0 | }; | 1069 | 0 | } | 1070 | | } | 1071 | | } | 1072 | | | 1073 | | // If the sync state machine doesn't require any additional request, ask | 1074 | | // the database whether any storage item is missing. 
| 1075 | 1 | if matches!( | 1076 | 39 | self.database_catch_up_download, | 1077 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1078 | | ) { | 1079 | | // TODO: this has a O(n^2) complexity; in case all sources are busy, we iterate a lot | 1080 | 38 | let missing_items17 = self | 1081 | 38 | .database | 1082 | 38 | .with_database(|db| { | 1083 | | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | 38 | }) | 1085 | 17 | .await | 1086 | 17 | .unwrap(); | 1087 | 17 | if missing_items.is_empty() { | 1088 | 17 | self.database_catch_up_download = | 1089 | 17 | DatabaseCatchUpDownload::NothingToDownloadCache; | 1090 | 17 | }0 | 1091 | | | 1092 | 17 | for missing_item0 in missing_items | 1093 | 17 | .into_iter() | 1094 | 17 | .flat_map(|item| item.blocks.into_iter()) | 1095 | | { | 1096 | | // Since the database and sync state machine are supposed to have the | 1097 | | // same finalized block, it is guaranteed that the missing item are | 1098 | | // in the finalized block or above. | 1099 | 0 | debug_assert!( | 1100 | 0 | missing_item.number >= self.sync.finalized_block_number() | 1101 | | ); | 1102 | | | 1103 | | // Choose which source to query. We have to use an `if` because | 1104 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1105 | | // or equal to the finalized block number. 
| 1106 | 0 | let source_id = if missing_item.number | 1107 | 0 | <= self.sync.finalized_block_number() | 1108 | | { | 1109 | 0 | let Some(source_id) = self | 1110 | 0 | .sync | 1111 | 0 | .sources() | 1112 | 0 | .filter(|s| *s != self.block_author_sync_source) | 1113 | 0 | .choose(&mut rand::thread_rng()) | 1114 | | else { | 1115 | 0 | break; | 1116 | | }; | 1117 | 0 | source_id | 1118 | | } else { | 1119 | 0 | let Some(source_id) = self | 1120 | 0 | .sync | 1121 | 0 | .knows_non_finalized_block( | 1122 | 0 | missing_item.number, | 1123 | 0 | &missing_item.hash, | 1124 | 0 | ) | 1125 | 0 | .filter(|source_id| { | 1126 | | *source_id != self.block_author_sync_source | 1127 | | && self.sync.source_num_ongoing_requests(*source_id) | 1128 | | == 0 | 1129 | 0 | }) | 1130 | 0 | .choose(&mut rand::thread_rng()) | 1131 | | else { | 1132 | 0 | continue; | 1133 | | }; | 1134 | 0 | source_id | 1135 | | }; | 1136 | | | 1137 | 0 | return WakeUpReason::StartNetworkRequest { | 1138 | 0 | source_id, | 1139 | 0 | request: all::DesiredRequest::StorageGetMerkleProof { | 1140 | 0 | block_hash: missing_item.hash, | 1141 | 0 | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1142 | 0 | keys: vec![trie::nibbles_to_bytes_suffix_extend( | 1143 | 0 | missing_item | 1144 | 0 | .trie_node_key_nibbles | 1145 | 0 | .into_iter() | 1146 | 0 | // In order to download more than one item at a time, | 1147 | 0 | // we add some randomly-generated nibbles to the | 1148 | 0 | // requested key. The request will target the missing | 1149 | 0 | // key plus a few other random keys. 
| 1150 | 0 | .chain((0..32).map(|_| { | 1151 | | rand::Rng::gen_range( | 1152 | | &mut rand::thread_rng(), | 1153 | | 0..16, | 1154 | | ) | 1155 | 0 | })) | 1156 | 0 | .map(|n| trie::Nibble::try_from(n).unwrap()), | 1157 | 0 | ) | 1158 | 0 | .collect::<Vec<_>>()], | 1159 | 0 | }, | 1160 | 0 | database_catch_up_type: DbCatchUpType::Database, | 1161 | 0 | }; | 1162 | | } | 1163 | 1 | } | 1164 | | | 1165 | | // No network request to start. | 1166 | 18 | future::pending().await0 | 1167 | 0 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s5_0Ba_ |
1168 | 64 | }) |
1169 | 64 | .or({ |
1170 | 64 | let is_downloading = matches!0 ( |
1171 | 64 | self.database_catch_up_download_block_verification, |
1172 | | DatabaseCatchUpDownloadBlockVerification::None |
1173 | | ); |
1174 | 43 | async move { |
1175 | 43 | if !process_sync || !is_downloading21 { |
1176 | 22 | future::pending().await18 |
1177 | 21 | } |
1178 | 21 | WakeUpReason::SyncProcess |
1179 | 21 | } _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s6_0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 1174 | 4 | async move { | 1175 | 4 | if !process_sync || !is_downloading2 { | 1176 | 2 | future::pending().await1 | 1177 | 2 | } | 1178 | 2 | WakeUpReason::SyncProcess | 1179 | 2 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s6_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s6_0CscDgN54JpMGG_6author _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s6_0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 1174 | 39 | async move { | 1175 | 39 | if !process_sync || !is_downloading19 { | 1176 | 20 | future::pending().await17 | 1177 | 19 | } | 1178 | 19 | WakeUpReason::SyncProcess | 1179 | 19 | } |
Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s6_0Ba_ |
1180 | | }) |
1181 | 40 | .await |
1182 | | }; |
1183 | | |
1184 | 1 | match wake_up_reason { |
1185 | | WakeUpReason::ReadyToAuthor => { |
1186 | | // Ready to author a block. Call `author_block()`. |
1187 | | // While a block is being authored, the whole syncing state machine is |
1188 | | // deliberately frozen. |
1189 | 0 | match self.block_authoring { |
1190 | | Some((author::build::Builder::Ready(_), _)) => { |
1191 | 0 | self.author_block().await; |
1192 | | } |
1193 | 0 | Some((author::build::Builder::WaitSlot(when), local_authorities)) => { |
1194 | 0 | self.block_authoring = Some(( |
1195 | 0 | author::build::Builder::Ready(when.start()), |
1196 | 0 | local_authorities, |
1197 | 0 | )); |
1198 | 0 | self.author_block().await; |
1199 | | } |
1200 | 0 | Some((author::build::Builder::Idle, _)) => { |
1201 | 0 | self.block_authoring = None; |
1202 | 0 | } |
1203 | | None => { |
1204 | 0 | unreachable!() |
1205 | | } |
1206 | | } |
1207 | | |
1208 | 0 | process_sync = true; |
1209 | | } |
1210 | | |
1211 | | WakeUpReason::FrontendClosed => { |
1212 | | // Shutdown. |
1213 | 21 | return; |
1214 | | } |
1215 | | |
1216 | | WakeUpReason::FrontendEvent(ToBackground::SubscribeAll { |
1217 | 0 | buffer_size, |
1218 | 0 | _max_finalized_pinned_blocks: _, |
1219 | 0 | result_tx, |
1220 | 0 | }) => { |
1221 | 0 | let (tx, new_blocks) = async_channel::bounded(buffer_size.saturating_sub(1)); |
1222 | | |
1223 | 0 | let non_finalized_blocks_ancestry_order = { |
1224 | 0 | let blocks_in = self |
1225 | 0 | .sync |
1226 | 0 | .non_finalized_blocks_ancestry_order() |
1227 | 0 | .map(|h| { |
1228 | 0 | ( |
1229 | 0 | h.number, |
1230 | 0 | h.scale_encoding_vec(self.sync.block_number_bytes()), |
1231 | 0 | *h.parent_hash, |
1232 | 0 | ) |
1233 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s7_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s7_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s7_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s7_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s7_0Ba_ |
1234 | 0 | .collect::<Vec<_>>(); |
1235 | 0 | let mut blocks_out = Vec::new(); |
1236 | 0 | for (number, scale_encoding, parent_hash) in blocks_in { |
1237 | 0 | let hash = header::hash_from_scale_encoded_header(&scale_encoding); |
1238 | 0 | let runtime = match &self.sync[(number, &hash)] { |
1239 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), |
1240 | 0 | _ => unreachable!(), |
1241 | | }; |
1242 | 0 | let runtime_update = if Arc::ptr_eq(&self.finalized_runtime, &runtime) { |
1243 | 0 | None |
1244 | | } else { |
1245 | 0 | Some(runtime.clone()) |
1246 | | }; |
1247 | 0 | blocks_out.push(BlockNotification { |
1248 | 0 | is_new_best: header::hash_from_scale_encoded_header( |
1249 | 0 | &scale_encoding, |
1250 | 0 | ) == *self.sync.best_block_hash(), |
1251 | 0 | block_hash: header::hash_from_scale_encoded_header(&scale_encoding), |
1252 | 0 | scale_encoded_header: scale_encoding, |
1253 | 0 | runtime_update, |
1254 | 0 | parent_hash, |
1255 | 0 | }); |
1256 | | } |
1257 | 0 | blocks_out |
1258 | 0 | }; |
1259 | 0 |
|
1260 | 0 | self.blocks_notifications.push(tx); |
1261 | 0 | let _ = result_tx.send(SubscribeAll { |
1262 | 0 | id: SubscriptionId(0), // TODO: |
1263 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), |
1264 | 0 | finalized_block_scale_encoded_header: self |
1265 | 0 | .sync |
1266 | 0 | .finalized_block_header() |
1267 | 0 | .to_owned(), |
1268 | 0 | finalized_block_runtime: self.finalized_runtime.clone(), |
1269 | 0 | non_finalized_blocks_ancestry_order, |
1270 | 0 | new_blocks, |
1271 | 0 | }); |
1272 | | } |
1273 | 0 | WakeUpReason::SendPendingNotification(notification) => { |
1274 | | // Elements in `blocks_notifications` are removed one by one and inserted |
1275 | | // back if the channel is still open. |
1276 | 0 | for index in (0..self.blocks_notifications.len()).rev() { |
1277 | 0 | let subscription = self.blocks_notifications.swap_remove(index); |
1278 | 0 | if subscription.try_send(notification.clone()).is_err() { |
1279 | 0 | continue; |
1280 | 0 | } |
1281 | 0 | self.blocks_notifications.push(subscription); |
1282 | | } |
1283 | | } |
1284 | | |
1285 | 0 | WakeUpReason::FrontendEvent(ToBackground::GetSyncState { result_tx }) => { |
1286 | 0 | let _ = result_tx.send(SyncState { |
1287 | 0 | best_block_hash: *self.sync.best_block_hash(), |
1288 | 0 | best_block_number: self.sync.best_block_number(), |
1289 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), |
1290 | 0 | finalized_block_number: self.sync.finalized_block_number(), |
1291 | 0 | }); |
1292 | 0 | } |
1293 | 0 | WakeUpReason::FrontendEvent(ToBackground::Unpin { result_tx, .. }) => { |
1294 | 0 | // TODO: check whether block was indeed pinned, and prune blocks that aren't pinned anymore from the database |
1295 | 0 | let _ = result_tx.send(()); |
1296 | 0 | } |
1297 | 1 | WakeUpReason::FrontendEvent(ToBackground::IsMajorSyncingHint { result_tx }) => { |
1298 | 1 | // As documented, the value returned doesn't need to be precise. |
1299 | 1 | let result = match self.sync.status() { |
1300 | 1 | all::Status::Sync => false, |
1301 | | all::Status::WarpSyncFragments { .. } |
1302 | 0 | | all::Status::WarpSyncChainInformation { .. } => true, |
1303 | | }; |
1304 | | |
1305 | 1 | let _ = result_tx.send(result); |
1306 | | } |
1307 | | |
1308 | | WakeUpReason::NetworkLocalChainUpdate => { |
1309 | 21 | self.network_service |
1310 | 21 | .set_local_best_block( |
1311 | 21 | self.network_chain_id, |
1312 | 21 | *self.sync.best_block_hash(), |
1313 | 21 | self.sync.best_block_number(), |
1314 | 21 | ) |
1315 | 0 | .await; |
1316 | | } |
1317 | | |
1318 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) => { |
1319 | | // We can never be guaranteed that a certain source does *not* know about a |
1320 | | // block, however it is not a big problem to send a block announce to a source |
1321 | | // that already knows about that block. For this reason, the list of sources |
1322 | | // we send the block announce to is `all_sources - sources_that_know_it`. |
1323 | | // |
1324 | | // Note that not sending block announces to sources that already |
1325 | | // know that block means that these sources might also miss the |
1326 | | // fact that our local best block has been updated. This is in |
1327 | | // practice not a problem either. |
1328 | 0 | let sources_to_announce_to = { |
1329 | 0 | let mut all_sources = self |
1330 | 0 | .sync |
1331 | 0 | .sources() |
1332 | 0 | .collect::<HashSet<_, fnv::FnvBuildHasher>>(); |
1333 | 0 | for knows in self.sync.knows_non_finalized_block(height, &hash) { |
1334 | 0 | all_sources.remove(&knows); |
1335 | 0 | } |
1336 | 0 | all_sources |
1337 | 0 | }; |
1338 | 0 |
|
1339 | 0 | let is_best = *self.sync.best_block_hash() == hash; |
1340 | | |
1341 | 0 | for source_id in sources_to_announce_to { |
1342 | 0 | let peer_id = match &self.sync[source_id] { |
1343 | 0 | Some(info) if !info.is_disconnected => &info.peer_id, |
1344 | 0 | _ => continue, |
1345 | | }; |
1346 | | |
1347 | 0 | if self |
1348 | 0 | .network_service |
1349 | 0 | .clone() |
1350 | 0 | .send_block_announce( |
1351 | 0 | peer_id.clone(), |
1352 | 0 | self.network_chain_id, |
1353 | 0 | header.clone(), |
1354 | 0 | is_best, |
1355 | 0 | ) |
1356 | 0 | .await |
1357 | 0 | .is_ok() |
1358 | 0 | { |
1359 | 0 | // Note that `try_add_known_block_to_source` might have |
1360 | 0 | // no effect, which is not a problem considering that this |
1361 | 0 | // block tracking is mostly about optimizations and |
1362 | 0 | // politeness. |
1363 | 0 | self.sync |
1364 | 0 | .try_add_known_block_to_source(source_id, height, hash); |
1365 | 0 | } |
1366 | | } |
1367 | | } |
1368 | | |
1369 | | WakeUpReason::NetworkEvent(network_service::Event::Connected { |
1370 | 0 | peer_id, |
1371 | 0 | chain_id, |
1372 | 0 | best_block_number, |
1373 | 0 | best_block_hash, |
1374 | 0 | }) if chain_id == self.network_chain_id => { |
1375 | 0 | // Most of the time, we insert a new source in the state machine. |
1376 | 0 | // However, a source of that `PeerId` might already exist but be considered as |
1377 | 0 | // disconnected. If that is the case, we simply mark it as no |
1378 | 0 | // longer disconnected. |
1379 | 0 | match self.peers_source_id_map.entry(peer_id) { |
1380 | 0 | hashbrown::hash_map::Entry::Occupied(entry) => { |
1381 | 0 | let id = *entry.get(); |
1382 | 0 | let is_disconnected = |
1383 | 0 | &mut self.sync[id].as_mut().unwrap().is_disconnected; |
1384 | 0 | debug_assert!(*is_disconnected); |
1385 | 0 | *is_disconnected = false; |
1386 | | } |
1387 | 0 | hashbrown::hash_map::Entry::Vacant(entry) => { |
1388 | 0 | let id = self |
1389 | 0 | .sync |
1390 | 0 | .prepare_add_source(best_block_number, best_block_hash) |
1391 | 0 | .add_source( |
1392 | 0 | Some(NetworkSourceInfo { |
1393 | 0 | peer_id: entry.key().clone(), |
1394 | 0 | is_disconnected: false, |
1395 | 0 | }), |
1396 | 0 | NonFinalizedBlock::NotVerified, |
1397 | 0 | ); |
1398 | 0 | entry.insert(id); |
1399 | 0 | } |
1400 | | } |
1401 | | } |
1402 | | WakeUpReason::NetworkEvent(network_service::Event::Disconnected { |
1403 | 0 | peer_id, |
1404 | 0 | chain_id, |
1405 | 0 | }) if chain_id == self.network_chain_id => { |
1406 | 0 | // Sources that disconnect are only immediately removed from the sync state |
1407 | 0 | // machine if they have no request in progress. If that is not the case, they |
1408 | 0 | // are instead only marked as disconnected. |
1409 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); |
1410 | 0 | if self.sync.source_num_ongoing_requests(id) == 0 { |
1411 | 0 | self.peers_source_id_map.remove(&peer_id).unwrap(); |
1412 | 0 | let (_, mut _requests) = self.sync.remove_source(id); |
1413 | 0 | debug_assert!(_requests.next().is_none()); |
1414 | | } else { |
1415 | 0 | let is_disconnected = &mut self.sync[id].as_mut().unwrap().is_disconnected; |
1416 | 0 | debug_assert!(!*is_disconnected); |
1417 | 0 | *is_disconnected = true; |
1418 | | } |
1419 | | } |
1420 | | WakeUpReason::NetworkEvent(network_service::Event::BlockAnnounce { |
1421 | 0 | chain_id, |
1422 | 0 | peer_id, |
1423 | 0 | scale_encoded_header, |
1424 | 0 | is_best, |
1425 | 0 | }) if chain_id == self.network_chain_id => { |
1426 | 0 | let _jaeger_span = self.jaeger_service.block_announce_process_span( |
1427 | 0 | &header::hash_from_scale_encoded_header(&scale_encoded_header), |
1428 | 0 | ); |
1429 | 0 |
|
1430 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); |
1431 | 0 | // TODO: log the outcome |
1432 | 0 | match self.sync.block_announce(id, scale_encoded_header, is_best) { |
1433 | 0 | all::BlockAnnounceOutcome::TooOld { .. } => {} |
1434 | 0 | all::BlockAnnounceOutcome::AlreadyVerified(known) |
1435 | 0 | | all::BlockAnnounceOutcome::AlreadyPending(known) => { |
1436 | 0 | known.update_source_and_block(); |
1437 | 0 | } |
1438 | 0 | all::BlockAnnounceOutcome::Unknown(unknown) => { |
1439 | 0 | unknown.insert_and_update_source(NonFinalizedBlock::NotVerified) |
1440 | | } |
1441 | 0 | all::BlockAnnounceOutcome::InvalidHeader(_) => unreachable!(), // TODO: ?!?! why unreachable? also, ban the peer |
1442 | | } |
1443 | | } |
1444 | | WakeUpReason::NetworkEvent(network_service::Event::GrandpaNeighborPacket { |
1445 | 0 | chain_id, |
1446 | 0 | peer_id, |
1447 | 0 | finalized_block_height, |
1448 | 0 | }) if chain_id == self.network_chain_id => { |
1449 | 0 | let source_id = *self.peers_source_id_map.get(&peer_id).unwrap(); |
1450 | 0 | self.sync |
1451 | 0 | .update_source_finality_state(source_id, finalized_block_height); |
1452 | 0 | } |
1453 | 0 | WakeUpReason::NetworkEvent(_) => { |
1454 | 0 | // Different chain index. |
1455 | 0 | } |
1456 | | |
1457 | | WakeUpReason::StartNetworkRequest { |
1458 | 0 | source_id, |
1459 | 0 | request: request_info @ all::DesiredRequest::BlocksRequest { .. }, |
1460 | 0 | database_catch_up_type, |
1461 | 0 | } if source_id == self.block_author_sync_source => { |
1462 | 0 | debug_assert!(matches!(database_catch_up_type, DbCatchUpType::No)); |
1463 | | |
1464 | 0 | self.log_callback.log( |
1465 | 0 | LogLevel::Debug, |
1466 | 0 | "queue-locally-authored-block-for-import".to_string(), |
1467 | 0 | ); |
1468 | 0 |
|
1469 | 0 | let (_, block_hash, scale_encoded_header, scale_encoded_extrinsics) = |
1470 | 0 | self.authored_block.take().unwrap(); |
1471 | 0 |
|
1472 | 0 | let _jaeger_span = self.jaeger_service.block_import_queue_span(&block_hash); |
1473 | 0 |
|
1474 | 0 | // Create a request that is immediately answered right below. |
1475 | 0 | let request_id = self.sync.add_request(source_id, request_info.into(), ()); |
1476 | 0 | // TODO: announce the block on the network, but only after it's been imported |
1477 | 0 | self.sync.blocks_request_response( |
1478 | 0 | request_id, |
1479 | 0 | iter::once(all::BlockRequestSuccessBlock { |
1480 | 0 | scale_encoded_header, |
1481 | 0 | scale_encoded_extrinsics, |
1482 | 0 | scale_encoded_justifications: Vec::new(), |
1483 | 0 | user_data: NonFinalizedBlock::NotVerified, |
1484 | 0 | }), |
1485 | 0 | ); |
1486 | | } |
1487 | | |
1488 | | WakeUpReason::StartNetworkRequest { |
1489 | 0 | source_id, |
1490 | 0 | request: |
1491 | 0 | all::DesiredRequest::BlocksRequest { |
1492 | 0 | first_block_hash, |
1493 | 0 | first_block_height, |
1494 | 0 | num_blocks, |
1495 | 0 | request_headers, |
1496 | 0 | request_bodies, |
1497 | 0 | request_justification, |
1498 | 0 | }, |
1499 | 0 | database_catch_up_type, |
1500 | 0 | } => { |
1501 | 0 | // Before notifying the syncing of the request, clamp the number of blocks to |
1502 | 0 | // the number of blocks we expect to receive. |
1503 | 0 | let num_blocks = NonZeroU64::new(cmp::min(num_blocks.get(), 64)).unwrap(); |
1504 | | |
1505 | 0 | let peer_id = { |
1506 | 0 | let info = self.sync[source_id].clone().unwrap(); |
1507 | 0 | // Disconnected sources are filtered out above. |
1508 | 0 | debug_assert!(!info.is_disconnected); |
1509 | 0 | info.peer_id |
1510 | 0 | }; |
1511 | 0 |
|
1512 | 0 | // TODO: add jaeger span |
1513 | 0 |
|
1514 | 0 | let request = self.network_service.clone().blocks_request( |
1515 | 0 | peer_id, |
1516 | 0 | self.network_chain_id, |
1517 | 0 | network::codec::BlocksRequestConfig { |
1518 | 0 | start: network::codec::BlocksRequestConfigStart::Hash(first_block_hash), |
1519 | 0 | desired_count: NonZeroU32::new( |
1520 | 0 | u32::try_from(num_blocks.get()).unwrap_or(u32::MAX), |
1521 | 0 | ) |
1522 | 0 | .unwrap(), |
1523 | 0 | // The direction is hardcoded based on the documentation of the syncing |
1524 | 0 | // state machine. |
1525 | 0 | direction: network::codec::BlocksRequestDirection::Descending, |
1526 | 0 | fields: network::codec::BlocksRequestFields { |
1527 | 0 | header: true, // TODO: always set to true due to unwrapping the header when the response comes |
1528 | 0 | body: request_bodies, |
1529 | 0 | justifications: request_justification, |
1530 | 0 | }, |
1531 | 0 | }, |
1532 | 0 | ); |
1533 | 0 |
|
1534 | 0 | let request_id = self.sync.add_request( |
1535 | 0 | source_id, |
1536 | 0 | all::DesiredRequest::BlocksRequest { |
1537 | 0 | first_block_hash, |
1538 | 0 | first_block_height, |
1539 | 0 | num_blocks, |
1540 | 0 | request_headers, |
1541 | 0 | request_bodies, |
1542 | 0 | request_justification, |
1543 | 0 | } |
1544 | 0 | .into(), |
1545 | 0 | (), |
1546 | 0 | ); |
1547 | 0 |
|
1548 | 0 | match database_catch_up_type { |
1549 | 0 | DbCatchUpType::No => {} |
1550 | | DbCatchUpType::Database => { |
1551 | 0 | debug_assert!(matches!( |
1552 | 0 | self.database_catch_up_download, |
1553 | | DatabaseCatchUpDownload::NoDownloadInProgress |
1554 | | )); |
1555 | 0 | self.database_catch_up_download = |
1556 | 0 | DatabaseCatchUpDownload::InProgress(request_id); |
1557 | | } |
1558 | 0 | DbCatchUpType::BlockVerification => { |
1559 | 0 | self.database_catch_up_download_block_verification = |
1560 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); |
1561 | 0 | } |
1562 | | } |
1563 | | |
1564 | 0 | self.sub_tasks.push(Box::pin(async move { |
1565 | 0 | let result = request.await; |
1566 | 0 | SubtaskFinished::BlocksRequestFinished { |
1567 | 0 | request_id, |
1568 | 0 | source_id, |
1569 | 0 | result, |
1570 | 0 | } |
1571 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s8_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s8_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s8_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s8_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s8_0Ba_ |
1572 | | } |
1573 | | |
1574 | | WakeUpReason::StartNetworkRequest { |
1575 | 0 | source_id, |
1576 | 0 | request: |
1577 | 0 | all::DesiredRequest::WarpSync { |
1578 | 0 | sync_start_block_hash, |
1579 | 0 | }, |
1580 | 0 | database_catch_up_type, |
1581 | 0 | } => { |
1582 | 0 | // TODO: don't unwrap? could this target the virtual sync source? |
1583 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue |
1584 | 0 |
|
1585 | 0 | let request = self.network_service.clone().warp_sync_request( |
1586 | 0 | peer_id, |
1587 | 0 | self.network_chain_id, |
1588 | 0 | sync_start_block_hash, |
1589 | 0 | ); |
1590 | 0 |
|
1591 | 0 | let request_id = self.sync.add_request( |
1592 | 0 | source_id, |
1593 | 0 | all::RequestDetail::WarpSync { |
1594 | 0 | sync_start_block_hash, |
1595 | 0 | }, |
1596 | 0 | (), |
1597 | 0 | ); |
1598 | 0 |
|
1599 | 0 | match database_catch_up_type { |
1600 | 0 | DbCatchUpType::No => {} |
1601 | | DbCatchUpType::Database => { |
1602 | 0 | debug_assert!(matches!( |
1603 | 0 | self.database_catch_up_download, |
1604 | | DatabaseCatchUpDownload::NoDownloadInProgress |
1605 | | )); |
1606 | 0 | self.database_catch_up_download = |
1607 | 0 | DatabaseCatchUpDownload::InProgress(request_id); |
1608 | | } |
1609 | 0 | DbCatchUpType::BlockVerification => { |
1610 | 0 | self.database_catch_up_download_block_verification = |
1611 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); |
1612 | 0 | } |
1613 | | } |
1614 | | |
1615 | 0 | self.sub_tasks.push(Box::pin(async move { |
1616 | 0 | let result = request.await; |
1617 | 0 | SubtaskFinished::WarpSyncRequestFinished { |
1618 | 0 | request_id, |
1619 | 0 | source_id, |
1620 | 0 | result, |
1621 | 0 | } |
1622 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s9_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s9_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s9_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s9_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0s9_0Ba_ |
1623 | | } |
1624 | | |
1625 | | WakeUpReason::StartNetworkRequest { |
1626 | 0 | source_id, |
1627 | 0 | request: |
1628 | 0 | all::DesiredRequest::StorageGetMerkleProof { |
1629 | 0 | block_hash, keys, .. |
1630 | 0 | }, |
1631 | 0 | database_catch_up_type, |
1632 | 0 | } => { |
1633 | 0 | // TODO: don't unwrap? could this target the virtual sync source? |
1634 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue |
1635 | 0 |
|
1636 | 0 | let request = self.network_service.clone().storage_request( |
1637 | 0 | peer_id, |
1638 | 0 | self.network_chain_id, |
1639 | 0 | network::codec::StorageProofRequestConfig { |
1640 | 0 | block_hash, |
1641 | 0 | keys: keys.clone().into_iter(), |
1642 | 0 | }, |
1643 | 0 | ); |
1644 | 0 |
|
1645 | 0 | let request_id = self.sync.add_request( |
1646 | 0 | source_id, |
1647 | 0 | all::RequestDetail::StorageGet { block_hash, keys }, |
1648 | 0 | (), |
1649 | 0 | ); |
1650 | 0 |
|
1651 | 0 | match database_catch_up_type { |
1652 | 0 | DbCatchUpType::No => {} |
1653 | | DbCatchUpType::Database => { |
1654 | 0 | debug_assert!(matches!( |
1655 | 0 | self.database_catch_up_download, |
1656 | | DatabaseCatchUpDownload::NoDownloadInProgress |
1657 | | )); |
1658 | 0 | self.database_catch_up_download = |
1659 | 0 | DatabaseCatchUpDownload::InProgress(request_id); |
1660 | | } |
1661 | 0 | DbCatchUpType::BlockVerification => { |
1662 | 0 | self.database_catch_up_download_block_verification = |
1663 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); |
1664 | 0 | } |
1665 | | } |
1666 | | |
1667 | 0 | self.sub_tasks.push(Box::pin(async move { |
1668 | 0 | let result = request.await; |
1669 | 0 | SubtaskFinished::StorageRequestFinished { |
1670 | 0 | request_id, |
1671 | 0 | source_id, |
1672 | 0 | result, |
1673 | 0 | } |
1674 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sa_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sa_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sa_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sa_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sa_0Ba_ |
1675 | | } |
1676 | | |
1677 | | WakeUpReason::StartNetworkRequest { |
1678 | 0 | source_id, |
1679 | 0 | request: |
1680 | 0 | all::DesiredRequest::RuntimeCallMerkleProof { |
1681 | 0 | block_hash, |
1682 | 0 | function_name, |
1683 | 0 | parameter_vectored, |
1684 | 0 | }, |
1685 | 0 | database_catch_up_type, |
1686 | 0 | } => { |
1687 | 0 | // TODO: don't unwrap? could this target the virtual sync source? |
1688 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue |
1689 | 0 |
|
1690 | 0 | let request = self.network_service.clone().call_proof_request( |
1691 | 0 | peer_id, |
1692 | 0 | self.network_chain_id, |
1693 | 0 | network::codec::CallProofRequestConfig { |
1694 | 0 | block_hash, |
1695 | 0 | method: function_name.clone(), |
1696 | 0 | parameter_vectored: iter::once(parameter_vectored.clone()), |
1697 | 0 | }, |
1698 | 0 | ); |
1699 | 0 |
|
1700 | 0 | let request_id = self.sync.add_request( |
1701 | 0 | source_id, |
1702 | 0 | all::RequestDetail::RuntimeCallMerkleProof { |
1703 | 0 | block_hash, |
1704 | 0 | function_name, |
1705 | 0 | parameter_vectored, |
1706 | 0 | }, |
1707 | 0 | (), |
1708 | 0 | ); |
1709 | 0 |
|
1710 | 0 | match database_catch_up_type { |
1711 | 0 | DbCatchUpType::No => {} |
1712 | | DbCatchUpType::Database => { |
1713 | 0 | debug_assert!(matches!( |
1714 | 0 | self.database_catch_up_download, |
1715 | | DatabaseCatchUpDownload::NoDownloadInProgress |
1716 | | )); |
1717 | 0 | self.database_catch_up_download = |
1718 | 0 | DatabaseCatchUpDownload::InProgress(request_id); |
1719 | | } |
1720 | 0 | DbCatchUpType::BlockVerification => { |
1721 | 0 | self.database_catch_up_download_block_verification = |
1722 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); |
1723 | 0 | } |
1724 | | } |
1725 | | |
1726 | 0 | self.sub_tasks.push(Box::pin(async move { |
1727 | 0 | let result = request.await; |
1728 | 0 | SubtaskFinished::CallProofRequestFinished { |
1729 | 0 | request_id, |
1730 | 0 | source_id, |
1731 | 0 | result, |
1732 | 0 | } |
1733 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sb_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sb_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sb_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sb_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sb_0Ba_ |
1734 | | } |
1735 | | |
1736 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { |
1737 | 0 | request_id, |
1738 | 0 | source_id, |
1739 | 0 | result: Ok(blocks), |
1740 | | }) => { |
1741 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
1742 | 0 | { |
1743 | 0 | self.database_catch_up_download = |
1744 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
1745 | 0 | } |
1746 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
1747 | 0 | { |
1748 | 0 | self.database_catch_up_download_block_verification = |
1749 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
1750 | 0 | } |
1751 | | |
1752 | | // TODO: insert blocks in database if they are referenced through a parent_hash? |
1753 | | |
1754 | 0 | let _ = self.sync.blocks_request_response( |
1755 | 0 | request_id, |
1756 | 0 | blocks |
1757 | 0 | .into_iter() |
1758 | 0 | .map(|block| all::BlockRequestSuccessBlock { |
1759 | 0 | scale_encoded_header: block.header.unwrap(), // TODO: don't unwrap |
1760 | 0 | scale_encoded_extrinsics: block.body.unwrap(), // TODO: don't unwrap |
1761 | 0 | scale_encoded_justifications: block |
1762 | 0 | .justifications |
1763 | 0 | .unwrap_or_default() |
1764 | 0 | .into_iter() |
1765 | 0 | .map(|j| all::Justification { |
1766 | 0 | engine_id: j.engine_id, |
1767 | 0 | justification: j.justification, |
1768 | 0 | }) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sc_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sc_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sc_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sc_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sc_00Bc_ |
1769 | 0 | .collect(), |
1770 | 0 | user_data: NonFinalizedBlock::NotVerified, |
1771 | 0 | }), Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sc_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sc_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sc_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sc_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sc_0Ba_ |
1772 | 0 | ); |
1773 | 0 |
|
1774 | 0 | // If the source was actually disconnected and has no other request in |
1775 | 0 | // progress, we clean it up. |
1776 | 0 | // TODO: DRY |
1777 | 0 | if self.sync[source_id] |
1778 | 0 | .as_ref() |
1779 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sd_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sd_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sd_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sd_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sd_0Ba_ |
1780 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
1781 | | { |
1782 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
1783 | 0 | debug_assert!(_requests.next().is_none()); |
1784 | 0 | self.peers_source_id_map |
1785 | 0 | .remove(&info.unwrap().peer_id) |
1786 | 0 | .unwrap(); |
1787 | 0 | } |
1788 | | |
1789 | 0 | process_sync = true; |
1790 | | } |
1791 | | |
1792 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { |
1793 | 0 | request_id, |
1794 | 0 | source_id, |
1795 | | result: Err(_), |
1796 | | }) => { |
1797 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
1798 | 0 | { |
1799 | 0 | self.database_catch_up_download = |
1800 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
1801 | 0 | } |
1802 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
1803 | 0 | { |
1804 | 0 | self.database_catch_up_download_block_verification = |
1805 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
1806 | 0 | } |
1807 | | |
1808 | | // Note that we perform the ban even if the source is now disconnected. |
1809 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); |
1810 | 0 | self.network_service |
1811 | 0 | .ban_and_disconnect( |
1812 | 0 | peer_id, |
1813 | 0 | self.network_chain_id, |
1814 | 0 | network_service::BanSeverity::Low, |
1815 | 0 | "blocks-request-error", |
1816 | 0 | ) |
1817 | 0 | .await; |
1818 | | |
1819 | 0 | let _ = self.sync.remove_request(request_id); |
1820 | 0 |
|
1821 | 0 | // If the source was actually disconnected and has no other request in |
1822 | 0 | // progress, we clean it up. |
1823 | 0 | // TODO: DRY |
1824 | 0 | if self.sync[source_id] |
1825 | 0 | .as_ref() |
1826 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0se_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0se_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0se_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0se_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0se_0Ba_ |
1827 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
1828 | | { |
1829 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
1830 | 0 | debug_assert!(_requests.next().is_none()); |
1831 | 0 | self.peers_source_id_map |
1832 | 0 | .remove(&info.unwrap().peer_id) |
1833 | 0 | .unwrap(); |
1834 | 0 | } |
1835 | | |
1836 | 0 | process_sync = true; |
1837 | | } |
1838 | | |
1839 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { |
1840 | 0 | request_id, |
1841 | 0 | source_id, |
1842 | 0 | result: Ok(result), |
1843 | | }) => { |
1844 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
1845 | 0 | { |
1846 | 0 | self.database_catch_up_download = |
1847 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
1848 | 0 | } |
1849 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
1850 | 0 | { |
1851 | 0 | self.database_catch_up_download_block_verification = |
1852 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
1853 | 0 | } |
1854 | | |
1855 | 0 | let decoded = result.decode(); |
1856 | 0 | let fragments = decoded |
1857 | 0 | .fragments |
1858 | 0 | .into_iter() |
1859 | 0 | .map(|f| all::WarpSyncFragment { |
1860 | 0 | scale_encoded_header: f.scale_encoded_header.to_vec(), |
1861 | 0 | scale_encoded_justification: f.scale_encoded_justification.to_vec(), |
1862 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sf_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sf_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sf_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sf_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sf_0Ba_ |
1863 | 0 | .collect(); |
1864 | 0 | let _ = self.sync.grandpa_warp_sync_response( |
1865 | 0 | request_id, |
1866 | 0 | fragments, |
1867 | 0 | decoded.is_finished, |
1868 | 0 | ); |
1869 | 0 |
|
1870 | 0 | // If the source was actually disconnected and has no other request in |
1871 | 0 | // progress, we clean it up. |
1872 | 0 | // TODO: DRY |
1873 | 0 | if self.sync[source_id] |
1874 | 0 | .as_ref() |
1875 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sg_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sg_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sg_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sg_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sg_0Ba_ |
1876 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
1877 | | { |
1878 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
1879 | 0 | debug_assert!(_requests.next().is_none()); |
1880 | 0 | self.peers_source_id_map |
1881 | 0 | .remove(&info.unwrap().peer_id) |
1882 | 0 | .unwrap(); |
1883 | 0 | } |
1884 | | |
1885 | 0 | process_sync = true; |
1886 | | } |
1887 | | |
1888 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { |
1889 | 0 | request_id, |
1890 | 0 | source_id, |
1891 | | result: Err(_), |
1892 | | }) => { |
1893 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
1894 | 0 | { |
1895 | 0 | self.database_catch_up_download = |
1896 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
1897 | 0 | } |
1898 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
1899 | 0 | { |
1900 | 0 | self.database_catch_up_download_block_verification = |
1901 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
1902 | 0 | } |
1903 | | |
1904 | | // Note that we perform the ban even if the source is now disconnected. |
1905 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); |
1906 | 0 | self.network_service |
1907 | 0 | .ban_and_disconnect( |
1908 | 0 | peer_id, |
1909 | 0 | self.network_chain_id, |
1910 | 0 | network_service::BanSeverity::Low, |
1911 | 0 | "warp-sync-request-error", |
1912 | 0 | ) |
1913 | 0 | .await; |
1914 | | |
1915 | 0 | let _ = self.sync.remove_request(request_id); |
1916 | 0 |
|
1917 | 0 | // If the source was actually disconnected and has no other request in |
1918 | 0 | // progress, we clean it up. |
1919 | 0 | // TODO: DRY |
1920 | 0 | if self.sync[source_id] |
1921 | 0 | .as_ref() |
1922 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sh_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sh_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sh_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sh_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sh_0Ba_ |
1923 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
1924 | | { |
1925 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
1926 | 0 | debug_assert!(_requests.next().is_none()); |
1927 | 0 | self.peers_source_id_map |
1928 | 0 | .remove(&info.unwrap().peer_id) |
1929 | 0 | .unwrap(); |
1930 | 0 | } |
1931 | | |
1932 | 0 | process_sync = true; |
1933 | | } |
1934 | | |
1935 | | WakeUpReason::SubtaskFinished(SubtaskFinished::StorageRequestFinished { |
1936 | 0 | request_id, |
1937 | 0 | source_id, |
1938 | 0 | result, |
1939 | | }) => { |
1940 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
1941 | 0 | { |
1942 | 0 | self.database_catch_up_download = |
1943 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
1944 | 0 | } |
1945 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
1946 | 0 | { |
1947 | 0 | self.database_catch_up_download_block_verification = |
1948 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
1949 | 0 | } |
1950 | | |
1951 | 0 | if let Ok(result) = &result { |
1952 | 0 | let result = result.clone(); |
1953 | 0 | self.database |
1954 | 0 | .with_database(move |database| { |
1955 | 0 | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( |
1956 | 0 | trie::proof_decode::Config { |
1957 | 0 | proof: result.decode(), |
1958 | 0 | }, |
1959 | 0 | ) { |
1960 | 0 | for (_, entry) in decoded.iter_ordered() { |
1961 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database |
1962 | 0 | database.insert_trie_nodes( |
1963 | 0 | iter::once(full_sqlite::InsertTrieNode { |
1964 | 0 | merkle_value: Cow::Borrowed(entry.merkle_value), |
1965 | 0 | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_00Bc_ |
1966 | 0 | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_0s_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0si_0s_0Bc_ |
1967 | 0 | storage_value: match entry.trie_node_info.storage_value { |
1968 | | trie::proof_decode::StorageValue::HashKnownValueMissing( |
1969 | | _, |
1970 | 0 | ) => return, |
1971 | | trie::proof_decode::StorageValue::None => { |
1972 | 0 | full_sqlite::InsertTrieNodeStorageValue::NoValue |
1973 | | } |
1974 | | trie::proof_decode::StorageValue::Known { |
1975 | 0 | value, .. |
1976 | 0 | } => full_sqlite::InsertTrieNodeStorageValue::Value { |
1977 | 0 | value: Cow::Borrowed(value), |
1978 | 0 | references_merkle_value: false, // TODO: |
1979 | 0 | }, |
1980 | | }, |
1981 | | }), |
1982 | 0 | match entry.trie_node_info.storage_value { |
1983 | 0 | trie::proof_decode::StorageValue::None => 0, // TODO: ?! |
1984 | | trie::proof_decode::StorageValue::HashKnownValueMissing( |
1985 | | .. |
1986 | 0 | ) => return, |
1987 | | trie::proof_decode::StorageValue::Known { |
1988 | | inline: true, |
1989 | | .. |
1990 | 0 | } => 0, |
1991 | | trie::proof_decode::StorageValue::Known { |
1992 | | inline: false, |
1993 | | .. |
1994 | 0 | } => 1, |
1995 | | }, |
1996 | 0 | ).unwrap(); |
1997 | | } |
1998 | 0 | } |
1999 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0si_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0si_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0si_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0si_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0si_0Ba_ |
2000 | 0 | .await; |
2001 | 0 | } |
2002 | | |
2003 | 0 | if result.is_err() { |
2004 | | // Note that we perform the ban even if the source is now disconnected. |
2005 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); |
2006 | 0 | self.network_service |
2007 | 0 | .ban_and_disconnect( |
2008 | 0 | peer_id, |
2009 | 0 | self.network_chain_id, |
2010 | 0 | network_service::BanSeverity::Low, |
2011 | 0 | "storage-proof-request-error", |
2012 | 0 | ) |
2013 | 0 | .await; |
2014 | 0 | } |
2015 | | |
2016 | 0 | if let Ok(result) = result { |
2017 | 0 | // TODO: to_owned overhead |
2018 | 0 | let _ = self |
2019 | 0 | .sync |
2020 | 0 | .storage_get_response(request_id, result.decode().to_owned()); |
2021 | 0 | } else { |
2022 | 0 | let _ = self.sync.remove_request(request_id); |
2023 | 0 | } |
2024 | | |
2025 | | // If the source was actually disconnected and has no other request in |
2026 | | // progress, we clean it up. |
2027 | | // TODO: DRY |
2028 | 0 | if self.sync[source_id] |
2029 | 0 | .as_ref() |
2030 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sj_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sj_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sj_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sj_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sj_0Ba_ |
2031 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
2032 | | { |
2033 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
2034 | 0 | debug_assert!(_requests.next().is_none()); |
2035 | 0 | self.peers_source_id_map |
2036 | 0 | .remove(&info.unwrap().peer_id) |
2037 | 0 | .unwrap(); |
2038 | 0 | } |
2039 | | |
2040 | 0 | process_sync = true; |
2041 | | } |
2042 | | |
2043 | | WakeUpReason::SubtaskFinished(SubtaskFinished::CallProofRequestFinished { |
2044 | 0 | request_id, |
2045 | 0 | source_id, |
2046 | 0 | result, |
2047 | | }) => { |
2048 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) |
2049 | 0 | { |
2050 | 0 | self.database_catch_up_download = |
2051 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; |
2052 | 0 | } |
2053 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) |
2054 | 0 | { |
2055 | 0 | self.database_catch_up_download_block_verification = |
2056 | 0 | DatabaseCatchUpDownloadBlockVerification::None; |
2057 | 0 | } |
2058 | | |
2059 | | // TODO: DRY with above |
2060 | 0 | if let Ok(result) = &result { |
2061 | 0 | let result = result.clone(); |
2062 | 0 | self.database |
2063 | 0 | .with_database(move |database| { |
2064 | 0 | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( |
2065 | 0 | trie::proof_decode::Config { |
2066 | 0 | proof: result.decode(), |
2067 | 0 | }, |
2068 | 0 | ) { |
2069 | 0 | for (_, entry) in decoded.iter_ordered() { |
2070 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database |
2071 | 0 | database.insert_trie_nodes( |
2072 | 0 | iter::once(full_sqlite::InsertTrieNode { |
2073 | 0 | merkle_value: Cow::Borrowed(entry.merkle_value), |
2074 | 0 | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_00Bc_ |
2075 | 0 | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_0s_0Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground3run0sk_0s_0Bc_ |
2076 | 0 | storage_value: match entry.trie_node_info.storage_value { |
2077 | | trie::proof_decode::StorageValue::HashKnownValueMissing( |
2078 | | _, |
2079 | 0 | ) => return, |
2080 | | trie::proof_decode::StorageValue::None => { |
2081 | 0 | full_sqlite::InsertTrieNodeStorageValue::NoValue |
2082 | | } |
2083 | | trie::proof_decode::StorageValue::Known { |
2084 | 0 | value, .. |
2085 | 0 | } => full_sqlite::InsertTrieNodeStorageValue::Value { |
2086 | 0 | value: Cow::Borrowed(value), |
2087 | 0 | references_merkle_value: false, // TODO: |
2088 | 0 | }, |
2089 | | }, |
2090 | | }), |
2091 | 0 | match entry.trie_node_info.storage_value { |
2092 | 0 | trie::proof_decode::StorageValue::None => 0, // TODO: ?! |
2093 | | trie::proof_decode::StorageValue::HashKnownValueMissing( |
2094 | | .. |
2095 | 0 | ) => return, |
2096 | | trie::proof_decode::StorageValue::Known { |
2097 | | inline: true, |
2098 | | .. |
2099 | 0 | } => 0, |
2100 | | trie::proof_decode::StorageValue::Known { |
2101 | | inline: false, |
2102 | | .. |
2103 | 0 | } => 1, |
2104 | | }, |
2105 | 0 | ).unwrap(); |
2106 | | } |
2107 | 0 | } |
2108 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sk_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sk_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sk_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sk_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sk_0Ba_ |
2109 | 0 | .await; |
2110 | 0 | } |
2111 | | |
2112 | 0 | if result.is_err() { |
2113 | | // Note that we perform the ban even if the source is now disconnected. |
2114 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); |
2115 | 0 | self.network_service |
2116 | 0 | .ban_and_disconnect( |
2117 | 0 | peer_id, |
2118 | 0 | self.network_chain_id, |
2119 | 0 | network_service::BanSeverity::Low, |
2120 | 0 | "call-proof-request-error", |
2121 | 0 | ) |
2122 | 0 | .await; |
2123 | 0 | } |
2124 | | |
2125 | 0 | if let Ok(result) = result { |
2126 | 0 | self.sync |
2127 | 0 | .call_proof_response(request_id, result.decode().to_owned()); |
2128 | 0 | // TODO: need help from networking service to avoid this to_owned |
2129 | 0 | } else { |
2130 | 0 | self.sync.remove_request(request_id); |
2131 | 0 | } |
2132 | | |
2133 | | // If the source was actually disconnected and has no other request in |
2134 | | // progress, we clean it up. |
2135 | | // TODO: DRY |
2136 | 0 | if self.sync[source_id] |
2137 | 0 | .as_ref() |
2138 | 0 | .map_or(false, |info| info.is_disconnected) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sl_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sl_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sl_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sl_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground3run0sl_0Ba_ |
2139 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 |
2140 | | { |
2141 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); |
2142 | 0 | debug_assert!(_requests.next().is_none()); |
2143 | 0 | self.peers_source_id_map |
2144 | 0 | .remove(&info.unwrap().peer_id) |
2145 | 0 | .unwrap(); |
2146 | 0 | } |
2147 | | |
2148 | 0 | process_sync = true; |
2149 | | } |
2150 | | |
2151 | | WakeUpReason::SyncProcess => { |
2152 | | // Given that processing blocks might generate a notification, and that |
2153 | | // only one notification can be queued at a time, this path must never be |
2154 | | // reached if a notification is already waiting. |
2155 | 21 | debug_assert!(self.pending_notification.is_none()); |
2156 | | // Similarly, verifying a block might generate a block announce. |
2157 | 21 | debug_assert!(self.pending_block_announce.is_none()); |
2158 | | |
2159 | | // Given that a block verification might require downloading some storage |
2160 | | // items due to missing storage items, and that we only want one download at |
2161 | | // a time, we don't verify blocks if a download is in progress. |
2162 | 21 | debug_assert!(matches!0 ( |
2163 | 21 | self.database_catch_up_download_block_verification, |
2164 | | DatabaseCatchUpDownloadBlockVerification::None |
2165 | | )); |
2166 | | |
2167 | 21 | let (new_self, maybe_more_to_process) = self.process_blocks().await0 ; |
2168 | 21 | process_sync = maybe_more_to_process; |
2169 | 21 | self = new_self; |
2170 | | } |
2171 | | } |
2172 | | } |
2173 | 21 | } _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground3run0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 775 | 2 | async fn run(mut self) { | 776 | 2 | let mut process_sync = true; | 777 | | | 778 | | loop { | 779 | | enum WakeUpReason { | 780 | | ReadyToAuthor, | 781 | | FrontendEvent(ToBackground), | 782 | | FrontendClosed, | 783 | | SendPendingNotification(Notification), | 784 | | StartNetworkRequest { | 785 | | source_id: all::SourceId, | 786 | | request: all::DesiredRequest, | 787 | | database_catch_up_type: DbCatchUpType, | 788 | | }, | 789 | | NetworkEvent(network_service::Event), | 790 | | NetworkLocalChainUpdate, | 791 | | AnnounceBlock(Vec<u8>, [u8; 32], u64), | 792 | | SubtaskFinished(SubtaskFinished), | 793 | | SyncProcess, | 794 | | } | 795 | | | 796 | | enum DbCatchUpType { | 797 | | No, | 798 | | BlockVerification, | 799 | | Database, | 800 | | } | 801 | | | 802 | 6 | let wake_up_reason: WakeUpReason = { | 803 | | // Creating the block authoring state and prepare a future that is ready when something | 804 | | // related to the block authoring is ready. | 805 | | // TODO: refactor as a separate task? | 806 | | // TODO: restore block authoring after https://github.com/smol-dot/smoldot/issues/1109 | 807 | 6 | let authoring_ready_future = { | 808 | 6 | future::pending::<WakeUpReason>() | 809 | | /*// TODO: overhead to call best_block_consensus() multiple times | 810 | | let local_authorities = { | 811 | | let namespace_filter = match self.sync.best_block_consensus() { | 812 | | chain_information::ChainInformationConsensusRef::Aura { .. } => { | 813 | | Some(keystore::KeyNamespace::Aura) | 814 | | } | 815 | | chain_information::ChainInformationConsensusRef::Babe { .. 
} => { | 816 | | Some(keystore::KeyNamespace::Babe) | 817 | | } | 818 | | chain_information::ChainInformationConsensusRef::Unknown => { | 819 | | // In `Unknown` mode, all keys are accepted and there is no | 820 | | // filter on the namespace, as we can't author blocks anyway. | 821 | | // TODO: is that correct? | 822 | | None | 823 | | } | 824 | | }; | 825 | | | 826 | | // Calling `keys()` on the keystore is racy, but that's considered | 827 | | // acceptable and part of the design of the node. | 828 | | self.keystore | 829 | | .keys() | 830 | | .await | 831 | | .filter(|(namespace, _)| { | 832 | | namespace_filter.map_or(true, |n| *namespace == n) | 833 | | }) | 834 | | .map(|(_, key)| key) | 835 | | .collect::<Vec<_>>() // TODO: collect overhead :-/ | 836 | | }; | 837 | | | 838 | | let block_authoring = | 839 | | match (&mut self.block_authoring, self.sync.best_block_consensus()) { | 840 | | (Some(ba), _) => Some(ba), | 841 | | ( | 842 | | block_authoring @ None, | 843 | | chain_information::ChainInformationConsensusRef::Aura { | 844 | | finalized_authorities_list, // TODO: field name not appropriate; should probably change the chain_information module | 845 | | slot_duration, | 846 | | }, | 847 | | ) => Some( | 848 | | block_authoring.insert(( | 849 | | author::build::Builder::new(author::build::Config { | 850 | | consensus: author::build::ConfigConsensus::Aura { | 851 | | current_authorities: finalized_authorities_list, | 852 | | local_authorities: local_authorities.iter(), | 853 | | now_from_unix_epoch: SystemTime::now() | 854 | | .duration_since(SystemTime::UNIX_EPOCH) | 855 | | .unwrap(), | 856 | | slot_duration, | 857 | | }, | 858 | | }), | 859 | | local_authorities, | 860 | | )), | 861 | | ), | 862 | | ( | 863 | | None, | 864 | | chain_information::ChainInformationConsensusRef::Babe { .. 
}, | 865 | | ) => { | 866 | | None // TODO: the block authoring doesn't support Babe at the moment | 867 | | } | 868 | | (None, _) => todo!(), | 869 | | }; | 870 | | | 871 | | match &block_authoring { | 872 | | Some((author::build::Builder::Ready(_), _)) => future::Either::Left( | 873 | | future::Either::Left(future::ready(Instant::now())), | 874 | | ), | 875 | | Some((author::build::Builder::WaitSlot(when), _)) => { | 876 | | let delay = (UNIX_EPOCH + when.when()) | 877 | | .duration_since(SystemTime::now()) | 878 | | .unwrap_or_else(|_| Duration::new(0, 0)); | 879 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( | 880 | | delay, | 881 | | ))) | 882 | | } | 883 | | None => future::Either::Left(future::Either::Right(future::pending())), | 884 | | Some((author::build::Builder::Idle, _)) => { | 885 | | // If the block authoring is idle, which happens in case of error, | 886 | | // sleep for an arbitrary duration before resetting it. | 887 | | // This prevents the authoring from trying over and over again to generate | 888 | | // a bad block. 
| 889 | | let delay = Duration::from_secs(2); | 890 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( | 891 | | delay, | 892 | | ))) | 893 | | } | 894 | | }*/ | 895 | | }; | 896 | | | 897 | 6 | async { | 898 | | if let Some(notification) = self.pending_notification.take() { | 899 | | WakeUpReason::SendPendingNotification(notification) | 900 | | } else { | 901 | | future::pending().await | 902 | | } | 903 | | } | 904 | 6 | .or(async move { | 905 | | authoring_ready_future.await; | 906 | | WakeUpReason::ReadyToAuthor | 907 | 6 | }) | 908 | 6 | .or(async { | 909 | | self.to_background_rx | 910 | | .next() | 911 | | .await | 912 | | .map_or(WakeUpReason::FrontendClosed, WakeUpReason::FrontendEvent) | 913 | 6 | }) | 914 | 6 | .or(async { | 915 | | WakeUpReason::NetworkEvent(self.from_network_service.next().await.unwrap()) | 916 | 6 | }) | 917 | 6 | .or(async { | 918 | | if self.network_local_chain_update_needed { | 919 | | self.network_local_chain_update_needed = false; | 920 | | WakeUpReason::NetworkLocalChainUpdate | 921 | | } else { | 922 | | future::pending().await | 923 | | } | 924 | 6 | }) | 925 | 6 | .or(async { | 926 | | if let Some((header, hash, height)) = self.pending_block_announce.take() { | 927 | | WakeUpReason::AnnounceBlock(header, hash, height) | 928 | | } else { | 929 | | future::pending().await | 930 | | } | 931 | 6 | }) | 932 | 6 | .or(async { | 933 | | let Some(subtask_finished) = self.sub_tasks.next().await else { | 934 | | future::pending().await | 935 | | }; | 936 | | WakeUpReason::SubtaskFinished(subtask_finished) | 937 | 6 | }) | 938 | 6 | .or({ | 939 | 6 | async { | 940 | | // TODO: handle obsolete requests | 941 | | // Ask the sync state machine whether any new network request should | 942 | | // be started. | 943 | | // `desired_requests()` returns, in decreasing order of priority, the | 944 | | // requests that should be started in order for the syncing to proceed. 
We | 945 | | // simply pick the first request, but enforce one ongoing request per | 946 | | // source. | 947 | | // TODO: desired_requests() is expensive and done at every iteration | 948 | | let request_to_start = self.sync.desired_requests().find( | 949 | | |(source_id, source_info, request_details)| { | 950 | | if source_info | 951 | | .as_ref() | 952 | | .map_or(false, |info| info.is_disconnected) | 953 | | { | 954 | | // Source is a networking source that has already been disconnected. | 955 | | false | 956 | | } else if *source_id != self.block_author_sync_source { | 957 | | // Remote source. | 958 | | self.sync.source_num_ongoing_requests(*source_id) == 0 | 959 | | } else { | 960 | | // Locally-authored blocks source. | 961 | | match (request_details, &self.authored_block) { | 962 | | ( | 963 | | all::DesiredRequest::BlocksRequest { | 964 | | first_block_hash, | 965 | | first_block_height, | 966 | | .. | 967 | | }, | 968 | | Some((authored_height, authored_hash, _, _)), | 969 | | ) if first_block_hash == authored_hash | 970 | | && first_block_height == authored_height => | 971 | | { | 972 | | true | 973 | | } | 974 | | _ => false, | 975 | | } | 976 | | } | 977 | | }, | 978 | | ); | 979 | | if let Some((source_id, _, request)) = request_to_start { | 980 | | return WakeUpReason::StartNetworkRequest { | 981 | | source_id, | 982 | | request, | 983 | | database_catch_up_type: DbCatchUpType::No, | 984 | | }; | 985 | | } | 986 | | | 987 | | match self.database_catch_up_download_block_verification.clone() { | 988 | | _ if !matches!( | 989 | | self.database_catch_up_download, | 990 | | DatabaseCatchUpDownload::NoDownloadInProgress | 991 | | | DatabaseCatchUpDownload::NothingToDownloadCache | 992 | | ) => {} | 993 | | DatabaseCatchUpDownloadBlockVerification::None => {} | 994 | | DatabaseCatchUpDownloadBlockVerification::InProgress(_) => {} | 995 | | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 996 | | block_hash, | 997 | | block_number, | 998 | | 
function_name, | 999 | | parameter, | 1000 | | } => { | 1001 | | // Choose which source to query. We have to use an `if` because | 1002 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1003 | | // or equal to the finalized block number. | 1004 | | let source_id = if block_number | 1005 | | <= self.sync.finalized_block_number() | 1006 | | { | 1007 | | self.sync | 1008 | | .sources() | 1009 | | .filter(|s| *s != self.block_author_sync_source) | 1010 | | .choose(&mut rand::thread_rng()) | 1011 | | } else { | 1012 | | self.sync | 1013 | | .knows_non_finalized_block(block_number, &block_hash) | 1014 | | .filter(|source_id| { | 1015 | | *source_id != self.block_author_sync_source | 1016 | | && self.sync.source_num_ongoing_requests(*source_id) | 1017 | | == 0 | 1018 | | }) | 1019 | | .choose(&mut rand::thread_rng()) | 1020 | | }; | 1021 | | | 1022 | | if let Some(source_id) = source_id { | 1023 | | return WakeUpReason::StartNetworkRequest { | 1024 | | source_id, | 1025 | | request: all::DesiredRequest::RuntimeCallMerkleProof { | 1026 | | block_hash, | 1027 | | function_name: function_name.into(), | 1028 | | parameter_vectored: parameter.into(), | 1029 | | }, | 1030 | | database_catch_up_type: DbCatchUpType::BlockVerification, | 1031 | | }; | 1032 | | } | 1033 | | } | 1034 | | DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 1035 | | block_hash, | 1036 | | block_number, | 1037 | | } => { | 1038 | | // Choose which source to query. We have to use an `if` because | 1039 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1040 | | // or equal to the finalized block number. 
| 1041 | | let source_id = if block_number | 1042 | | <= self.sync.finalized_block_number() | 1043 | | { | 1044 | | self.sync | 1045 | | .sources() | 1046 | | .filter(|s| *s != self.block_author_sync_source) | 1047 | | .choose(&mut rand::thread_rng()) | 1048 | | } else { | 1049 | | self.sync | 1050 | | .knows_non_finalized_block(block_number, &block_hash) | 1051 | | .filter(|source_id| { | 1052 | | *source_id != self.block_author_sync_source | 1053 | | && self.sync.source_num_ongoing_requests(*source_id) | 1054 | | == 0 | 1055 | | }) | 1056 | | .choose(&mut rand::thread_rng()) | 1057 | | }; | 1058 | | | 1059 | | if let Some(source_id) = source_id { | 1060 | | return WakeUpReason::StartNetworkRequest { | 1061 | | source_id, | 1062 | | request: all::DesiredRequest::StorageGetMerkleProof { | 1063 | | block_hash, | 1064 | | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1065 | | keys: vec![":code".into(), ":heappages".into()], | 1066 | | }, | 1067 | | database_catch_up_type: DbCatchUpType::BlockVerification, | 1068 | | }; | 1069 | | } | 1070 | | } | 1071 | | } | 1072 | | | 1073 | | // If the sync state machine doesn't require any additional request, ask | 1074 | | // the database whether any storage item is missing. 
| 1075 | | if matches!( | 1076 | | self.database_catch_up_download, | 1077 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1078 | | ) { | 1079 | | // TODO: this has a O(n^2) complexity; in case all sources are busy, we iterate a lot | 1080 | | let missing_items = self | 1081 | | .database | 1082 | | .with_database(|db| { | 1083 | | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | | }) | 1085 | | .await | 1086 | | .unwrap(); | 1087 | | if missing_items.is_empty() { | 1088 | | self.database_catch_up_download = | 1089 | | DatabaseCatchUpDownload::NothingToDownloadCache; | 1090 | | } | 1091 | | | 1092 | | for missing_item in missing_items | 1093 | | .into_iter() | 1094 | | .flat_map(|item| item.blocks.into_iter()) | 1095 | | { | 1096 | | // Since the database and sync state machine are supposed to have the | 1097 | | // same finalized block, it is guaranteed that the missing item are | 1098 | | // in the finalized block or above. | 1099 | | debug_assert!( | 1100 | | missing_item.number >= self.sync.finalized_block_number() | 1101 | | ); | 1102 | | | 1103 | | // Choose which source to query. We have to use an `if` because | 1104 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1105 | | // or equal to the finalized block number. 
| 1106 | | let source_id = if missing_item.number | 1107 | | <= self.sync.finalized_block_number() | 1108 | | { | 1109 | | let Some(source_id) = self | 1110 | | .sync | 1111 | | .sources() | 1112 | | .filter(|s| *s != self.block_author_sync_source) | 1113 | | .choose(&mut rand::thread_rng()) | 1114 | | else { | 1115 | | break; | 1116 | | }; | 1117 | | source_id | 1118 | | } else { | 1119 | | let Some(source_id) = self | 1120 | | .sync | 1121 | | .knows_non_finalized_block( | 1122 | | missing_item.number, | 1123 | | &missing_item.hash, | 1124 | | ) | 1125 | | .filter(|source_id| { | 1126 | | *source_id != self.block_author_sync_source | 1127 | | && self.sync.source_num_ongoing_requests(*source_id) | 1128 | | == 0 | 1129 | | }) | 1130 | | .choose(&mut rand::thread_rng()) | 1131 | | else { | 1132 | | continue; | 1133 | | }; | 1134 | | source_id | 1135 | | }; | 1136 | | | 1137 | | return WakeUpReason::StartNetworkRequest { | 1138 | | source_id, | 1139 | | request: all::DesiredRequest::StorageGetMerkleProof { | 1140 | | block_hash: missing_item.hash, | 1141 | | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1142 | | keys: vec![trie::nibbles_to_bytes_suffix_extend( | 1143 | | missing_item | 1144 | | .trie_node_key_nibbles | 1145 | | .into_iter() | 1146 | | // In order to download more than one item at a time, | 1147 | | // we add some randomly-generated nibbles to the | 1148 | | // requested key. The request will target the missing | 1149 | | // key plus a few other random keys. | 1150 | | .chain((0..32).map(|_| { | 1151 | | rand::Rng::gen_range( | 1152 | | &mut rand::thread_rng(), | 1153 | | 0..16, | 1154 | | ) | 1155 | | })) | 1156 | | .map(|n| trie::Nibble::try_from(n).unwrap()), | 1157 | | ) | 1158 | | .collect::<Vec<_>>()], | 1159 | | }, | 1160 | | database_catch_up_type: DbCatchUpType::Database, | 1161 | | }; | 1162 | | } | 1163 | | } | 1164 | | | 1165 | | // No network request to start. 
| 1166 | | future::pending().await | 1167 | 6 | } | 1168 | 6 | }) | 1169 | 6 | .or({ | 1170 | 6 | let is_downloading = matches!0 ( | 1171 | 6 | self.database_catch_up_download_block_verification, | 1172 | | DatabaseCatchUpDownloadBlockVerification::None | 1173 | | ); | 1174 | | async move { | 1175 | | if !process_sync || !is_downloading { | 1176 | | future::pending().await | 1177 | | } | 1178 | | WakeUpReason::SyncProcess | 1179 | | } | 1180 | | }) | 1181 | 3 | .await | 1182 | | }; | 1183 | | | 1184 | 0 | match wake_up_reason { | 1185 | | WakeUpReason::ReadyToAuthor => { | 1186 | | // Ready to author a block. Call `author_block()`. | 1187 | | // While a block is being authored, the whole syncing state machine is | 1188 | | // deliberately frozen. | 1189 | 0 | match self.block_authoring { | 1190 | | Some((author::build::Builder::Ready(_), _)) => { | 1191 | 0 | self.author_block().await; | 1192 | | } | 1193 | 0 | Some((author::build::Builder::WaitSlot(when), local_authorities)) => { | 1194 | 0 | self.block_authoring = Some(( | 1195 | 0 | author::build::Builder::Ready(when.start()), | 1196 | 0 | local_authorities, | 1197 | 0 | )); | 1198 | 0 | self.author_block().await; | 1199 | | } | 1200 | 0 | Some((author::build::Builder::Idle, _)) => { | 1201 | 0 | self.block_authoring = None; | 1202 | 0 | } | 1203 | | None => { | 1204 | 0 | unreachable!() | 1205 | | } | 1206 | | } | 1207 | | | 1208 | 0 | process_sync = true; | 1209 | | } | 1210 | | | 1211 | | WakeUpReason::FrontendClosed => { | 1212 | | // Shutdown. 
| 1213 | 2 | return; | 1214 | | } | 1215 | | | 1216 | | WakeUpReason::FrontendEvent(ToBackground::SubscribeAll { | 1217 | 0 | buffer_size, | 1218 | 0 | _max_finalized_pinned_blocks: _, | 1219 | 0 | result_tx, | 1220 | 0 | }) => { | 1221 | 0 | let (tx, new_blocks) = async_channel::bounded(buffer_size.saturating_sub(1)); | 1222 | | | 1223 | 0 | let non_finalized_blocks_ancestry_order = { | 1224 | 0 | let blocks_in = self | 1225 | 0 | .sync | 1226 | 0 | .non_finalized_blocks_ancestry_order() | 1227 | 0 | .map(|h| { | 1228 | | ( | 1229 | | h.number, | 1230 | | h.scale_encoding_vec(self.sync.block_number_bytes()), | 1231 | | *h.parent_hash, | 1232 | | ) | 1233 | 0 | }) | 1234 | 0 | .collect::<Vec<_>>(); | 1235 | 0 | let mut blocks_out = Vec::new(); | 1236 | 0 | for (number, scale_encoding, parent_hash) in blocks_in { | 1237 | 0 | let hash = header::hash_from_scale_encoded_header(&scale_encoding); | 1238 | 0 | let runtime = match &self.sync[(number, &hash)] { | 1239 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), | 1240 | 0 | _ => unreachable!(), | 1241 | | }; | 1242 | 0 | let runtime_update = if Arc::ptr_eq(&self.finalized_runtime, &runtime) { | 1243 | 0 | None | 1244 | | } else { | 1245 | 0 | Some(runtime.clone()) | 1246 | | }; | 1247 | 0 | blocks_out.push(BlockNotification { | 1248 | 0 | is_new_best: header::hash_from_scale_encoded_header( | 1249 | 0 | &scale_encoding, | 1250 | 0 | ) == *self.sync.best_block_hash(), | 1251 | 0 | block_hash: header::hash_from_scale_encoded_header(&scale_encoding), | 1252 | 0 | scale_encoded_header: scale_encoding, | 1253 | 0 | runtime_update, | 1254 | 0 | parent_hash, | 1255 | 0 | }); | 1256 | | } | 1257 | 0 | blocks_out | 1258 | 0 | }; | 1259 | 0 |
| 1260 | 0 | self.blocks_notifications.push(tx); | 1261 | 0 | let _ = result_tx.send(SubscribeAll { | 1262 | 0 | id: SubscriptionId(0), // TODO: | 1263 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), | 1264 | 0 | finalized_block_scale_encoded_header: self | 1265 | 0 | .sync | 1266 | 0 | .finalized_block_header() | 1267 | 0 | .to_owned(), | 1268 | 0 | finalized_block_runtime: self.finalized_runtime.clone(), | 1269 | 0 | non_finalized_blocks_ancestry_order, | 1270 | 0 | new_blocks, | 1271 | 0 | }); | 1272 | | } | 1273 | 0 | WakeUpReason::SendPendingNotification(notification) => { | 1274 | | // Elements in `blocks_notifications` are removed one by one and inserted | 1275 | | // back if the channel is still open. | 1276 | 0 | for index in (0..self.blocks_notifications.len()).rev() { | 1277 | 0 | let subscription = self.blocks_notifications.swap_remove(index); | 1278 | 0 | if subscription.try_send(notification.clone()).is_err() { | 1279 | 0 | continue; | 1280 | 0 | } | 1281 | 0 | self.blocks_notifications.push(subscription); | 1282 | | } | 1283 | | } | 1284 | | | 1285 | 0 | WakeUpReason::FrontendEvent(ToBackground::GetSyncState { result_tx }) => { | 1286 | 0 | let _ = result_tx.send(SyncState { | 1287 | 0 | best_block_hash: *self.sync.best_block_hash(), | 1288 | 0 | best_block_number: self.sync.best_block_number(), | 1289 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), | 1290 | 0 | finalized_block_number: self.sync.finalized_block_number(), | 1291 | 0 | }); | 1292 | 0 | } | 1293 | 0 | WakeUpReason::FrontendEvent(ToBackground::Unpin { result_tx, .. }) => { | 1294 | 0 | // TODO: check whether block was indeed pinned, and prune blocks that aren't pinned anymore from the database | 1295 | 0 | let _ = result_tx.send(()); | 1296 | 0 | } | 1297 | 0 | WakeUpReason::FrontendEvent(ToBackground::IsMajorSyncingHint { result_tx }) => { | 1298 | 0 | // As documented, the value returned doesn't need to be precise. 
| 1299 | 0 | let result = match self.sync.status() { | 1300 | 0 | all::Status::Sync => false, | 1301 | | all::Status::WarpSyncFragments { .. } | 1302 | 0 | | all::Status::WarpSyncChainInformation { .. } => true, | 1303 | | }; | 1304 | | | 1305 | 0 | let _ = result_tx.send(result); | 1306 | | } | 1307 | | | 1308 | | WakeUpReason::NetworkLocalChainUpdate => { | 1309 | 2 | self.network_service | 1310 | 2 | .set_local_best_block( | 1311 | 2 | self.network_chain_id, | 1312 | 2 | *self.sync.best_block_hash(), | 1313 | 2 | self.sync.best_block_number(), | 1314 | 2 | ) | 1315 | 0 | .await; | 1316 | | } | 1317 | | | 1318 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) => { | 1319 | | // We can never be guaranteed that a certain source does *not* know about a | 1320 | | // block, however it is not a big problem to send a block announce to a source | 1321 | | // that already knows about that block. For this reason, the list of sources | 1322 | | // we send the block announce to is `all_sources - sources_that_know_it`. | 1323 | | // | 1324 | | // Note that not sending block announces to sources that already | 1325 | | // know that block means that these sources might also miss the | 1326 | | // fact that our local best block has been updated. This is in | 1327 | | // practice not a problem either. | 1328 | 0 | let sources_to_announce_to = { | 1329 | 0 | let mut all_sources = self | 1330 | 0 | .sync | 1331 | 0 | .sources() | 1332 | 0 | .collect::<HashSet<_, fnv::FnvBuildHasher>>(); | 1333 | 0 | for knows in self.sync.knows_non_finalized_block(height, &hash) { | 1334 | 0 | all_sources.remove(&knows); | 1335 | 0 | } | 1336 | 0 | all_sources | 1337 | 0 | }; | 1338 | 0 |
| 1339 | 0 | let is_best = *self.sync.best_block_hash() == hash; | 1340 | | | 1341 | 0 | for source_id in sources_to_announce_to { | 1342 | 0 | let peer_id = match &self.sync[source_id] { | 1343 | 0 | Some(info) if !info.is_disconnected => &info.peer_id, | 1344 | 0 | _ => continue, | 1345 | | }; | 1346 | | | 1347 | 0 | if self | 1348 | 0 | .network_service | 1349 | 0 | .clone() | 1350 | 0 | .send_block_announce( | 1351 | 0 | peer_id.clone(), | 1352 | 0 | self.network_chain_id, | 1353 | 0 | header.clone(), | 1354 | 0 | is_best, | 1355 | 0 | ) | 1356 | 0 | .await | 1357 | 0 | .is_ok() | 1358 | 0 | { | 1359 | 0 | // Note that `try_add_known_block_to_source` might have | 1360 | 0 | // no effect, which is not a problem considering that this | 1361 | 0 | // block tracking is mostly about optimizations and | 1362 | 0 | // politeness. | 1363 | 0 | self.sync | 1364 | 0 | .try_add_known_block_to_source(source_id, height, hash); | 1365 | 0 | } | 1366 | | } | 1367 | | } | 1368 | | | 1369 | | WakeUpReason::NetworkEvent(network_service::Event::Connected { | 1370 | 0 | peer_id, | 1371 | 0 | chain_id, | 1372 | 0 | best_block_number, | 1373 | 0 | best_block_hash, | 1374 | 0 | }) if chain_id == self.network_chain_id => { | 1375 | 0 | // Most of the time, we insert a new source in the state machine. | 1376 | 0 | // However, a source of that `PeerId` might already exist but be considered as | 1377 | 0 | // disconnected. If that is the case, we simply mark it as no | 1378 | 0 | // longer disconnected. 
| 1379 | 0 | match self.peers_source_id_map.entry(peer_id) { | 1380 | 0 | hashbrown::hash_map::Entry::Occupied(entry) => { | 1381 | 0 | let id = *entry.get(); | 1382 | 0 | let is_disconnected = | 1383 | 0 | &mut self.sync[id].as_mut().unwrap().is_disconnected; | 1384 | 0 | debug_assert!(*is_disconnected); | 1385 | 0 | *is_disconnected = false; | 1386 | | } | 1387 | 0 | hashbrown::hash_map::Entry::Vacant(entry) => { | 1388 | 0 | let id = self | 1389 | 0 | .sync | 1390 | 0 | .prepare_add_source(best_block_number, best_block_hash) | 1391 | 0 | .add_source( | 1392 | 0 | Some(NetworkSourceInfo { | 1393 | 0 | peer_id: entry.key().clone(), | 1394 | 0 | is_disconnected: false, | 1395 | 0 | }), | 1396 | 0 | NonFinalizedBlock::NotVerified, | 1397 | 0 | ); | 1398 | 0 | entry.insert(id); | 1399 | 0 | } | 1400 | | } | 1401 | | } | 1402 | | WakeUpReason::NetworkEvent(network_service::Event::Disconnected { | 1403 | 0 | peer_id, | 1404 | 0 | chain_id, | 1405 | 0 | }) if chain_id == self.network_chain_id => { | 1406 | 0 | // Sources that disconnect are only immediately removed from the sync state | 1407 | 0 | // machine if they have no request in progress. If that is not the case, they | 1408 | 0 | // are instead only marked as disconnected. 
| 1409 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1410 | 0 | if self.sync.source_num_ongoing_requests(id) == 0 { | 1411 | 0 | self.peers_source_id_map.remove(&peer_id).unwrap(); | 1412 | 0 | let (_, mut _requests) = self.sync.remove_source(id); | 1413 | 0 | debug_assert!(_requests.next().is_none()); | 1414 | | } else { | 1415 | 0 | let is_disconnected = &mut self.sync[id].as_mut().unwrap().is_disconnected; | 1416 | 0 | debug_assert!(!*is_disconnected); | 1417 | 0 | *is_disconnected = true; | 1418 | | } | 1419 | | } | 1420 | | WakeUpReason::NetworkEvent(network_service::Event::BlockAnnounce { | 1421 | 0 | chain_id, | 1422 | 0 | peer_id, | 1423 | 0 | scale_encoded_header, | 1424 | 0 | is_best, | 1425 | 0 | }) if chain_id == self.network_chain_id => { | 1426 | 0 | let _jaeger_span = self.jaeger_service.block_announce_process_span( | 1427 | 0 | &header::hash_from_scale_encoded_header(&scale_encoded_header), | 1428 | 0 | ); | 1429 | 0 |
| 1430 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1431 | 0 | // TODO: log the outcome | 1432 | 0 | match self.sync.block_announce(id, scale_encoded_header, is_best) { | 1433 | 0 | all::BlockAnnounceOutcome::TooOld { .. } => {} | 1434 | 0 | all::BlockAnnounceOutcome::AlreadyVerified(known) | 1435 | 0 | | all::BlockAnnounceOutcome::AlreadyPending(known) => { | 1436 | 0 | known.update_source_and_block(); | 1437 | 0 | } | 1438 | 0 | all::BlockAnnounceOutcome::Unknown(unknown) => { | 1439 | 0 | unknown.insert_and_update_source(NonFinalizedBlock::NotVerified) | 1440 | | } | 1441 | 0 | all::BlockAnnounceOutcome::InvalidHeader(_) => unreachable!(), // TODO: ?!?! why unreachable? also, ban the peer | 1442 | | } | 1443 | | } | 1444 | | WakeUpReason::NetworkEvent(network_service::Event::GrandpaNeighborPacket { | 1445 | 0 | chain_id, | 1446 | 0 | peer_id, | 1447 | 0 | finalized_block_height, | 1448 | 0 | }) if chain_id == self.network_chain_id => { | 1449 | 0 | let source_id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1450 | 0 | self.sync | 1451 | 0 | .update_source_finality_state(source_id, finalized_block_height); | 1452 | 0 | } | 1453 | 0 | WakeUpReason::NetworkEvent(_) => { | 1454 | 0 | // Different chain index. | 1455 | 0 | } | 1456 | | | 1457 | | WakeUpReason::StartNetworkRequest { | 1458 | 0 | source_id, | 1459 | 0 | request: request_info @ all::DesiredRequest::BlocksRequest { .. }, | 1460 | 0 | database_catch_up_type, | 1461 | 0 | } if source_id == self.block_author_sync_source => { | 1462 | 0 | debug_assert!(matches!(database_catch_up_type, DbCatchUpType::No)); | 1463 | | | 1464 | 0 | self.log_callback.log( | 1465 | 0 | LogLevel::Debug, | 1466 | 0 | "queue-locally-authored-block-for-import".to_string(), | 1467 | 0 | ); | 1468 | 0 |
| 1469 | 0 | let (_, block_hash, scale_encoded_header, scale_encoded_extrinsics) = | 1470 | 0 | self.authored_block.take().unwrap(); | 1471 | 0 |
| 1472 | 0 | let _jaeger_span = self.jaeger_service.block_import_queue_span(&block_hash); | 1473 | 0 |
| 1474 | 0 | // Create a request that is immediately answered right below. | 1475 | 0 | let request_id = self.sync.add_request(source_id, request_info.into(), ()); | 1476 | 0 | // TODO: announce the block on the network, but only after it's been imported | 1477 | 0 | self.sync.blocks_request_response( | 1478 | 0 | request_id, | 1479 | 0 | iter::once(all::BlockRequestSuccessBlock { | 1480 | 0 | scale_encoded_header, | 1481 | 0 | scale_encoded_extrinsics, | 1482 | 0 | scale_encoded_justifications: Vec::new(), | 1483 | 0 | user_data: NonFinalizedBlock::NotVerified, | 1484 | 0 | }), | 1485 | 0 | ); | 1486 | | } | 1487 | | | 1488 | | WakeUpReason::StartNetworkRequest { | 1489 | 0 | source_id, | 1490 | 0 | request: | 1491 | 0 | all::DesiredRequest::BlocksRequest { | 1492 | 0 | first_block_hash, | 1493 | 0 | first_block_height, | 1494 | 0 | num_blocks, | 1495 | 0 | request_headers, | 1496 | 0 | request_bodies, | 1497 | 0 | request_justification, | 1498 | 0 | }, | 1499 | 0 | database_catch_up_type, | 1500 | 0 | } => { | 1501 | 0 | // Before notifying the syncing of the request, clamp the number of blocks to | 1502 | 0 | // the number of blocks we expect to receive. | 1503 | 0 | let num_blocks = NonZeroU64::new(cmp::min(num_blocks.get(), 64)).unwrap(); | 1504 | | | 1505 | 0 | let peer_id = { | 1506 | 0 | let info = self.sync[source_id].clone().unwrap(); | 1507 | 0 | // Disconnected sources are filtered out above. | 1508 | 0 | debug_assert!(!info.is_disconnected); | 1509 | 0 | info.peer_id | 1510 | 0 | }; | 1511 | 0 |
| 1512 | 0 | // TODO: add jaeger span | 1513 | 0 |
| 1514 | 0 | let request = self.network_service.clone().blocks_request( | 1515 | 0 | peer_id, | 1516 | 0 | self.network_chain_id, | 1517 | 0 | network::codec::BlocksRequestConfig { | 1518 | 0 | start: network::codec::BlocksRequestConfigStart::Hash(first_block_hash), | 1519 | 0 | desired_count: NonZeroU32::new( | 1520 | 0 | u32::try_from(num_blocks.get()).unwrap_or(u32::MAX), | 1521 | 0 | ) | 1522 | 0 | .unwrap(), | 1523 | 0 | // The direction is hardcoded based on the documentation of the syncing | 1524 | 0 | // state machine. | 1525 | 0 | direction: network::codec::BlocksRequestDirection::Descending, | 1526 | 0 | fields: network::codec::BlocksRequestFields { | 1527 | 0 | header: true, // TODO: always set to true due to unwrapping the header when the response comes | 1528 | 0 | body: request_bodies, | 1529 | 0 | justifications: request_justification, | 1530 | 0 | }, | 1531 | 0 | }, | 1532 | 0 | ); | 1533 | 0 |
| 1534 | 0 | let request_id = self.sync.add_request( | 1535 | 0 | source_id, | 1536 | 0 | all::DesiredRequest::BlocksRequest { | 1537 | 0 | first_block_hash, | 1538 | 0 | first_block_height, | 1539 | 0 | num_blocks, | 1540 | 0 | request_headers, | 1541 | 0 | request_bodies, | 1542 | 0 | request_justification, | 1543 | 0 | } | 1544 | 0 | .into(), | 1545 | 0 | (), | 1546 | 0 | ); | 1547 | 0 |
| 1548 | 0 | match database_catch_up_type { | 1549 | 0 | DbCatchUpType::No => {} | 1550 | | DbCatchUpType::Database => { | 1551 | 0 | debug_assert!(matches!( | 1552 | 0 | self.database_catch_up_download, | 1553 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1554 | | )); | 1555 | 0 | self.database_catch_up_download = | 1556 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1557 | | } | 1558 | 0 | DbCatchUpType::BlockVerification => { | 1559 | 0 | self.database_catch_up_download_block_verification = | 1560 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1561 | 0 | } | 1562 | | } | 1563 | | | 1564 | 0 | self.sub_tasks.push(Box::pin(async move { | 1565 | | let result = request.await; | 1566 | | SubtaskFinished::BlocksRequestFinished { | 1567 | | request_id, | 1568 | | source_id, | 1569 | | result, | 1570 | | } | 1571 | 0 | })); | 1572 | | } | 1573 | | | 1574 | | WakeUpReason::StartNetworkRequest { | 1575 | 0 | source_id, | 1576 | 0 | request: | 1577 | 0 | all::DesiredRequest::WarpSync { | 1578 | 0 | sync_start_block_hash, | 1579 | 0 | }, | 1580 | 0 | database_catch_up_type, | 1581 | 0 | } => { | 1582 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1583 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1584 | 0 |
| 1585 | 0 | let request = self.network_service.clone().warp_sync_request( | 1586 | 0 | peer_id, | 1587 | 0 | self.network_chain_id, | 1588 | 0 | sync_start_block_hash, | 1589 | 0 | ); | 1590 | 0 |
| 1591 | 0 | let request_id = self.sync.add_request( | 1592 | 0 | source_id, | 1593 | 0 | all::RequestDetail::WarpSync { | 1594 | 0 | sync_start_block_hash, | 1595 | 0 | }, | 1596 | 0 | (), | 1597 | 0 | ); | 1598 | 0 |
| 1599 | 0 | match database_catch_up_type { | 1600 | 0 | DbCatchUpType::No => {} | 1601 | | DbCatchUpType::Database => { | 1602 | 0 | debug_assert!(matches!( | 1603 | 0 | self.database_catch_up_download, | 1604 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1605 | | )); | 1606 | 0 | self.database_catch_up_download = | 1607 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1608 | | } | 1609 | 0 | DbCatchUpType::BlockVerification => { | 1610 | 0 | self.database_catch_up_download_block_verification = | 1611 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1612 | 0 | } | 1613 | | } | 1614 | | | 1615 | 0 | self.sub_tasks.push(Box::pin(async move { | 1616 | | let result = request.await; | 1617 | | SubtaskFinished::WarpSyncRequestFinished { | 1618 | | request_id, | 1619 | | source_id, | 1620 | | result, | 1621 | | } | 1622 | 0 | })); | 1623 | | } | 1624 | | | 1625 | | WakeUpReason::StartNetworkRequest { | 1626 | 0 | source_id, | 1627 | 0 | request: | 1628 | 0 | all::DesiredRequest::StorageGetMerkleProof { | 1629 | 0 | block_hash, keys, .. | 1630 | 0 | }, | 1631 | 0 | database_catch_up_type, | 1632 | 0 | } => { | 1633 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1634 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1635 | 0 |
| 1636 | 0 | let request = self.network_service.clone().storage_request( | 1637 | 0 | peer_id, | 1638 | 0 | self.network_chain_id, | 1639 | 0 | network::codec::StorageProofRequestConfig { | 1640 | 0 | block_hash, | 1641 | 0 | keys: keys.clone().into_iter(), | 1642 | 0 | }, | 1643 | 0 | ); | 1644 | 0 |
| 1645 | 0 | let request_id = self.sync.add_request( | 1646 | 0 | source_id, | 1647 | 0 | all::RequestDetail::StorageGet { block_hash, keys }, | 1648 | 0 | (), | 1649 | 0 | ); | 1650 | 0 |
| 1651 | 0 | match database_catch_up_type { | 1652 | 0 | DbCatchUpType::No => {} | 1653 | | DbCatchUpType::Database => { | 1654 | 0 | debug_assert!(matches!( | 1655 | 0 | self.database_catch_up_download, | 1656 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1657 | | )); | 1658 | 0 | self.database_catch_up_download = | 1659 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1660 | | } | 1661 | 0 | DbCatchUpType::BlockVerification => { | 1662 | 0 | self.database_catch_up_download_block_verification = | 1663 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1664 | 0 | } | 1665 | | } | 1666 | | | 1667 | 0 | self.sub_tasks.push(Box::pin(async move { | 1668 | | let result = request.await; | 1669 | | SubtaskFinished::StorageRequestFinished { | 1670 | | request_id, | 1671 | | source_id, | 1672 | | result, | 1673 | | } | 1674 | 0 | })); | 1675 | | } | 1676 | | | 1677 | | WakeUpReason::StartNetworkRequest { | 1678 | 0 | source_id, | 1679 | 0 | request: | 1680 | 0 | all::DesiredRequest::RuntimeCallMerkleProof { | 1681 | 0 | block_hash, | 1682 | 0 | function_name, | 1683 | 0 | parameter_vectored, | 1684 | 0 | }, | 1685 | 0 | database_catch_up_type, | 1686 | 0 | } => { | 1687 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1688 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1689 | 0 |
| 1690 | 0 | let request = self.network_service.clone().call_proof_request( | 1691 | 0 | peer_id, | 1692 | 0 | self.network_chain_id, | 1693 | 0 | network::codec::CallProofRequestConfig { | 1694 | 0 | block_hash, | 1695 | 0 | method: function_name.clone(), | 1696 | 0 | parameter_vectored: iter::once(parameter_vectored.clone()), | 1697 | 0 | }, | 1698 | 0 | ); | 1699 | 0 |
| 1700 | 0 | let request_id = self.sync.add_request( | 1701 | 0 | source_id, | 1702 | 0 | all::RequestDetail::RuntimeCallMerkleProof { | 1703 | 0 | block_hash, | 1704 | 0 | function_name, | 1705 | 0 | parameter_vectored, | 1706 | 0 | }, | 1707 | 0 | (), | 1708 | 0 | ); | 1709 | 0 |
| 1710 | 0 | match database_catch_up_type { | 1711 | 0 | DbCatchUpType::No => {} | 1712 | | DbCatchUpType::Database => { | 1713 | 0 | debug_assert!(matches!( | 1714 | 0 | self.database_catch_up_download, | 1715 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1716 | | )); | 1717 | 0 | self.database_catch_up_download = | 1718 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1719 | | } | 1720 | 0 | DbCatchUpType::BlockVerification => { | 1721 | 0 | self.database_catch_up_download_block_verification = | 1722 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1723 | 0 | } | 1724 | | } | 1725 | | | 1726 | 0 | self.sub_tasks.push(Box::pin(async move { | 1727 | | let result = request.await; | 1728 | | SubtaskFinished::CallProofRequestFinished { | 1729 | | request_id, | 1730 | | source_id, | 1731 | | result, | 1732 | | } | 1733 | 0 | })); | 1734 | | } | 1735 | | | 1736 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { | 1737 | 0 | request_id, | 1738 | 0 | source_id, | 1739 | 0 | result: Ok(blocks), | 1740 | | }) => { | 1741 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1742 | 0 | { | 1743 | 0 | self.database_catch_up_download = | 1744 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1745 | 0 | } | 1746 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1747 | 0 | { | 1748 | 0 | self.database_catch_up_download_block_verification = | 1749 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1750 | 0 | } | 1751 | | | 1752 | | // TODO: insert blocks in database if they are referenced through a parent_hash? 
| 1753 | | | 1754 | 0 | let _ = self.sync.blocks_request_response( | 1755 | 0 | request_id, | 1756 | 0 | blocks | 1757 | 0 | .into_iter() | 1758 | 0 | .map(|block| all::BlockRequestSuccessBlock { | 1759 | | scale_encoded_header: block.header.unwrap(), // TODO: don't unwrap | 1760 | | scale_encoded_extrinsics: block.body.unwrap(), // TODO: don't unwrap | 1761 | | scale_encoded_justifications: block | 1762 | | .justifications | 1763 | | .unwrap_or_default() | 1764 | | .into_iter() | 1765 | | .map(|j| all::Justification { | 1766 | | engine_id: j.engine_id, | 1767 | | justification: j.justification, | 1768 | | }) | 1769 | | .collect(), | 1770 | | user_data: NonFinalizedBlock::NotVerified, | 1771 | 0 | }), | 1772 | 0 | ); | 1773 | 0 |
| 1774 | 0 | // If the source was actually disconnected and has no other request in | 1775 | 0 | // progress, we clean it up. | 1776 | 0 | // TODO: DRY | 1777 | 0 | if self.sync[source_id] | 1778 | 0 | .as_ref() | 1779 | 0 | .map_or(false, |info| info.is_disconnected) | 1780 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1781 | | { | 1782 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1783 | 0 | debug_assert!(_requests.next().is_none()); | 1784 | 0 | self.peers_source_id_map | 1785 | 0 | .remove(&info.unwrap().peer_id) | 1786 | 0 | .unwrap(); | 1787 | 0 | } | 1788 | | | 1789 | 0 | process_sync = true; | 1790 | | } | 1791 | | | 1792 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { | 1793 | 0 | request_id, | 1794 | 0 | source_id, | 1795 | | result: Err(_), | 1796 | | }) => { | 1797 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1798 | 0 | { | 1799 | 0 | self.database_catch_up_download = | 1800 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1801 | 0 | } | 1802 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1803 | 0 | { | 1804 | 0 | self.database_catch_up_download_block_verification = | 1805 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1806 | 0 | } | 1807 | | | 1808 | | // Note that we perform the ban even if the source is now disconnected. | 1809 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 1810 | 0 | self.network_service | 1811 | 0 | .ban_and_disconnect( | 1812 | 0 | peer_id, | 1813 | 0 | self.network_chain_id, | 1814 | 0 | network_service::BanSeverity::Low, | 1815 | 0 | "blocks-request-error", | 1816 | 0 | ) | 1817 | 0 | .await; | 1818 | | | 1819 | 0 | let _ = self.sync.remove_request(request_id); | 1820 | 0 |
| 1821 | 0 | // If the source was actually disconnected and has no other request in | 1822 | 0 | // progress, we clean it up. | 1823 | 0 | // TODO: DRY | 1824 | 0 | if self.sync[source_id] | 1825 | 0 | .as_ref() | 1826 | 0 | .map_or(false, |info| info.is_disconnected) | 1827 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1828 | | { | 1829 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1830 | 0 | debug_assert!(_requests.next().is_none()); | 1831 | 0 | self.peers_source_id_map | 1832 | 0 | .remove(&info.unwrap().peer_id) | 1833 | 0 | .unwrap(); | 1834 | 0 | } | 1835 | | | 1836 | 0 | process_sync = true; | 1837 | | } | 1838 | | | 1839 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { | 1840 | 0 | request_id, | 1841 | 0 | source_id, | 1842 | 0 | result: Ok(result), | 1843 | | }) => { | 1844 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1845 | 0 | { | 1846 | 0 | self.database_catch_up_download = | 1847 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1848 | 0 | } | 1849 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1850 | 0 | { | 1851 | 0 | self.database_catch_up_download_block_verification = | 1852 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1853 | 0 | } | 1854 | | | 1855 | 0 | let decoded = result.decode(); | 1856 | 0 | let fragments = decoded | 1857 | 0 | .fragments | 1858 | 0 | .into_iter() | 1859 | 0 | .map(|f| all::WarpSyncFragment { | 1860 | | scale_encoded_header: f.scale_encoded_header.to_vec(), | 1861 | | scale_encoded_justification: f.scale_encoded_justification.to_vec(), | 1862 | 0 | }) | 1863 | 0 | .collect(); | 1864 | 0 | let _ = self.sync.grandpa_warp_sync_response( | 1865 | 0 | request_id, | 1866 | 0 | fragments, | 1867 | 0 | decoded.is_finished, | 1868 | 0 | ); | 1869 | 0 |
| 1870 | 0 | // If the source was actually disconnected and has no other request in | 1871 | 0 | // progress, we clean it up. | 1872 | 0 | // TODO: DRY | 1873 | 0 | if self.sync[source_id] | 1874 | 0 | .as_ref() | 1875 | 0 | .map_or(false, |info| info.is_disconnected) | 1876 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1877 | | { | 1878 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1879 | 0 | debug_assert!(_requests.next().is_none()); | 1880 | 0 | self.peers_source_id_map | 1881 | 0 | .remove(&info.unwrap().peer_id) | 1882 | 0 | .unwrap(); | 1883 | 0 | } | 1884 | | | 1885 | 0 | process_sync = true; | 1886 | | } | 1887 | | | 1888 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { | 1889 | 0 | request_id, | 1890 | 0 | source_id, | 1891 | | result: Err(_), | 1892 | | }) => { | 1893 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1894 | 0 | { | 1895 | 0 | self.database_catch_up_download = | 1896 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1897 | 0 | } | 1898 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1899 | 0 | { | 1900 | 0 | self.database_catch_up_download_block_verification = | 1901 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1902 | 0 | } | 1903 | | | 1904 | | // Note that we perform the ban even if the source is now disconnected. | 1905 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 1906 | 0 | self.network_service | 1907 | 0 | .ban_and_disconnect( | 1908 | 0 | peer_id, | 1909 | 0 | self.network_chain_id, | 1910 | 0 | network_service::BanSeverity::Low, | 1911 | 0 | "warp-sync-request-error", | 1912 | 0 | ) | 1913 | 0 | .await; | 1914 | | | 1915 | 0 | let _ = self.sync.remove_request(request_id); | 1916 | 0 |
| 1917 | 0 | // If the source was actually disconnected and has no other request in | 1918 | 0 | // progress, we clean it up. | 1919 | 0 | // TODO: DRY | 1920 | 0 | if self.sync[source_id] | 1921 | 0 | .as_ref() | 1922 | 0 | .map_or(false, |info| info.is_disconnected) | 1923 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1924 | | { | 1925 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1926 | 0 | debug_assert!(_requests.next().is_none()); | 1927 | 0 | self.peers_source_id_map | 1928 | 0 | .remove(&info.unwrap().peer_id) | 1929 | 0 | .unwrap(); | 1930 | 0 | } | 1931 | | | 1932 | 0 | process_sync = true; | 1933 | | } | 1934 | | | 1935 | | WakeUpReason::SubtaskFinished(SubtaskFinished::StorageRequestFinished { | 1936 | 0 | request_id, | 1937 | 0 | source_id, | 1938 | 0 | result, | 1939 | | }) => { | 1940 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1941 | 0 | { | 1942 | 0 | self.database_catch_up_download = | 1943 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1944 | 0 | } | 1945 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1946 | 0 | { | 1947 | 0 | self.database_catch_up_download_block_verification = | 1948 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1949 | 0 | } | 1950 | | | 1951 | 0 | if let Ok(result) = &result { | 1952 | 0 | let result = result.clone(); | 1953 | 0 | self.database | 1954 | 0 | .with_database(move |database| { | 1955 | | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( | 1956 | | trie::proof_decode::Config { | 1957 | | proof: result.decode(), | 1958 | | }, | 1959 | | ) { | 1960 | | for (_, entry) in decoded.iter_ordered() { | 1961 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database | 1962 | | database.insert_trie_nodes( | 1963 | | 
iter::once(full_sqlite::InsertTrieNode { | 1964 | | merkle_value: Cow::Borrowed(entry.merkle_value), | 1965 | | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), | 1966 | | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), | 1967 | | storage_value: match entry.trie_node_info.storage_value { | 1968 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 1969 | | _, | 1970 | | ) => return, | 1971 | | trie::proof_decode::StorageValue::None => { | 1972 | | full_sqlite::InsertTrieNodeStorageValue::NoValue | 1973 | | } | 1974 | | trie::proof_decode::StorageValue::Known { | 1975 | | value, .. | 1976 | | } => full_sqlite::InsertTrieNodeStorageValue::Value { | 1977 | | value: Cow::Borrowed(value), | 1978 | | references_merkle_value: false, // TODO: | 1979 | | }, | 1980 | | }, | 1981 | | }), | 1982 | | match entry.trie_node_info.storage_value { | 1983 | | trie::proof_decode::StorageValue::None => 0, // TODO: ?! | 1984 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 1985 | | .. | 1986 | | ) => return, | 1987 | | trie::proof_decode::StorageValue::Known { | 1988 | | inline: true, | 1989 | | .. | 1990 | | } => 0, | 1991 | | trie::proof_decode::StorageValue::Known { | 1992 | | inline: false, | 1993 | | .. | 1994 | | } => 1, | 1995 | | }, | 1996 | | ).unwrap(); | 1997 | | } | 1998 | | } | 1999 | 0 | }) | 2000 | 0 | .await; | 2001 | 0 | } | 2002 | | | 2003 | 0 | if result.is_err() { | 2004 | | // Note that we perform the ban even if the source is now disconnected. 
| 2005 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 2006 | 0 | self.network_service | 2007 | 0 | .ban_and_disconnect( | 2008 | 0 | peer_id, | 2009 | 0 | self.network_chain_id, | 2010 | 0 | network_service::BanSeverity::Low, | 2011 | 0 | "storage-proof-request-error", | 2012 | 0 | ) | 2013 | 0 | .await; | 2014 | 0 | } | 2015 | | | 2016 | 0 | if let Ok(result) = result { | 2017 | 0 | // TODO: to_owned overhead | 2018 | 0 | let _ = self | 2019 | 0 | .sync | 2020 | 0 | .storage_get_response(request_id, result.decode().to_owned()); | 2021 | 0 | } else { | 2022 | 0 | let _ = self.sync.remove_request(request_id); | 2023 | 0 | } | 2024 | | | 2025 | | // If the source was actually disconnected and has no other request in | 2026 | | // progress, we clean it up. | 2027 | | // TODO: DRY | 2028 | 0 | if self.sync[source_id] | 2029 | 0 | .as_ref() | 2030 | 0 | .map_or(false, |info| info.is_disconnected) | 2031 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 2032 | | { | 2033 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 2034 | 0 | debug_assert!(_requests.next().is_none()); | 2035 | 0 | self.peers_source_id_map | 2036 | 0 | .remove(&info.unwrap().peer_id) | 2037 | 0 | .unwrap(); | 2038 | 0 | } | 2039 | | | 2040 | 0 | process_sync = true; | 2041 | | } | 2042 | | | 2043 | | WakeUpReason::SubtaskFinished(SubtaskFinished::CallProofRequestFinished { | 2044 | 0 | request_id, | 2045 | 0 | source_id, | 2046 | 0 | result, | 2047 | | }) => { | 2048 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 2049 | 0 | { | 2050 | 0 | self.database_catch_up_download = | 2051 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 2052 | 0 | } | 2053 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 2054 | 0 | { | 2055 | 0 | 
self.database_catch_up_download_block_verification = | 2056 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 2057 | 0 | } | 2058 | | | 2059 | | // TODO: DRY with above | 2060 | 0 | if let Ok(result) = &result { | 2061 | 0 | let result = result.clone(); | 2062 | 0 | self.database | 2063 | 0 | .with_database(move |database| { | 2064 | | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( | 2065 | | trie::proof_decode::Config { | 2066 | | proof: result.decode(), | 2067 | | }, | 2068 | | ) { | 2069 | | for (_, entry) in decoded.iter_ordered() { | 2070 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database | 2071 | | database.insert_trie_nodes( | 2072 | | iter::once(full_sqlite::InsertTrieNode { | 2073 | | merkle_value: Cow::Borrowed(entry.merkle_value), | 2074 | | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), | 2075 | | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), | 2076 | | storage_value: match entry.trie_node_info.storage_value { | 2077 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 2078 | | _, | 2079 | | ) => return, | 2080 | | trie::proof_decode::StorageValue::None => { | 2081 | | full_sqlite::InsertTrieNodeStorageValue::NoValue | 2082 | | } | 2083 | | trie::proof_decode::StorageValue::Known { | 2084 | | value, .. | 2085 | | } => full_sqlite::InsertTrieNodeStorageValue::Value { | 2086 | | value: Cow::Borrowed(value), | 2087 | | references_merkle_value: false, // TODO: | 2088 | | }, | 2089 | | }, | 2090 | | }), | 2091 | | match entry.trie_node_info.storage_value { | 2092 | | trie::proof_decode::StorageValue::None => 0, // TODO: ?! | 2093 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 2094 | | .. 
| 2095 | | ) => return, | 2096 | | trie::proof_decode::StorageValue::Known { | 2097 | | inline: true, | 2098 | | .. | 2099 | | } => 0, | 2100 | | trie::proof_decode::StorageValue::Known { | 2101 | | inline: false, | 2102 | | .. | 2103 | | } => 1, | 2104 | | }, | 2105 | | ).unwrap(); | 2106 | | } | 2107 | | } | 2108 | 0 | }) | 2109 | 0 | .await; | 2110 | 0 | } | 2111 | | | 2112 | 0 | if result.is_err() { | 2113 | | // Note that we perform the ban even if the source is now disconnected. | 2114 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 2115 | 0 | self.network_service | 2116 | 0 | .ban_and_disconnect( | 2117 | 0 | peer_id, | 2118 | 0 | self.network_chain_id, | 2119 | 0 | network_service::BanSeverity::Low, | 2120 | 0 | "call-proof-request-error", | 2121 | 0 | ) | 2122 | 0 | .await; | 2123 | 0 | } | 2124 | | | 2125 | 0 | if let Ok(result) = result { | 2126 | 0 | self.sync | 2127 | 0 | .call_proof_response(request_id, result.decode().to_owned()); | 2128 | 0 | // TODO: need help from networking service to avoid this to_owned | 2129 | 0 | } else { | 2130 | 0 | self.sync.remove_request(request_id); | 2131 | 0 | } | 2132 | | | 2133 | | // If the source was actually disconnected and has no other request in | 2134 | | // progress, we clean it up. 
| 2135 | | // TODO: DRY | 2136 | 0 | if self.sync[source_id] | 2137 | 0 | .as_ref() | 2138 | 0 | .map_or(false, |info| info.is_disconnected) | 2139 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 2140 | | { | 2141 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 2142 | 0 | debug_assert!(_requests.next().is_none()); | 2143 | 0 | self.peers_source_id_map | 2144 | 0 | .remove(&info.unwrap().peer_id) | 2145 | 0 | .unwrap(); | 2146 | 0 | } | 2147 | | | 2148 | 0 | process_sync = true; | 2149 | | } | 2150 | | | 2151 | | WakeUpReason::SyncProcess => { | 2152 | | // Given that processing blocks might generate a notification, and that | 2153 | | // only one notification can be queued at a time, this path must never be | 2154 | | // reached if a notification is already waiting. | 2155 | 2 | debug_assert!(self.pending_notification.is_none()); | 2156 | | // Similarly, verifying a block might generate a block announce. | 2157 | 2 | debug_assert!(self.pending_block_announce.is_none()); | 2158 | | | 2159 | | // Given that a block verification might require downloading some storage | 2160 | | // items due to missing storage items, and that we only want one download at | 2161 | | // a time, we don't verify blocks if a download is in progress. | 2162 | 2 | debug_assert!(matches!0 ( | 2163 | 2 | self.database_catch_up_download_block_verification, | 2164 | | DatabaseCatchUpDownloadBlockVerification::None | 2165 | | )); | 2166 | | | 2167 | 2 | let (new_self, maybe_more_to_process) = self.process_blocks().await0 ; | 2168 | 2 | process_sync = maybe_more_to_process; | 2169 | 2 | self = new_self; | 2170 | | } | 2171 | | } | 2172 | | } | 2173 | 2 | } |
Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground3run0B8_ Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground3run0CscDgN54JpMGG_6author _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground3run0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 775 | 19 | async fn run(mut self) { | 776 | 19 | let mut process_sync = true; | 777 | | | 778 | | loop { | 779 | | enum WakeUpReason { | 780 | | ReadyToAuthor, | 781 | | FrontendEvent(ToBackground), | 782 | | FrontendClosed, | 783 | | SendPendingNotification(Notification), | 784 | | StartNetworkRequest { | 785 | | source_id: all::SourceId, | 786 | | request: all::DesiredRequest, | 787 | | database_catch_up_type: DbCatchUpType, | 788 | | }, | 789 | | NetworkEvent(network_service::Event), | 790 | | NetworkLocalChainUpdate, | 791 | | AnnounceBlock(Vec<u8>, [u8; 32], u64), | 792 | | SubtaskFinished(SubtaskFinished), | 793 | | SyncProcess, | 794 | | } | 795 | | | 796 | | enum DbCatchUpType { | 797 | | No, | 798 | | BlockVerification, | 799 | | Database, | 800 | | } | 801 | | | 802 | 58 | let wake_up_reason: WakeUpReason = { | 803 | | // Creating the block authoring state and prepare a future that is ready when something | 804 | | // related to the block authoring is ready. | 805 | | // TODO: refactor as a separate task? | 806 | | // TODO: restore block authoring after https://github.com/smol-dot/smoldot/issues/1109 | 807 | 58 | let authoring_ready_future = { | 808 | 58 | future::pending::<WakeUpReason>() | 809 | | /*// TODO: overhead to call best_block_consensus() multiple times | 810 | | let local_authorities = { | 811 | | let namespace_filter = match self.sync.best_block_consensus() { | 812 | | chain_information::ChainInformationConsensusRef::Aura { .. 
} => { | 813 | | Some(keystore::KeyNamespace::Aura) | 814 | | } | 815 | | chain_information::ChainInformationConsensusRef::Babe { .. } => { | 816 | | Some(keystore::KeyNamespace::Babe) | 817 | | } | 818 | | chain_information::ChainInformationConsensusRef::Unknown => { | 819 | | // In `Unknown` mode, all keys are accepted and there is no | 820 | | // filter on the namespace, as we can't author blocks anyway. | 821 | | // TODO: is that correct? | 822 | | None | 823 | | } | 824 | | }; | 825 | | | 826 | | // Calling `keys()` on the keystore is racy, but that's considered | 827 | | // acceptable and part of the design of the node. | 828 | | self.keystore | 829 | | .keys() | 830 | | .await | 831 | | .filter(|(namespace, _)| { | 832 | | namespace_filter.map_or(true, |n| *namespace == n) | 833 | | }) | 834 | | .map(|(_, key)| key) | 835 | | .collect::<Vec<_>>() // TODO: collect overhead :-/ | 836 | | }; | 837 | | | 838 | | let block_authoring = | 839 | | match (&mut self.block_authoring, self.sync.best_block_consensus()) { | 840 | | (Some(ba), _) => Some(ba), | 841 | | ( | 842 | | block_authoring @ None, | 843 | | chain_information::ChainInformationConsensusRef::Aura { | 844 | | finalized_authorities_list, // TODO: field name not appropriate; should probably change the chain_information module | 845 | | slot_duration, | 846 | | }, | 847 | | ) => Some( | 848 | | block_authoring.insert(( | 849 | | author::build::Builder::new(author::build::Config { | 850 | | consensus: author::build::ConfigConsensus::Aura { | 851 | | current_authorities: finalized_authorities_list, | 852 | | local_authorities: local_authorities.iter(), | 853 | | now_from_unix_epoch: SystemTime::now() | 854 | | .duration_since(SystemTime::UNIX_EPOCH) | 855 | | .unwrap(), | 856 | | slot_duration, | 857 | | }, | 858 | | }), | 859 | | local_authorities, | 860 | | )), | 861 | | ), | 862 | | ( | 863 | | None, | 864 | | chain_information::ChainInformationConsensusRef::Babe { .. 
}, | 865 | | ) => { | 866 | | None // TODO: the block authoring doesn't support Babe at the moment | 867 | | } | 868 | | (None, _) => todo!(), | 869 | | }; | 870 | | | 871 | | match &block_authoring { | 872 | | Some((author::build::Builder::Ready(_), _)) => future::Either::Left( | 873 | | future::Either::Left(future::ready(Instant::now())), | 874 | | ), | 875 | | Some((author::build::Builder::WaitSlot(when), _)) => { | 876 | | let delay = (UNIX_EPOCH + when.when()) | 877 | | .duration_since(SystemTime::now()) | 878 | | .unwrap_or_else(|_| Duration::new(0, 0)); | 879 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( | 880 | | delay, | 881 | | ))) | 882 | | } | 883 | | None => future::Either::Left(future::Either::Right(future::pending())), | 884 | | Some((author::build::Builder::Idle, _)) => { | 885 | | // If the block authoring is idle, which happens in case of error, | 886 | | // sleep for an arbitrary duration before resetting it. | 887 | | // This prevents the authoring from trying over and over again to generate | 888 | | // a bad block. 
| 889 | | let delay = Duration::from_secs(2); | 890 | | future::Either::Right(future::FutureExt::fuse(smol::Timer::after( | 891 | | delay, | 892 | | ))) | 893 | | } | 894 | | }*/ | 895 | | }; | 896 | | | 897 | 58 | async { | 898 | | if let Some(notification) = self.pending_notification.take() { | 899 | | WakeUpReason::SendPendingNotification(notification) | 900 | | } else { | 901 | | future::pending().await | 902 | | } | 903 | | } | 904 | 58 | .or(async move { | 905 | | authoring_ready_future.await; | 906 | | WakeUpReason::ReadyToAuthor | 907 | 58 | }) | 908 | 58 | .or(async { | 909 | | self.to_background_rx | 910 | | .next() | 911 | | .await | 912 | | .map_or(WakeUpReason::FrontendClosed, WakeUpReason::FrontendEvent) | 913 | 58 | }) | 914 | 58 | .or(async { | 915 | | WakeUpReason::NetworkEvent(self.from_network_service.next().await.unwrap()) | 916 | 58 | }) | 917 | 58 | .or(async { | 918 | | if self.network_local_chain_update_needed { | 919 | | self.network_local_chain_update_needed = false; | 920 | | WakeUpReason::NetworkLocalChainUpdate | 921 | | } else { | 922 | | future::pending().await | 923 | | } | 924 | 58 | }) | 925 | 58 | .or(async { | 926 | | if let Some((header, hash, height)) = self.pending_block_announce.take() { | 927 | | WakeUpReason::AnnounceBlock(header, hash, height) | 928 | | } else { | 929 | | future::pending().await | 930 | | } | 931 | 58 | }) | 932 | 58 | .or(async { | 933 | | let Some(subtask_finished) = self.sub_tasks.next().await else { | 934 | | future::pending().await | 935 | | }; | 936 | | WakeUpReason::SubtaskFinished(subtask_finished) | 937 | 58 | }) | 938 | 58 | .or({ | 939 | 58 | async { | 940 | | // TODO: handle obsolete requests | 941 | | // Ask the sync state machine whether any new network request should | 942 | | // be started. | 943 | | // `desired_requests()` returns, in decreasing order of priority, the | 944 | | // requests that should be started in order for the syncing to proceed. 
We | 945 | | // simply pick the first request, but enforce one ongoing request per | 946 | | // source. | 947 | | // TODO: desired_requests() is expensive and done at every iteration | 948 | | let request_to_start = self.sync.desired_requests().find( | 949 | | |(source_id, source_info, request_details)| { | 950 | | if source_info | 951 | | .as_ref() | 952 | | .map_or(false, |info| info.is_disconnected) | 953 | | { | 954 | | // Source is a networking source that has already been disconnected. | 955 | | false | 956 | | } else if *source_id != self.block_author_sync_source { | 957 | | // Remote source. | 958 | | self.sync.source_num_ongoing_requests(*source_id) == 0 | 959 | | } else { | 960 | | // Locally-authored blocks source. | 961 | | match (request_details, &self.authored_block) { | 962 | | ( | 963 | | all::DesiredRequest::BlocksRequest { | 964 | | first_block_hash, | 965 | | first_block_height, | 966 | | .. | 967 | | }, | 968 | | Some((authored_height, authored_hash, _, _)), | 969 | | ) if first_block_hash == authored_hash | 970 | | && first_block_height == authored_height => | 971 | | { | 972 | | true | 973 | | } | 974 | | _ => false, | 975 | | } | 976 | | } | 977 | | }, | 978 | | ); | 979 | | if let Some((source_id, _, request)) = request_to_start { | 980 | | return WakeUpReason::StartNetworkRequest { | 981 | | source_id, | 982 | | request, | 983 | | database_catch_up_type: DbCatchUpType::No, | 984 | | }; | 985 | | } | 986 | | | 987 | | match self.database_catch_up_download_block_verification.clone() { | 988 | | _ if !matches!( | 989 | | self.database_catch_up_download, | 990 | | DatabaseCatchUpDownload::NoDownloadInProgress | 991 | | | DatabaseCatchUpDownload::NothingToDownloadCache | 992 | | ) => {} | 993 | | DatabaseCatchUpDownloadBlockVerification::None => {} | 994 | | DatabaseCatchUpDownloadBlockVerification::InProgress(_) => {} | 995 | | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 996 | | block_hash, | 997 | | block_number, | 998 | | 
function_name, | 999 | | parameter, | 1000 | | } => { | 1001 | | // Choose which source to query. We have to use an `if` because | 1002 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1003 | | // or equal to the finalized block number. | 1004 | | let source_id = if block_number | 1005 | | <= self.sync.finalized_block_number() | 1006 | | { | 1007 | | self.sync | 1008 | | .sources() | 1009 | | .filter(|s| *s != self.block_author_sync_source) | 1010 | | .choose(&mut rand::thread_rng()) | 1011 | | } else { | 1012 | | self.sync | 1013 | | .knows_non_finalized_block(block_number, &block_hash) | 1014 | | .filter(|source_id| { | 1015 | | *source_id != self.block_author_sync_source | 1016 | | && self.sync.source_num_ongoing_requests(*source_id) | 1017 | | == 0 | 1018 | | }) | 1019 | | .choose(&mut rand::thread_rng()) | 1020 | | }; | 1021 | | | 1022 | | if let Some(source_id) = source_id { | 1023 | | return WakeUpReason::StartNetworkRequest { | 1024 | | source_id, | 1025 | | request: all::DesiredRequest::RuntimeCallMerkleProof { | 1026 | | block_hash, | 1027 | | function_name: function_name.into(), | 1028 | | parameter_vectored: parameter.into(), | 1029 | | }, | 1030 | | database_catch_up_type: DbCatchUpType::BlockVerification, | 1031 | | }; | 1032 | | } | 1033 | | } | 1034 | | DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 1035 | | block_hash, | 1036 | | block_number, | 1037 | | } => { | 1038 | | // Choose which source to query. We have to use an `if` because | 1039 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1040 | | // or equal to the finalized block number. 
| 1041 | | let source_id = if block_number | 1042 | | <= self.sync.finalized_block_number() | 1043 | | { | 1044 | | self.sync | 1045 | | .sources() | 1046 | | .filter(|s| *s != self.block_author_sync_source) | 1047 | | .choose(&mut rand::thread_rng()) | 1048 | | } else { | 1049 | | self.sync | 1050 | | .knows_non_finalized_block(block_number, &block_hash) | 1051 | | .filter(|source_id| { | 1052 | | *source_id != self.block_author_sync_source | 1053 | | && self.sync.source_num_ongoing_requests(*source_id) | 1054 | | == 0 | 1055 | | }) | 1056 | | .choose(&mut rand::thread_rng()) | 1057 | | }; | 1058 | | | 1059 | | if let Some(source_id) = source_id { | 1060 | | return WakeUpReason::StartNetworkRequest { | 1061 | | source_id, | 1062 | | request: all::DesiredRequest::StorageGetMerkleProof { | 1063 | | block_hash, | 1064 | | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1065 | | keys: vec![":code".into(), ":heappages".into()], | 1066 | | }, | 1067 | | database_catch_up_type: DbCatchUpType::BlockVerification, | 1068 | | }; | 1069 | | } | 1070 | | } | 1071 | | } | 1072 | | | 1073 | | // If the sync state machine doesn't require any additional request, ask | 1074 | | // the database whether any storage item is missing. 
| 1075 | | if matches!( | 1076 | | self.database_catch_up_download, | 1077 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1078 | | ) { | 1079 | | // TODO: this has a O(n^2) complexity; in case all sources are busy, we iterate a lot | 1080 | | let missing_items = self | 1081 | | .database | 1082 | | .with_database(|db| { | 1083 | | db.finalized_and_above_missing_trie_nodes_unordered() | 1084 | | }) | 1085 | | .await | 1086 | | .unwrap(); | 1087 | | if missing_items.is_empty() { | 1088 | | self.database_catch_up_download = | 1089 | | DatabaseCatchUpDownload::NothingToDownloadCache; | 1090 | | } | 1091 | | | 1092 | | for missing_item in missing_items | 1093 | | .into_iter() | 1094 | | .flat_map(|item| item.blocks.into_iter()) | 1095 | | { | 1096 | | // Since the database and sync state machine are supposed to have the | 1097 | | // same finalized block, it is guaranteed that the missing item are | 1098 | | // in the finalized block or above. | 1099 | | debug_assert!( | 1100 | | missing_item.number >= self.sync.finalized_block_number() | 1101 | | ); | 1102 | | | 1103 | | // Choose which source to query. We have to use an `if` because | 1104 | | // `knows_non_finalized_block` panics if the parameter is inferior | 1105 | | // or equal to the finalized block number. 
| 1106 | | let source_id = if missing_item.number | 1107 | | <= self.sync.finalized_block_number() | 1108 | | { | 1109 | | let Some(source_id) = self | 1110 | | .sync | 1111 | | .sources() | 1112 | | .filter(|s| *s != self.block_author_sync_source) | 1113 | | .choose(&mut rand::thread_rng()) | 1114 | | else { | 1115 | | break; | 1116 | | }; | 1117 | | source_id | 1118 | | } else { | 1119 | | let Some(source_id) = self | 1120 | | .sync | 1121 | | .knows_non_finalized_block( | 1122 | | missing_item.number, | 1123 | | &missing_item.hash, | 1124 | | ) | 1125 | | .filter(|source_id| { | 1126 | | *source_id != self.block_author_sync_source | 1127 | | && self.sync.source_num_ongoing_requests(*source_id) | 1128 | | == 0 | 1129 | | }) | 1130 | | .choose(&mut rand::thread_rng()) | 1131 | | else { | 1132 | | continue; | 1133 | | }; | 1134 | | source_id | 1135 | | }; | 1136 | | | 1137 | | return WakeUpReason::StartNetworkRequest { | 1138 | | source_id, | 1139 | | request: all::DesiredRequest::StorageGetMerkleProof { | 1140 | | block_hash: missing_item.hash, | 1141 | | state_trie_root: [0; 32], // TODO: wrong, but field value unused so it's fine temporarily | 1142 | | keys: vec![trie::nibbles_to_bytes_suffix_extend( | 1143 | | missing_item | 1144 | | .trie_node_key_nibbles | 1145 | | .into_iter() | 1146 | | // In order to download more than one item at a time, | 1147 | | // we add some randomly-generated nibbles to the | 1148 | | // requested key. The request will target the missing | 1149 | | // key plus a few other random keys. | 1150 | | .chain((0..32).map(|_| { | 1151 | | rand::Rng::gen_range( | 1152 | | &mut rand::thread_rng(), | 1153 | | 0..16, | 1154 | | ) | 1155 | | })) | 1156 | | .map(|n| trie::Nibble::try_from(n).unwrap()), | 1157 | | ) | 1158 | | .collect::<Vec<_>>()], | 1159 | | }, | 1160 | | database_catch_up_type: DbCatchUpType::Database, | 1161 | | }; | 1162 | | } | 1163 | | } | 1164 | | | 1165 | | // No network request to start. 
| 1166 | | future::pending().await | 1167 | 58 | } | 1168 | 58 | }) | 1169 | 58 | .or({ | 1170 | 58 | let is_downloading = matches!0 ( | 1171 | 58 | self.database_catch_up_download_block_verification, | 1172 | | DatabaseCatchUpDownloadBlockVerification::None | 1173 | | ); | 1174 | | async move { | 1175 | | if !process_sync || !is_downloading { | 1176 | | future::pending().await | 1177 | | } | 1178 | | WakeUpReason::SyncProcess | 1179 | | } | 1180 | | }) | 1181 | 37 | .await | 1182 | | }; | 1183 | | | 1184 | 1 | match wake_up_reason { | 1185 | | WakeUpReason::ReadyToAuthor => { | 1186 | | // Ready to author a block. Call `author_block()`. | 1187 | | // While a block is being authored, the whole syncing state machine is | 1188 | | // deliberately frozen. | 1189 | 0 | match self.block_authoring { | 1190 | | Some((author::build::Builder::Ready(_), _)) => { | 1191 | 0 | self.author_block().await; | 1192 | | } | 1193 | 0 | Some((author::build::Builder::WaitSlot(when), local_authorities)) => { | 1194 | 0 | self.block_authoring = Some(( | 1195 | 0 | author::build::Builder::Ready(when.start()), | 1196 | 0 | local_authorities, | 1197 | 0 | )); | 1198 | 0 | self.author_block().await; | 1199 | | } | 1200 | 0 | Some((author::build::Builder::Idle, _)) => { | 1201 | 0 | self.block_authoring = None; | 1202 | 0 | } | 1203 | | None => { | 1204 | 0 | unreachable!() | 1205 | | } | 1206 | | } | 1207 | | | 1208 | 0 | process_sync = true; | 1209 | | } | 1210 | | | 1211 | | WakeUpReason::FrontendClosed => { | 1212 | | // Shutdown. 
| 1213 | 19 | return; | 1214 | | } | 1215 | | | 1216 | | WakeUpReason::FrontendEvent(ToBackground::SubscribeAll { | 1217 | 0 | buffer_size, | 1218 | 0 | _max_finalized_pinned_blocks: _, | 1219 | 0 | result_tx, | 1220 | 0 | }) => { | 1221 | 0 | let (tx, new_blocks) = async_channel::bounded(buffer_size.saturating_sub(1)); | 1222 | | | 1223 | 0 | let non_finalized_blocks_ancestry_order = { | 1224 | 0 | let blocks_in = self | 1225 | 0 | .sync | 1226 | 0 | .non_finalized_blocks_ancestry_order() | 1227 | 0 | .map(|h| { | 1228 | | ( | 1229 | | h.number, | 1230 | | h.scale_encoding_vec(self.sync.block_number_bytes()), | 1231 | | *h.parent_hash, | 1232 | | ) | 1233 | 0 | }) | 1234 | 0 | .collect::<Vec<_>>(); | 1235 | 0 | let mut blocks_out = Vec::new(); | 1236 | 0 | for (number, scale_encoding, parent_hash) in blocks_in { | 1237 | 0 | let hash = header::hash_from_scale_encoded_header(&scale_encoding); | 1238 | 0 | let runtime = match &self.sync[(number, &hash)] { | 1239 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), | 1240 | 0 | _ => unreachable!(), | 1241 | | }; | 1242 | 0 | let runtime_update = if Arc::ptr_eq(&self.finalized_runtime, &runtime) { | 1243 | 0 | None | 1244 | | } else { | 1245 | 0 | Some(runtime.clone()) | 1246 | | }; | 1247 | 0 | blocks_out.push(BlockNotification { | 1248 | 0 | is_new_best: header::hash_from_scale_encoded_header( | 1249 | 0 | &scale_encoding, | 1250 | 0 | ) == *self.sync.best_block_hash(), | 1251 | 0 | block_hash: header::hash_from_scale_encoded_header(&scale_encoding), | 1252 | 0 | scale_encoded_header: scale_encoding, | 1253 | 0 | runtime_update, | 1254 | 0 | parent_hash, | 1255 | 0 | }); | 1256 | | } | 1257 | 0 | blocks_out | 1258 | 0 | }; | 1259 | 0 |
| 1260 | 0 | self.blocks_notifications.push(tx); | 1261 | 0 | let _ = result_tx.send(SubscribeAll { | 1262 | 0 | id: SubscriptionId(0), // TODO: | 1263 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), | 1264 | 0 | finalized_block_scale_encoded_header: self | 1265 | 0 | .sync | 1266 | 0 | .finalized_block_header() | 1267 | 0 | .to_owned(), | 1268 | 0 | finalized_block_runtime: self.finalized_runtime.clone(), | 1269 | 0 | non_finalized_blocks_ancestry_order, | 1270 | 0 | new_blocks, | 1271 | 0 | }); | 1272 | | } | 1273 | 0 | WakeUpReason::SendPendingNotification(notification) => { | 1274 | | // Elements in `blocks_notifications` are removed one by one and inserted | 1275 | | // back if the channel is still open. | 1276 | 0 | for index in (0..self.blocks_notifications.len()).rev() { | 1277 | 0 | let subscription = self.blocks_notifications.swap_remove(index); | 1278 | 0 | if subscription.try_send(notification.clone()).is_err() { | 1279 | 0 | continue; | 1280 | 0 | } | 1281 | 0 | self.blocks_notifications.push(subscription); | 1282 | | } | 1283 | | } | 1284 | | | 1285 | 0 | WakeUpReason::FrontendEvent(ToBackground::GetSyncState { result_tx }) => { | 1286 | 0 | let _ = result_tx.send(SyncState { | 1287 | 0 | best_block_hash: *self.sync.best_block_hash(), | 1288 | 0 | best_block_number: self.sync.best_block_number(), | 1289 | 0 | finalized_block_hash: *self.sync.finalized_block_hash(), | 1290 | 0 | finalized_block_number: self.sync.finalized_block_number(), | 1291 | 0 | }); | 1292 | 0 | } | 1293 | 0 | WakeUpReason::FrontendEvent(ToBackground::Unpin { result_tx, .. }) => { | 1294 | 0 | // TODO: check whether block was indeed pinned, and prune blocks that aren't pinned anymore from the database | 1295 | 0 | let _ = result_tx.send(()); | 1296 | 0 | } | 1297 | 1 | WakeUpReason::FrontendEvent(ToBackground::IsMajorSyncingHint { result_tx }) => { | 1298 | 1 | // As documented, the value returned doesn't need to be precise. 
| 1299 | 1 | let result = match self.sync.status() { | 1300 | 1 | all::Status::Sync => false, | 1301 | | all::Status::WarpSyncFragments { .. } | 1302 | 0 | | all::Status::WarpSyncChainInformation { .. } => true, | 1303 | | }; | 1304 | | | 1305 | 1 | let _ = result_tx.send(result); | 1306 | | } | 1307 | | | 1308 | | WakeUpReason::NetworkLocalChainUpdate => { | 1309 | 19 | self.network_service | 1310 | 19 | .set_local_best_block( | 1311 | 19 | self.network_chain_id, | 1312 | 19 | *self.sync.best_block_hash(), | 1313 | 19 | self.sync.best_block_number(), | 1314 | 19 | ) | 1315 | 0 | .await; | 1316 | | } | 1317 | | | 1318 | 0 | WakeUpReason::AnnounceBlock(header, hash, height) => { | 1319 | | // We can never be guaranteed that a certain source does *not* know about a | 1320 | | // block, however it is not a big problem to send a block announce to a source | 1321 | | // that already knows about that block. For this reason, the list of sources | 1322 | | // we send the block announce to is `all_sources - sources_that_know_it`. | 1323 | | // | 1324 | | // Note that not sending block announces to sources that already | 1325 | | // know that block means that these sources might also miss the | 1326 | | // fact that our local best block has been updated. This is in | 1327 | | // practice not a problem either. | 1328 | 0 | let sources_to_announce_to = { | 1329 | 0 | let mut all_sources = self | 1330 | 0 | .sync | 1331 | 0 | .sources() | 1332 | 0 | .collect::<HashSet<_, fnv::FnvBuildHasher>>(); | 1333 | 0 | for knows in self.sync.knows_non_finalized_block(height, &hash) { | 1334 | 0 | all_sources.remove(&knows); | 1335 | 0 | } | 1336 | 0 | all_sources | 1337 | 0 | }; | 1338 | 0 |
| 1339 | 0 | let is_best = *self.sync.best_block_hash() == hash; | 1340 | | | 1341 | 0 | for source_id in sources_to_announce_to { | 1342 | 0 | let peer_id = match &self.sync[source_id] { | 1343 | 0 | Some(info) if !info.is_disconnected => &info.peer_id, | 1344 | 0 | _ => continue, | 1345 | | }; | 1346 | | | 1347 | 0 | if self | 1348 | 0 | .network_service | 1349 | 0 | .clone() | 1350 | 0 | .send_block_announce( | 1351 | 0 | peer_id.clone(), | 1352 | 0 | self.network_chain_id, | 1353 | 0 | header.clone(), | 1354 | 0 | is_best, | 1355 | 0 | ) | 1356 | 0 | .await | 1357 | 0 | .is_ok() | 1358 | 0 | { | 1359 | 0 | // Note that `try_add_known_block_to_source` might have | 1360 | 0 | // no effect, which is not a problem considering that this | 1361 | 0 | // block tracking is mostly about optimizations and | 1362 | 0 | // politeness. | 1363 | 0 | self.sync | 1364 | 0 | .try_add_known_block_to_source(source_id, height, hash); | 1365 | 0 | } | 1366 | | } | 1367 | | } | 1368 | | | 1369 | | WakeUpReason::NetworkEvent(network_service::Event::Connected { | 1370 | 0 | peer_id, | 1371 | 0 | chain_id, | 1372 | 0 | best_block_number, | 1373 | 0 | best_block_hash, | 1374 | 0 | }) if chain_id == self.network_chain_id => { | 1375 | 0 | // Most of the time, we insert a new source in the state machine. | 1376 | 0 | // However, a source of that `PeerId` might already exist but be considered as | 1377 | 0 | // disconnected. If that is the case, we simply mark it as no | 1378 | 0 | // longer disconnected. 
| 1379 | 0 | match self.peers_source_id_map.entry(peer_id) { | 1380 | 0 | hashbrown::hash_map::Entry::Occupied(entry) => { | 1381 | 0 | let id = *entry.get(); | 1382 | 0 | let is_disconnected = | 1383 | 0 | &mut self.sync[id].as_mut().unwrap().is_disconnected; | 1384 | 0 | debug_assert!(*is_disconnected); | 1385 | 0 | *is_disconnected = false; | 1386 | | } | 1387 | 0 | hashbrown::hash_map::Entry::Vacant(entry) => { | 1388 | 0 | let id = self | 1389 | 0 | .sync | 1390 | 0 | .prepare_add_source(best_block_number, best_block_hash) | 1391 | 0 | .add_source( | 1392 | 0 | Some(NetworkSourceInfo { | 1393 | 0 | peer_id: entry.key().clone(), | 1394 | 0 | is_disconnected: false, | 1395 | 0 | }), | 1396 | 0 | NonFinalizedBlock::NotVerified, | 1397 | 0 | ); | 1398 | 0 | entry.insert(id); | 1399 | 0 | } | 1400 | | } | 1401 | | } | 1402 | | WakeUpReason::NetworkEvent(network_service::Event::Disconnected { | 1403 | 0 | peer_id, | 1404 | 0 | chain_id, | 1405 | 0 | }) if chain_id == self.network_chain_id => { | 1406 | 0 | // Sources that disconnect are only immediately removed from the sync state | 1407 | 0 | // machine if they have no request in progress. If that is not the case, they | 1408 | 0 | // are instead only marked as disconnected. 
| 1409 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1410 | 0 | if self.sync.source_num_ongoing_requests(id) == 0 { | 1411 | 0 | self.peers_source_id_map.remove(&peer_id).unwrap(); | 1412 | 0 | let (_, mut _requests) = self.sync.remove_source(id); | 1413 | 0 | debug_assert!(_requests.next().is_none()); | 1414 | | } else { | 1415 | 0 | let is_disconnected = &mut self.sync[id].as_mut().unwrap().is_disconnected; | 1416 | 0 | debug_assert!(!*is_disconnected); | 1417 | 0 | *is_disconnected = true; | 1418 | | } | 1419 | | } | 1420 | | WakeUpReason::NetworkEvent(network_service::Event::BlockAnnounce { | 1421 | 0 | chain_id, | 1422 | 0 | peer_id, | 1423 | 0 | scale_encoded_header, | 1424 | 0 | is_best, | 1425 | 0 | }) if chain_id == self.network_chain_id => { | 1426 | 0 | let _jaeger_span = self.jaeger_service.block_announce_process_span( | 1427 | 0 | &header::hash_from_scale_encoded_header(&scale_encoded_header), | 1428 | 0 | ); | 1429 | 0 |
| 1430 | 0 | let id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1431 | 0 | // TODO: log the outcome | 1432 | 0 | match self.sync.block_announce(id, scale_encoded_header, is_best) { | 1433 | 0 | all::BlockAnnounceOutcome::TooOld { .. } => {} | 1434 | 0 | all::BlockAnnounceOutcome::AlreadyVerified(known) | 1435 | 0 | | all::BlockAnnounceOutcome::AlreadyPending(known) => { | 1436 | 0 | known.update_source_and_block(); | 1437 | 0 | } | 1438 | 0 | all::BlockAnnounceOutcome::Unknown(unknown) => { | 1439 | 0 | unknown.insert_and_update_source(NonFinalizedBlock::NotVerified) | 1440 | | } | 1441 | 0 | all::BlockAnnounceOutcome::InvalidHeader(_) => unreachable!(), // TODO: ?!?! why unreachable? also, ban the peer | 1442 | | } | 1443 | | } | 1444 | | WakeUpReason::NetworkEvent(network_service::Event::GrandpaNeighborPacket { | 1445 | 0 | chain_id, | 1446 | 0 | peer_id, | 1447 | 0 | finalized_block_height, | 1448 | 0 | }) if chain_id == self.network_chain_id => { | 1449 | 0 | let source_id = *self.peers_source_id_map.get(&peer_id).unwrap(); | 1450 | 0 | self.sync | 1451 | 0 | .update_source_finality_state(source_id, finalized_block_height); | 1452 | 0 | } | 1453 | 0 | WakeUpReason::NetworkEvent(_) => { | 1454 | 0 | // Different chain index. | 1455 | 0 | } | 1456 | | | 1457 | | WakeUpReason::StartNetworkRequest { | 1458 | 0 | source_id, | 1459 | 0 | request: request_info @ all::DesiredRequest::BlocksRequest { .. }, | 1460 | 0 | database_catch_up_type, | 1461 | 0 | } if source_id == self.block_author_sync_source => { | 1462 | 0 | debug_assert!(matches!(database_catch_up_type, DbCatchUpType::No)); | 1463 | | | 1464 | 0 | self.log_callback.log( | 1465 | 0 | LogLevel::Debug, | 1466 | 0 | "queue-locally-authored-block-for-import".to_string(), | 1467 | 0 | ); | 1468 | 0 |
| 1469 | 0 | let (_, block_hash, scale_encoded_header, scale_encoded_extrinsics) = | 1470 | 0 | self.authored_block.take().unwrap(); | 1471 | 0 |
| 1472 | 0 | let _jaeger_span = self.jaeger_service.block_import_queue_span(&block_hash); | 1473 | 0 |
| 1474 | 0 | // Create a request that is immediately answered right below. | 1475 | 0 | let request_id = self.sync.add_request(source_id, request_info.into(), ()); | 1476 | 0 | // TODO: announce the block on the network, but only after it's been imported | 1477 | 0 | self.sync.blocks_request_response( | 1478 | 0 | request_id, | 1479 | 0 | iter::once(all::BlockRequestSuccessBlock { | 1480 | 0 | scale_encoded_header, | 1481 | 0 | scale_encoded_extrinsics, | 1482 | 0 | scale_encoded_justifications: Vec::new(), | 1483 | 0 | user_data: NonFinalizedBlock::NotVerified, | 1484 | 0 | }), | 1485 | 0 | ); | 1486 | | } | 1487 | | | 1488 | | WakeUpReason::StartNetworkRequest { | 1489 | 0 | source_id, | 1490 | 0 | request: | 1491 | 0 | all::DesiredRequest::BlocksRequest { | 1492 | 0 | first_block_hash, | 1493 | 0 | first_block_height, | 1494 | 0 | num_blocks, | 1495 | 0 | request_headers, | 1496 | 0 | request_bodies, | 1497 | 0 | request_justification, | 1498 | 0 | }, | 1499 | 0 | database_catch_up_type, | 1500 | 0 | } => { | 1501 | 0 | // Before notifying the syncing of the request, clamp the number of blocks to | 1502 | 0 | // the number of blocks we expect to receive. | 1503 | 0 | let num_blocks = NonZeroU64::new(cmp::min(num_blocks.get(), 64)).unwrap(); | 1504 | | | 1505 | 0 | let peer_id = { | 1506 | 0 | let info = self.sync[source_id].clone().unwrap(); | 1507 | 0 | // Disconnected sources are filtered out above. | 1508 | 0 | debug_assert!(!info.is_disconnected); | 1509 | 0 | info.peer_id | 1510 | 0 | }; | 1511 | 0 |
| 1512 | 0 | // TODO: add jaeger span | 1513 | 0 |
| 1514 | 0 | let request = self.network_service.clone().blocks_request( | 1515 | 0 | peer_id, | 1516 | 0 | self.network_chain_id, | 1517 | 0 | network::codec::BlocksRequestConfig { | 1518 | 0 | start: network::codec::BlocksRequestConfigStart::Hash(first_block_hash), | 1519 | 0 | desired_count: NonZeroU32::new( | 1520 | 0 | u32::try_from(num_blocks.get()).unwrap_or(u32::MAX), | 1521 | 0 | ) | 1522 | 0 | .unwrap(), | 1523 | 0 | // The direction is hardcoded based on the documentation of the syncing | 1524 | 0 | // state machine. | 1525 | 0 | direction: network::codec::BlocksRequestDirection::Descending, | 1526 | 0 | fields: network::codec::BlocksRequestFields { | 1527 | 0 | header: true, // TODO: always set to true due to unwrapping the header when the response comes | 1528 | 0 | body: request_bodies, | 1529 | 0 | justifications: request_justification, | 1530 | 0 | }, | 1531 | 0 | }, | 1532 | 0 | ); | 1533 | 0 |
| 1534 | 0 | let request_id = self.sync.add_request( | 1535 | 0 | source_id, | 1536 | 0 | all::DesiredRequest::BlocksRequest { | 1537 | 0 | first_block_hash, | 1538 | 0 | first_block_height, | 1539 | 0 | num_blocks, | 1540 | 0 | request_headers, | 1541 | 0 | request_bodies, | 1542 | 0 | request_justification, | 1543 | 0 | } | 1544 | 0 | .into(), | 1545 | 0 | (), | 1546 | 0 | ); | 1547 | 0 |
| 1548 | 0 | match database_catch_up_type { | 1549 | 0 | DbCatchUpType::No => {} | 1550 | | DbCatchUpType::Database => { | 1551 | 0 | debug_assert!(matches!( | 1552 | 0 | self.database_catch_up_download, | 1553 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1554 | | )); | 1555 | 0 | self.database_catch_up_download = | 1556 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1557 | | } | 1558 | 0 | DbCatchUpType::BlockVerification => { | 1559 | 0 | self.database_catch_up_download_block_verification = | 1560 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1561 | 0 | } | 1562 | | } | 1563 | | | 1564 | 0 | self.sub_tasks.push(Box::pin(async move { | 1565 | | let result = request.await; | 1566 | | SubtaskFinished::BlocksRequestFinished { | 1567 | | request_id, | 1568 | | source_id, | 1569 | | result, | 1570 | | } | 1571 | 0 | })); | 1572 | | } | 1573 | | | 1574 | | WakeUpReason::StartNetworkRequest { | 1575 | 0 | source_id, | 1576 | 0 | request: | 1577 | 0 | all::DesiredRequest::WarpSync { | 1578 | 0 | sync_start_block_hash, | 1579 | 0 | }, | 1580 | 0 | database_catch_up_type, | 1581 | 0 | } => { | 1582 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1583 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1584 | 0 |
| 1585 | 0 | let request = self.network_service.clone().warp_sync_request( | 1586 | 0 | peer_id, | 1587 | 0 | self.network_chain_id, | 1588 | 0 | sync_start_block_hash, | 1589 | 0 | ); | 1590 | 0 |
| 1591 | 0 | let request_id = self.sync.add_request( | 1592 | 0 | source_id, | 1593 | 0 | all::RequestDetail::WarpSync { | 1594 | 0 | sync_start_block_hash, | 1595 | 0 | }, | 1596 | 0 | (), | 1597 | 0 | ); | 1598 | 0 |
| 1599 | 0 | match database_catch_up_type { | 1600 | 0 | DbCatchUpType::No => {} | 1601 | | DbCatchUpType::Database => { | 1602 | 0 | debug_assert!(matches!( | 1603 | 0 | self.database_catch_up_download, | 1604 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1605 | | )); | 1606 | 0 | self.database_catch_up_download = | 1607 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1608 | | } | 1609 | 0 | DbCatchUpType::BlockVerification => { | 1610 | 0 | self.database_catch_up_download_block_verification = | 1611 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1612 | 0 | } | 1613 | | } | 1614 | | | 1615 | 0 | self.sub_tasks.push(Box::pin(async move { | 1616 | | let result = request.await; | 1617 | | SubtaskFinished::WarpSyncRequestFinished { | 1618 | | request_id, | 1619 | | source_id, | 1620 | | result, | 1621 | | } | 1622 | 0 | })); | 1623 | | } | 1624 | | | 1625 | | WakeUpReason::StartNetworkRequest { | 1626 | 0 | source_id, | 1627 | 0 | request: | 1628 | 0 | all::DesiredRequest::StorageGetMerkleProof { | 1629 | 0 | block_hash, keys, .. | 1630 | 0 | }, | 1631 | 0 | database_catch_up_type, | 1632 | 0 | } => { | 1633 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1634 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1635 | 0 |
| 1636 | 0 | let request = self.network_service.clone().storage_request( | 1637 | 0 | peer_id, | 1638 | 0 | self.network_chain_id, | 1639 | 0 | network::codec::StorageProofRequestConfig { | 1640 | 0 | block_hash, | 1641 | 0 | keys: keys.clone().into_iter(), | 1642 | 0 | }, | 1643 | 0 | ); | 1644 | 0 |
| 1645 | 0 | let request_id = self.sync.add_request( | 1646 | 0 | source_id, | 1647 | 0 | all::RequestDetail::StorageGet { block_hash, keys }, | 1648 | 0 | (), | 1649 | 0 | ); | 1650 | 0 |
| 1651 | 0 | match database_catch_up_type { | 1652 | 0 | DbCatchUpType::No => {} | 1653 | | DbCatchUpType::Database => { | 1654 | 0 | debug_assert!(matches!( | 1655 | 0 | self.database_catch_up_download, | 1656 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1657 | | )); | 1658 | 0 | self.database_catch_up_download = | 1659 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1660 | | } | 1661 | 0 | DbCatchUpType::BlockVerification => { | 1662 | 0 | self.database_catch_up_download_block_verification = | 1663 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1664 | 0 | } | 1665 | | } | 1666 | | | 1667 | 0 | self.sub_tasks.push(Box::pin(async move { | 1668 | | let result = request.await; | 1669 | | SubtaskFinished::StorageRequestFinished { | 1670 | | request_id, | 1671 | | source_id, | 1672 | | result, | 1673 | | } | 1674 | 0 | })); | 1675 | | } | 1676 | | | 1677 | | WakeUpReason::StartNetworkRequest { | 1678 | 0 | source_id, | 1679 | 0 | request: | 1680 | 0 | all::DesiredRequest::RuntimeCallMerkleProof { | 1681 | 0 | block_hash, | 1682 | 0 | function_name, | 1683 | 0 | parameter_vectored, | 1684 | 0 | }, | 1685 | 0 | database_catch_up_type, | 1686 | 0 | } => { | 1687 | 0 | // TODO: don't unwrap? could this target the virtual sync source? | 1688 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); // TODO: why does this require cloning? weird borrow chk issue | 1689 | 0 |
| 1690 | 0 | let request = self.network_service.clone().call_proof_request( | 1691 | 0 | peer_id, | 1692 | 0 | self.network_chain_id, | 1693 | 0 | network::codec::CallProofRequestConfig { | 1694 | 0 | block_hash, | 1695 | 0 | method: function_name.clone(), | 1696 | 0 | parameter_vectored: iter::once(parameter_vectored.clone()), | 1697 | 0 | }, | 1698 | 0 | ); | 1699 | 0 |
| 1700 | 0 | let request_id = self.sync.add_request( | 1701 | 0 | source_id, | 1702 | 0 | all::RequestDetail::RuntimeCallMerkleProof { | 1703 | 0 | block_hash, | 1704 | 0 | function_name, | 1705 | 0 | parameter_vectored, | 1706 | 0 | }, | 1707 | 0 | (), | 1708 | 0 | ); | 1709 | 0 |
| 1710 | 0 | match database_catch_up_type { | 1711 | 0 | DbCatchUpType::No => {} | 1712 | | DbCatchUpType::Database => { | 1713 | 0 | debug_assert!(matches!( | 1714 | 0 | self.database_catch_up_download, | 1715 | | DatabaseCatchUpDownload::NoDownloadInProgress | 1716 | | )); | 1717 | 0 | self.database_catch_up_download = | 1718 | 0 | DatabaseCatchUpDownload::InProgress(request_id); | 1719 | | } | 1720 | 0 | DbCatchUpType::BlockVerification => { | 1721 | 0 | self.database_catch_up_download_block_verification = | 1722 | 0 | DatabaseCatchUpDownloadBlockVerification::InProgress(request_id); | 1723 | 0 | } | 1724 | | } | 1725 | | | 1726 | 0 | self.sub_tasks.push(Box::pin(async move { | 1727 | | let result = request.await; | 1728 | | SubtaskFinished::CallProofRequestFinished { | 1729 | | request_id, | 1730 | | source_id, | 1731 | | result, | 1732 | | } | 1733 | 0 | })); | 1734 | | } | 1735 | | | 1736 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { | 1737 | 0 | request_id, | 1738 | 0 | source_id, | 1739 | 0 | result: Ok(blocks), | 1740 | | }) => { | 1741 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1742 | 0 | { | 1743 | 0 | self.database_catch_up_download = | 1744 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1745 | 0 | } | 1746 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1747 | 0 | { | 1748 | 0 | self.database_catch_up_download_block_verification = | 1749 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1750 | 0 | } | 1751 | | | 1752 | | // TODO: insert blocks in database if they are referenced through a parent_hash? 
| 1753 | | | 1754 | 0 | let _ = self.sync.blocks_request_response( | 1755 | 0 | request_id, | 1756 | 0 | blocks | 1757 | 0 | .into_iter() | 1758 | 0 | .map(|block| all::BlockRequestSuccessBlock { | 1759 | | scale_encoded_header: block.header.unwrap(), // TODO: don't unwrap | 1760 | | scale_encoded_extrinsics: block.body.unwrap(), // TODO: don't unwrap | 1761 | | scale_encoded_justifications: block | 1762 | | .justifications | 1763 | | .unwrap_or_default() | 1764 | | .into_iter() | 1765 | | .map(|j| all::Justification { | 1766 | | engine_id: j.engine_id, | 1767 | | justification: j.justification, | 1768 | | }) | 1769 | | .collect(), | 1770 | | user_data: NonFinalizedBlock::NotVerified, | 1771 | 0 | }), | 1772 | 0 | ); | 1773 | 0 |
| 1774 | 0 | // If the source was actually disconnected and has no other request in | 1775 | 0 | // progress, we clean it up. | 1776 | 0 | // TODO: DRY | 1777 | 0 | if self.sync[source_id] | 1778 | 0 | .as_ref() | 1779 | 0 | .map_or(false, |info| info.is_disconnected) | 1780 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1781 | | { | 1782 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1783 | 0 | debug_assert!(_requests.next().is_none()); | 1784 | 0 | self.peers_source_id_map | 1785 | 0 | .remove(&info.unwrap().peer_id) | 1786 | 0 | .unwrap(); | 1787 | 0 | } | 1788 | | | 1789 | 0 | process_sync = true; | 1790 | | } | 1791 | | | 1792 | | WakeUpReason::SubtaskFinished(SubtaskFinished::BlocksRequestFinished { | 1793 | 0 | request_id, | 1794 | 0 | source_id, | 1795 | | result: Err(_), | 1796 | | }) => { | 1797 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1798 | 0 | { | 1799 | 0 | self.database_catch_up_download = | 1800 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1801 | 0 | } | 1802 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1803 | 0 | { | 1804 | 0 | self.database_catch_up_download_block_verification = | 1805 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1806 | 0 | } | 1807 | | | 1808 | | // Note that we perform the ban even if the source is now disconnected. | 1809 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 1810 | 0 | self.network_service | 1811 | 0 | .ban_and_disconnect( | 1812 | 0 | peer_id, | 1813 | 0 | self.network_chain_id, | 1814 | 0 | network_service::BanSeverity::Low, | 1815 | 0 | "blocks-request-error", | 1816 | 0 | ) | 1817 | 0 | .await; | 1818 | | | 1819 | 0 | let _ = self.sync.remove_request(request_id); | 1820 | 0 |
| 1821 | 0 | // If the source was actually disconnected and has no other request in | 1822 | 0 | // progress, we clean it up. | 1823 | 0 | // TODO: DRY | 1824 | 0 | if self.sync[source_id] | 1825 | 0 | .as_ref() | 1826 | 0 | .map_or(false, |info| info.is_disconnected) | 1827 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1828 | | { | 1829 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1830 | 0 | debug_assert!(_requests.next().is_none()); | 1831 | 0 | self.peers_source_id_map | 1832 | 0 | .remove(&info.unwrap().peer_id) | 1833 | 0 | .unwrap(); | 1834 | 0 | } | 1835 | | | 1836 | 0 | process_sync = true; | 1837 | | } | 1838 | | | 1839 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { | 1840 | 0 | request_id, | 1841 | 0 | source_id, | 1842 | 0 | result: Ok(result), | 1843 | | }) => { | 1844 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1845 | 0 | { | 1846 | 0 | self.database_catch_up_download = | 1847 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1848 | 0 | } | 1849 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1850 | 0 | { | 1851 | 0 | self.database_catch_up_download_block_verification = | 1852 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1853 | 0 | } | 1854 | | | 1855 | 0 | let decoded = result.decode(); | 1856 | 0 | let fragments = decoded | 1857 | 0 | .fragments | 1858 | 0 | .into_iter() | 1859 | 0 | .map(|f| all::WarpSyncFragment { | 1860 | | scale_encoded_header: f.scale_encoded_header.to_vec(), | 1861 | | scale_encoded_justification: f.scale_encoded_justification.to_vec(), | 1862 | 0 | }) | 1863 | 0 | .collect(); | 1864 | 0 | let _ = self.sync.grandpa_warp_sync_response( | 1865 | 0 | request_id, | 1866 | 0 | fragments, | 1867 | 0 | decoded.is_finished, | 1868 | 0 | ); | 1869 | 0 |
| 1870 | 0 | // If the source was actually disconnected and has no other request in | 1871 | 0 | // progress, we clean it up. | 1872 | 0 | // TODO: DRY | 1873 | 0 | if self.sync[source_id] | 1874 | 0 | .as_ref() | 1875 | 0 | .map_or(false, |info| info.is_disconnected) | 1876 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1877 | | { | 1878 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1879 | 0 | debug_assert!(_requests.next().is_none()); | 1880 | 0 | self.peers_source_id_map | 1881 | 0 | .remove(&info.unwrap().peer_id) | 1882 | 0 | .unwrap(); | 1883 | 0 | } | 1884 | | | 1885 | 0 | process_sync = true; | 1886 | | } | 1887 | | | 1888 | | WakeUpReason::SubtaskFinished(SubtaskFinished::WarpSyncRequestFinished { | 1889 | 0 | request_id, | 1890 | 0 | source_id, | 1891 | | result: Err(_), | 1892 | | }) => { | 1893 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1894 | 0 | { | 1895 | 0 | self.database_catch_up_download = | 1896 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1897 | 0 | } | 1898 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1899 | 0 | { | 1900 | 0 | self.database_catch_up_download_block_verification = | 1901 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1902 | 0 | } | 1903 | | | 1904 | | // Note that we perform the ban even if the source is now disconnected. | 1905 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 1906 | 0 | self.network_service | 1907 | 0 | .ban_and_disconnect( | 1908 | 0 | peer_id, | 1909 | 0 | self.network_chain_id, | 1910 | 0 | network_service::BanSeverity::Low, | 1911 | 0 | "warp-sync-request-error", | 1912 | 0 | ) | 1913 | 0 | .await; | 1914 | | | 1915 | 0 | let _ = self.sync.remove_request(request_id); | 1916 | 0 |
| 1917 | 0 | // If the source was actually disconnected and has no other request in | 1918 | 0 | // progress, we clean it up. | 1919 | 0 | // TODO: DRY | 1920 | 0 | if self.sync[source_id] | 1921 | 0 | .as_ref() | 1922 | 0 | .map_or(false, |info| info.is_disconnected) | 1923 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 1924 | | { | 1925 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 1926 | 0 | debug_assert!(_requests.next().is_none()); | 1927 | 0 | self.peers_source_id_map | 1928 | 0 | .remove(&info.unwrap().peer_id) | 1929 | 0 | .unwrap(); | 1930 | 0 | } | 1931 | | | 1932 | 0 | process_sync = true; | 1933 | | } | 1934 | | | 1935 | | WakeUpReason::SubtaskFinished(SubtaskFinished::StorageRequestFinished { | 1936 | 0 | request_id, | 1937 | 0 | source_id, | 1938 | 0 | result, | 1939 | | }) => { | 1940 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 1941 | 0 | { | 1942 | 0 | self.database_catch_up_download = | 1943 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 1944 | 0 | } | 1945 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 1946 | 0 | { | 1947 | 0 | self.database_catch_up_download_block_verification = | 1948 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 1949 | 0 | } | 1950 | | | 1951 | 0 | if let Ok(result) = &result { | 1952 | 0 | let result = result.clone(); | 1953 | 0 | self.database | 1954 | 0 | .with_database(move |database| { | 1955 | | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( | 1956 | | trie::proof_decode::Config { | 1957 | | proof: result.decode(), | 1958 | | }, | 1959 | | ) { | 1960 | | for (_, entry) in decoded.iter_ordered() { | 1961 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database | 1962 | | database.insert_trie_nodes( | 1963 | | 
iter::once(full_sqlite::InsertTrieNode { | 1964 | | merkle_value: Cow::Borrowed(entry.merkle_value), | 1965 | | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), | 1966 | | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), | 1967 | | storage_value: match entry.trie_node_info.storage_value { | 1968 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 1969 | | _, | 1970 | | ) => return, | 1971 | | trie::proof_decode::StorageValue::None => { | 1972 | | full_sqlite::InsertTrieNodeStorageValue::NoValue | 1973 | | } | 1974 | | trie::proof_decode::StorageValue::Known { | 1975 | | value, .. | 1976 | | } => full_sqlite::InsertTrieNodeStorageValue::Value { | 1977 | | value: Cow::Borrowed(value), | 1978 | | references_merkle_value: false, // TODO: | 1979 | | }, | 1980 | | }, | 1981 | | }), | 1982 | | match entry.trie_node_info.storage_value { | 1983 | | trie::proof_decode::StorageValue::None => 0, // TODO: ?! | 1984 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 1985 | | .. | 1986 | | ) => return, | 1987 | | trie::proof_decode::StorageValue::Known { | 1988 | | inline: true, | 1989 | | .. | 1990 | | } => 0, | 1991 | | trie::proof_decode::StorageValue::Known { | 1992 | | inline: false, | 1993 | | .. | 1994 | | } => 1, | 1995 | | }, | 1996 | | ).unwrap(); | 1997 | | } | 1998 | | } | 1999 | 0 | }) | 2000 | 0 | .await; | 2001 | 0 | } | 2002 | | | 2003 | 0 | if result.is_err() { | 2004 | | // Note that we perform the ban even if the source is now disconnected. 
| 2005 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 2006 | 0 | self.network_service | 2007 | 0 | .ban_and_disconnect( | 2008 | 0 | peer_id, | 2009 | 0 | self.network_chain_id, | 2010 | 0 | network_service::BanSeverity::Low, | 2011 | 0 | "storage-proof-request-error", | 2012 | 0 | ) | 2013 | 0 | .await; | 2014 | 0 | } | 2015 | | | 2016 | 0 | if let Ok(result) = result { | 2017 | 0 | // TODO: to_owned overhead | 2018 | 0 | let _ = self | 2019 | 0 | .sync | 2020 | 0 | .storage_get_response(request_id, result.decode().to_owned()); | 2021 | 0 | } else { | 2022 | 0 | let _ = self.sync.remove_request(request_id); | 2023 | 0 | } | 2024 | | | 2025 | | // If the source was actually disconnected and has no other request in | 2026 | | // progress, we clean it up. | 2027 | | // TODO: DRY | 2028 | 0 | if self.sync[source_id] | 2029 | 0 | .as_ref() | 2030 | 0 | .map_or(false, |info| info.is_disconnected) | 2031 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 2032 | | { | 2033 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 2034 | 0 | debug_assert!(_requests.next().is_none()); | 2035 | 0 | self.peers_source_id_map | 2036 | 0 | .remove(&info.unwrap().peer_id) | 2037 | 0 | .unwrap(); | 2038 | 0 | } | 2039 | | | 2040 | 0 | process_sync = true; | 2041 | | } | 2042 | | | 2043 | | WakeUpReason::SubtaskFinished(SubtaskFinished::CallProofRequestFinished { | 2044 | 0 | request_id, | 2045 | 0 | source_id, | 2046 | 0 | result, | 2047 | | }) => { | 2048 | 0 | if matches!(self.database_catch_up_download, DatabaseCatchUpDownload::InProgress(r) if r == request_id) | 2049 | 0 | { | 2050 | 0 | self.database_catch_up_download = | 2051 | 0 | DatabaseCatchUpDownload::NoDownloadInProgress; | 2052 | 0 | } | 2053 | 0 | if matches!(self.database_catch_up_download_block_verification, DatabaseCatchUpDownloadBlockVerification::InProgress(r) if r == request_id) | 2054 | 0 | { | 2055 | 0 | 
self.database_catch_up_download_block_verification = | 2056 | 0 | DatabaseCatchUpDownloadBlockVerification::None; | 2057 | 0 | } | 2058 | | | 2059 | | // TODO: DRY with above | 2060 | 0 | if let Ok(result) = &result { | 2061 | 0 | let result = result.clone(); | 2062 | 0 | self.database | 2063 | 0 | .with_database(move |database| { | 2064 | | if let Ok(decoded) = trie::proof_decode::decode_and_verify_proof( | 2065 | | trie::proof_decode::Config { | 2066 | | proof: result.decode(), | 2067 | | }, | 2068 | | ) { | 2069 | | for (_, entry) in decoded.iter_ordered() { | 2070 | | // TODO: check the state root hash; while this can't lead to a vulnerability, it can bloat the database | 2071 | | database.insert_trie_nodes( | 2072 | | iter::once(full_sqlite::InsertTrieNode { | 2073 | | merkle_value: Cow::Borrowed(entry.merkle_value), | 2074 | | partial_key_nibbles: Cow::Owned(entry.partial_key_nibbles.into_iter().map(|n| u8::from(n)).collect()), | 2075 | | children_merkle_values: std::array::from_fn(|n| entry.trie_node_info.children.child(trie::Nibble::try_from(u8::try_from(n).unwrap()).unwrap()).merkle_value().map(Cow::Borrowed)), | 2076 | | storage_value: match entry.trie_node_info.storage_value { | 2077 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 2078 | | _, | 2079 | | ) => return, | 2080 | | trie::proof_decode::StorageValue::None => { | 2081 | | full_sqlite::InsertTrieNodeStorageValue::NoValue | 2082 | | } | 2083 | | trie::proof_decode::StorageValue::Known { | 2084 | | value, .. | 2085 | | } => full_sqlite::InsertTrieNodeStorageValue::Value { | 2086 | | value: Cow::Borrowed(value), | 2087 | | references_merkle_value: false, // TODO: | 2088 | | }, | 2089 | | }, | 2090 | | }), | 2091 | | match entry.trie_node_info.storage_value { | 2092 | | trie::proof_decode::StorageValue::None => 0, // TODO: ?! | 2093 | | trie::proof_decode::StorageValue::HashKnownValueMissing( | 2094 | | .. 
| 2095 | | ) => return, | 2096 | | trie::proof_decode::StorageValue::Known { | 2097 | | inline: true, | 2098 | | .. | 2099 | | } => 0, | 2100 | | trie::proof_decode::StorageValue::Known { | 2101 | | inline: false, | 2102 | | .. | 2103 | | } => 1, | 2104 | | }, | 2105 | | ).unwrap(); | 2106 | | } | 2107 | | } | 2108 | 0 | }) | 2109 | 0 | .await; | 2110 | 0 | } | 2111 | | | 2112 | 0 | if result.is_err() { | 2113 | | // Note that we perform the ban even if the source is now disconnected. | 2114 | 0 | let peer_id = self.sync[source_id].as_ref().unwrap().peer_id.clone(); | 2115 | 0 | self.network_service | 2116 | 0 | .ban_and_disconnect( | 2117 | 0 | peer_id, | 2118 | 0 | self.network_chain_id, | 2119 | 0 | network_service::BanSeverity::Low, | 2120 | 0 | "call-proof-request-error", | 2121 | 0 | ) | 2122 | 0 | .await; | 2123 | 0 | } | 2124 | | | 2125 | 0 | if let Ok(result) = result { | 2126 | 0 | self.sync | 2127 | 0 | .call_proof_response(request_id, result.decode().to_owned()); | 2128 | 0 | // TODO: need help from networking service to avoid this to_owned | 2129 | 0 | } else { | 2130 | 0 | self.sync.remove_request(request_id); | 2131 | 0 | } | 2132 | | | 2133 | | // If the source was actually disconnected and has no other request in | 2134 | | // progress, we clean it up. 
| 2135 | | // TODO: DRY | 2136 | 0 | if self.sync[source_id] | 2137 | 0 | .as_ref() | 2138 | 0 | .map_or(false, |info| info.is_disconnected) | 2139 | 0 | && self.sync.source_num_ongoing_requests(source_id) == 0 | 2140 | | { | 2141 | 0 | let (info, mut _requests) = self.sync.remove_source(source_id); | 2142 | 0 | debug_assert!(_requests.next().is_none()); | 2143 | 0 | self.peers_source_id_map | 2144 | 0 | .remove(&info.unwrap().peer_id) | 2145 | 0 | .unwrap(); | 2146 | 0 | } | 2147 | | | 2148 | 0 | process_sync = true; | 2149 | | } | 2150 | | | 2151 | | WakeUpReason::SyncProcess => { | 2152 | | // Given that processing blocks might generate a notification, and that | 2153 | | // only one notification can be queued at a time, this path must never be | 2154 | | // reached if a notification is already waiting. | 2155 | 19 | debug_assert!(self.pending_notification.is_none()); | 2156 | | // Similarly, verifying a block might generate a block announce. | 2157 | 19 | debug_assert!(self.pending_block_announce.is_none()); | 2158 | | | 2159 | | // Given that a block verification might require downloading some storage | 2160 | | // items due to missing storage items, and that we only want one download at | 2161 | | // a time, we don't verify blocks if a download is in progress. | 2162 | 19 | debug_assert!(matches!0 ( | 2163 | 19 | self.database_catch_up_download_block_verification, | 2164 | | DatabaseCatchUpDownloadBlockVerification::None | 2165 | | )); | 2166 | | | 2167 | 19 | let (new_self, maybe_more_to_process) = self.process_blocks().await0 ; | 2168 | 19 | process_sync = maybe_more_to_process; | 2169 | 19 | self = new_self; | 2170 | | } | 2171 | | } | 2172 | | } | 2173 | 19 | } |
Unexecuted instantiation: _RNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground3run0B8_ |
2174 | | |
2175 | | /// Authors a block, then imports it and gossips it out. |
2176 | | /// |
2177 | | /// # Panic |
2178 | | /// |
2179 | | /// The [`SyncBackground::block_authoring`] must be [`author::build::Builder::Ready`]. |
2180 | | /// |
2181 | 0 | async fn author_block(&mut self) { Unexecuted instantiation: _RNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground12author_block Unexecuted instantiation: _RNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground12author_block |
2182 | 0 | let (authoring_start, local_authorities) = match self.block_authoring.take() { |
2183 | 0 | Some((author::build::Builder::Ready(authoring), local_authorities)) => { |
2184 | 0 | (authoring, local_authorities) |
2185 | | } |
2186 | 0 | _ => panic!(), |
2187 | | }; |
2188 | | |
2189 | | // TODO: it is possible that the current best block is already the same authoring slot as the slot we want to claim ; unclear how to solve this |
2190 | | |
2191 | 0 | let parent_number = self.sync.best_block_number(); |
2192 | 0 | self.log_callback.log( |
2193 | 0 | LogLevel::Debug, |
2194 | 0 | format!( |
2195 | 0 | "block-author-start; parent_hash={}; parent_number={}", |
2196 | 0 | HashDisplay(self.sync.best_block_hash()), |
2197 | 0 | parent_number, |
2198 | 0 | ), |
2199 | 0 | ); |
2200 | 0 |
|
2201 | 0 | // We would like to create a span for authoring the new block, but the trace id depends on |
2202 | 0 | // the block hash, which is only known at the end. |
2203 | 0 | let block_author_jaeger_start_time = mick_jaeger::StartTime::now(); |
2204 | | |
2205 | | // Determine when the block should stop being authored. |
2206 | | // |
2207 | | // In order for the network to perform well, a block should be authored and propagated |
2208 | | // throughout the peer-to-peer network before the end of the slot. In order for this |
2209 | | // to happen, the block creation process itself should end a few seconds before the |
2210 | | // end of the slot. |
2211 | | // |
2212 | | // Most parts of the block authorship can't be accelerated, in particular the |
2213 | | // initialization and the signing at the end. This end of authoring threshold is only |
2214 | | // checked when deciding whether to continue including more transactions in the block. |
2215 | | // TODO: use this |
2216 | | // TODO: Substrate nodes increase the time available for authoring if it detects that slots have been skipped, in order to account for the possibility that the initialization of a block or the inclusion of an extrinsic takes too long |
2217 | 0 | let authoring_end = { |
2218 | 0 | let start = authoring_start.slot_start_from_unix_epoch(); |
2219 | 0 | let end = authoring_start.slot_end_from_unix_epoch(); |
2220 | 0 | debug_assert!(start < end); |
2221 | 0 | debug_assert!(SystemTime::now() >= SystemTime::UNIX_EPOCH + start); |
2222 | 0 | SystemTime::UNIX_EPOCH |
2223 | 0 | + start |
2224 | 0 | + (end - start) * u32::from(self.slot_duration_author_ratio) / u32::from(u16::MAX) |
2225 | | }; |
2226 | | |
2227 | | // Actual block production now happening. |
2228 | 0 | let (new_block_header, new_block_body) = { |
2229 | 0 | let parent_hash = *self.sync.best_block_hash(); |
2230 | 0 | let parent_runtime_arc = |
2231 | 0 | if self.sync.best_block_number() != self.sync.finalized_block_number() { |
2232 | | let NonFinalizedBlock::Verified { |
2233 | 0 | runtime: parent_runtime_arc, |
2234 | 0 | } = &self.sync[(self.sync.best_block_number(), self.sync.best_block_hash())] |
2235 | | else { |
2236 | 0 | unreachable!() |
2237 | | }; |
2238 | 0 | parent_runtime_arc.clone() |
2239 | | } else { |
2240 | 0 | self.finalized_runtime.clone() |
2241 | | }; |
2242 | 0 | let parent_runtime = (*parent_runtime_arc).clone(); |
2243 | 0 |
|
2244 | 0 | // Start the block authoring process. |
2245 | 0 | let mut block_authoring = { |
2246 | 0 | authoring_start.start(author::build::AuthoringStartConfig { |
2247 | 0 | block_number_bytes: self.sync.block_number_bytes(), |
2248 | 0 | parent_hash: &self.sync.best_block_hash(), |
2249 | 0 | parent_number: self.sync.best_block_number(), |
2250 | 0 | now_from_unix_epoch: SystemTime::now() |
2251 | 0 | .duration_since(SystemTime::UNIX_EPOCH) |
2252 | 0 | .unwrap(), |
2253 | 0 | parent_runtime, |
2254 | 0 | block_body_capacity: 0, // TODO: could be set to the size of the tx pool |
2255 | 0 | max_log_level: 0, |
2256 | 0 | calculate_trie_changes: true, |
2257 | 0 | }) |
2258 | | }; |
2259 | | |
2260 | | // The block authoring process jumps through various states, interrupted when it needs |
2261 | | // access to the storage of the best block. |
2262 | 0 | loop { |
2263 | 0 | match block_authoring { |
2264 | 0 | author::build::BuilderAuthoring::Seal(seal) => { |
2265 | 0 | // This is the last step of the authoring. The block creation is |
2266 | 0 | // successful, and the only thing remaining to do is sign the block |
2267 | 0 | // header. Signing is done through `self.keystore`. |
2268 | 0 |
|
2269 | 0 | // TODO: correct key namespace |
2270 | 0 | let data_to_sign = seal.to_sign(); |
2271 | 0 | let sign_future = self.keystore.sign( |
2272 | 0 | keystore::KeyNamespace::Aura, |
2273 | 0 | &local_authorities[seal.authority_index()], |
2274 | 0 | &data_to_sign, |
2275 | 0 | ); |
2276 | | |
2277 | 0 | let success = match sign_future.await { |
2278 | 0 | Ok(signature) => seal.inject_sr25519_signature(signature), |
2279 | 0 | Err(error) => { |
2280 | 0 | // Because the keystore is subject to race conditions, it is |
2281 | 0 | // possible for this situation to happen if the key has been |
2282 | 0 | // removed from the keystore in parallel of the block authoring |
2283 | 0 | // process, or the key is maybe no longer accessible because of |
2284 | 0 | // another issue. |
2285 | 0 | self.log_callback.log( |
2286 | 0 | LogLevel::Warn, |
2287 | 0 | format!("block-author-signing-error; error={}", error), |
2288 | 0 | ); |
2289 | 0 | self.block_authoring = None; |
2290 | 0 | return; |
2291 | | } |
2292 | | }; |
2293 | | |
2294 | 0 | break (success.scale_encoded_header, success.body); |
2295 | | } |
2296 | | |
2297 | 0 | author::build::BuilderAuthoring::Error { error, .. } => { |
2298 | 0 | // Block authoring process stopped because of an error. |
2299 | 0 |
|
2300 | 0 | // In order to prevent the block authoring from restarting immediately |
2301 | 0 | // after and failing again repeatedly, we switch the block authoring to |
2302 | 0 | // the same state as if it had successfully generated a block. |
2303 | 0 | self.block_authoring = Some((author::build::Builder::Idle, Vec::new())); |
2304 | 0 | // TODO: log the runtime logs |
2305 | 0 | self.log_callback.log( |
2306 | 0 | LogLevel::Warn, |
2307 | 0 | format!("block-author-error; error={}", error), |
2308 | 0 | ); |
2309 | 0 | return; |
2310 | | } |
2311 | | |
2312 | | // Part of the block production consists in adding transactions to the block. |
2313 | | // These transactions are extracted from the transactions pool. |
2314 | 0 | author::build::BuilderAuthoring::ApplyExtrinsic(apply) => { |
2315 | 0 | // TODO: actually implement including transactions in the blocks |
2316 | 0 | block_authoring = apply.finish(); |
2317 | 0 | } |
2318 | 0 | author::build::BuilderAuthoring::ApplyExtrinsicResult { result, resume } => { |
2319 | 0 | if let Err(error) = result { |
2320 | 0 | // TODO: include transaction bytes or something? |
2321 | 0 | self.log_callback.log( |
2322 | 0 | LogLevel::Warn, |
2323 | 0 | format!( |
2324 | 0 | "block-author-transaction-inclusion-error; error={}", |
2325 | 0 | error |
2326 | 0 | ), |
2327 | 0 | ); |
2328 | 0 | } |
2329 | | |
2330 | | // TODO: actually implement including transactions in the blocks |
2331 | 0 | block_authoring = resume.finish(); |
2332 | | } |
2333 | | |
2334 | | // Access to the best block storage. |
2335 | 0 | author::build::BuilderAuthoring::StorageGet(req) => { |
2336 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
2337 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
2338 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
2339 | 0 | .map(u8::from) |
2340 | 0 | .collect::<Vec<_>>() |
2341 | 0 | }); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block00Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block00Ba_ |
2342 | 0 | let key = trie::bytes_to_nibbles(req.key().as_ref().iter().copied()) |
2343 | 0 | .map(u8::from) |
2344 | 0 | .collect::<Vec<_>>(); |
2345 | 0 | let value = self |
2346 | 0 | .database |
2347 | 0 | .with_database(move |db| { |
2348 | 0 | db.block_storage_get( |
2349 | 0 | &parent_hash, |
2350 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s_00Bc_ |
2351 | 0 | key.iter().copied(), |
2352 | 0 | ) |
2353 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s_0Ba_ |
2354 | 0 | .await |
2355 | | // TODO: don't panic in case of incomplete storage |
2356 | 0 | .expect("database access error"); |
2357 | 0 |
|
2358 | 0 | block_authoring = req.inject_value(value.as_ref().map(|(val, vers)| { |
2359 | 0 | ( |
2360 | 0 | iter::once(&val[..]), |
2361 | 0 | runtime_call::TrieEntryVersion::try_from(*vers) |
2362 | 0 | .expect("corrupted database"), |
2363 | 0 | ) |
2364 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s0_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s0_0Ba_ |
2365 | | } |
2366 | 0 | author::build::BuilderAuthoring::ClosestDescendantMerkleValue(req) => { |
2367 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
2368 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
2369 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
2370 | 0 | .map(u8::from) |
2371 | 0 | .collect::<Vec<_>>() |
2372 | 0 | }); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s1_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s1_0Ba_ |
2373 | 0 | let key_nibbles = req.key().map(u8::from).collect::<Vec<_>>(); |
2374 | | |
2375 | 0 | let merkle_value = self |
2376 | 0 | .database |
2377 | 0 | .with_database(move |db| { |
2378 | 0 | db.block_storage_closest_descendant_merkle_value( |
2379 | 0 | &parent_hash, |
2380 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s2_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s2_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s2_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s2_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s2_00Bc_ |
2381 | 0 | key_nibbles.iter().copied(), |
2382 | 0 | ) |
2383 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s2_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s2_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s2_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s2_0Ba_ |
2384 | 0 | .await |
2385 | | // TODO: don't panic in case of incomplete storage |
2386 | 0 | .expect("database access error"); |
2387 | 0 |
|
2388 | 0 | block_authoring = |
2389 | 0 | req.inject_merkle_value(merkle_value.as_ref().map(|v| &v[..])); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s3_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s3_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s3_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s3_0Ba_ |
2390 | | } |
2391 | 0 | author::build::BuilderAuthoring::NextKey(req) => { |
2392 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
2393 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
2394 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
2395 | 0 | .map(u8::from) |
2396 | 0 | .collect::<Vec<_>>() |
2397 | 0 | }); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s4_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s4_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s4_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s4_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s4_0Ba_ |
2398 | 0 | let key_nibbles = req |
2399 | 0 | .key() |
2400 | 0 | .map(u8::from) |
2401 | 0 | .chain(if req.or_equal() { None } else { Some(0u8) }) |
2402 | 0 | .collect::<Vec<_>>(); |
2403 | 0 | let prefix_nibbles = req.prefix().map(u8::from).collect::<Vec<_>>(); |
2404 | 0 |
|
2405 | 0 | let branch_nodes = req.branch_nodes(); |
2406 | 0 | let next_key = self |
2407 | 0 | .database |
2408 | 0 | .with_database(move |db| { |
2409 | 0 | db.block_storage_next_key( |
2410 | 0 | &parent_hash, |
2411 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s5_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s5_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s5_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s5_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s5_00Bc_ |
2412 | 0 | key_nibbles.iter().copied(), |
2413 | 0 | prefix_nibbles.iter().copied(), |
2414 | 0 | branch_nodes, |
2415 | 0 | ) |
2416 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s5_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s5_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s5_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s5_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s5_0Ba_ |
2417 | 0 | .await |
2418 | | // TODO: don't panic in case of incomplete storage |
2419 | 0 | .expect("database access error"); |
2420 | 0 |
|
2421 | 0 | block_authoring = req |
2422 | 0 | .inject_key(next_key.map(|k| { |
2423 | 0 | k.into_iter().map(|b| trie::Nibble::try_from(b).unwrap()) Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s6_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s6_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s6_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s6_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground12author_block0s6_00Bc_ |
2424 | 0 | })); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s6_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s6_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s6_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s6_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground12author_block0s6_0Ba_ |
2425 | | } |
2426 | 0 | author::build::BuilderAuthoring::OffchainStorageSet(req) => { |
2427 | 0 | // Ignore offchain storage writes at the moment. |
2428 | 0 | block_authoring = req.resume(); |
2429 | 0 | } |
2430 | | } |
2431 | | } |
2432 | | }; |
2433 | | |
2434 | | // Block has now finished being generated. |
2435 | | // TODO: print logs |
2436 | 0 | let new_block_hash = header::hash_from_scale_encoded_header(&new_block_header); |
2437 | 0 | self.log_callback.log( |
2438 | 0 | LogLevel::Info, |
2439 | 0 | format!( |
2440 | 0 | "block-generated; hash={}; body_len={}", |
2441 | 0 | HashDisplay(&new_block_hash), |
2442 | 0 | new_block_body.len() |
2443 | 0 | ), |
2444 | 0 | ); |
2445 | 0 | let _jaeger_span = self |
2446 | 0 | .jaeger_service |
2447 | 0 | .block_authorship_span(&new_block_hash, block_author_jaeger_start_time); |
2448 | 0 |
|
2449 | 0 | // Print a warning if generating the block has taken more time than expected. |
2450 | 0 | // This can happen because the node is completely overloaded, is running on a slow machine, |
2451 | 0 | // or if the runtime code being executed contains a very heavy operation. |
2452 | 0 | // In any case, there is not much that a node operator can do except try increase the |
2453 | 0 | // performance of their machine. |
2454 | 0 | match authoring_end.elapsed() { |
2455 | 0 | Ok(now_minus_end) if now_minus_end < Duration::from_millis(500) => {} |
2456 | 0 | _ => { |
2457 | 0 | self.log_callback.log( |
2458 | 0 | LogLevel::Warn, |
2459 | 0 | format!( |
2460 | 0 | "block-generation-too-long; hash={}", |
2461 | 0 | HashDisplay(&new_block_hash) |
2462 | 0 | ), |
2463 | 0 | ); |
2464 | 0 | } |
2465 | | } |
2466 | | |
2467 | | // Switch the block authoring to a state where we won't try to generate a new block again |
2468 | | // until something new happens. |
2469 | | // TODO: nothing prevents the node from generating two blocks at the same height at the moment |
2470 | 0 | self.block_authoring = Some((author::build::Builder::Idle, Vec::new())); |
2471 | | |
2472 | | // The next step is to import the block in `self.sync`. This is done by pretending that |
2473 | | // the local node is a source of block similar to networking peers. |
2474 | 0 | let all::BlockAnnounceOutcome::Unknown(block_insert) = self.sync.block_announce( |
2475 | 0 | self.block_author_sync_source, |
2476 | 0 | new_block_header.clone(), |
2477 | 0 | true, // Since the new block is a child of the current best block, it always becomes the new best. |
2478 | 0 | ) else { |
2479 | 0 | unreachable!(); |
2480 | | }; |
2481 | 0 | block_insert.insert_and_update_source(NonFinalizedBlock::NotVerified); |
2482 | 0 |
|
2483 | 0 | debug_assert!(self.authored_block.is_none()); |
2484 | 0 | self.authored_block = Some(( |
2485 | 0 | parent_number + 1, |
2486 | 0 | new_block_hash, |
2487 | 0 | new_block_header, |
2488 | 0 | new_block_body, |
2489 | 0 | )); |
2490 | 0 | } Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground12author_block0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground12author_block0B8_ Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground12author_block0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground12author_block0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground12author_block0B8_ |
2491 | | |
2492 | 21 | async fn process_blocks(mut self) -> (Self, bool) { _RNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground14process_blocks Line | Count | Source | 2492 | 21 | async fn process_blocks(mut self) -> (Self, bool) { |
Unexecuted instantiation: _RNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB4_14SyncBackground14process_blocks |
2493 | 21 | // The sync state machine can be in a few various states. At the time of writing: |
2494 | 21 | // idle, verifying header, verifying block, verifying grandpa warp sync proof, |
2495 | 21 | // verifying storage proof. |
2496 | 21 | // If the state is one of the "verifying" states, perform the actual verification and |
2497 | 21 | // loop again until the sync is in an idle state. |
2498 | 21 | let unix_time = SystemTime::now() |
2499 | 21 | .duration_since(SystemTime::UNIX_EPOCH) |
2500 | 21 | .unwrap(); |
2501 | 21 | |
2502 | 21 | // TODO: move this? |
2503 | 21 | let block_number_bytes = self.sync.block_number_bytes(); |
2504 | 21 | |
2505 | 21 | match self.sync.process_one() { |
2506 | 21 | all::ProcessOne::AllSync(idle) => { |
2507 | 21 | self.sync = idle; |
2508 | 21 | (self, false) |
2509 | | } |
2510 | 0 | all::ProcessOne::VerifyWarpSyncFragment(verify) => { |
2511 | 0 | let sender = verify |
2512 | 0 | .proof_sender() |
2513 | 0 | .map(|(_, s)| s.as_ref().unwrap().peer_id.clone()); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks00Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks00Ba_ |
2514 | 0 |
|
2515 | 0 | let (new_sync, outcome) = verify.perform(rand::random()); |
2516 | 0 | self.sync = new_sync; |
2517 | 0 | match outcome { |
2518 | 0 | Ok((fragment_hash, fragment_height)) => { |
2519 | 0 | self.log_callback.log( |
2520 | 0 | LogLevel::Debug, |
2521 | 0 | format!( |
2522 | 0 | "warp-sync-fragment-verification-success; peer_id={}, fragment-hash={}, fragment-height={fragment_height}", |
2523 | 0 | sender.map(|s| s.to_string()).unwrap_or_else(|| "unknown".to_owned()), Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s6_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s6_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s6_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s6_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s6_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s7_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s7_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s7_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s7_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s7_0Ba_ |
2524 | 0 | hex::encode(&fragment_hash) |
2525 | 0 | ), |
2526 | 0 | ); |
2527 | 0 | } |
2528 | 0 | Err(err) => { |
2529 | 0 | if let Some(sender) = &sender { |
2530 | 0 | self.network_service |
2531 | 0 | .ban_and_disconnect( |
2532 | 0 | sender.clone(), |
2533 | 0 | self.network_chain_id, |
2534 | 0 | network_service::BanSeverity::High, |
2535 | 0 | "bad-warp-sync-fragment", |
2536 | 0 | ) |
2537 | 0 | .await; |
2538 | 0 | } |
2539 | | |
2540 | 0 | self.log_callback.log( |
2541 | 0 | LogLevel::Warn, |
2542 | 0 | format!( |
2543 | 0 | "failed-warp-sync-fragment-verification; peer_id={}, error={err}", |
2544 | 0 | sender |
2545 | 0 | .map(|s| s.to_string()) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s8_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s8_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s8_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s8_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s8_0Ba_ |
2546 | 0 | .unwrap_or_else(|| "unknown".to_owned()), Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s9_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s9_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s9_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s9_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s9_0Ba_ |
2547 | 0 | ), |
2548 | 0 | ); |
2549 | | } |
2550 | | } |
2551 | 0 | (self, true) |
2552 | | } |
2553 | 0 | all::ProcessOne::WarpSyncBuildRuntime(build_runtime) => { |
2554 | 0 | let (new_sync, outcome) = |
2555 | 0 | build_runtime.build(all::ExecHint::ValidateAndCompile, true); |
2556 | 0 | self.sync = new_sync; |
2557 | 0 | if let Err(err) = outcome { |
2558 | 0 | self.log_callback.log( |
2559 | 0 | LogLevel::Warn, |
2560 | 0 | format!("failed-warp-sync-runtime-compilation; error={err}"), |
2561 | 0 | ); |
2562 | 0 | } |
2563 | 0 | (self, true) |
2564 | | } |
2565 | 0 | all::ProcessOne::WarpSyncBuildChainInformation(build_chain_information) => { |
2566 | 0 | let (new_sync, outcome) = build_chain_information.build(); |
2567 | 0 | self.sync = new_sync; |
2568 | 0 | if let Err(err) = outcome { |
2569 | 0 | self.log_callback.log( |
2570 | 0 | LogLevel::Warn, |
2571 | 0 | format!("failed-warp-sync-chain-information-build; error={err}"), |
2572 | 0 | ); |
2573 | 0 | } |
2574 | 0 | (self, true) |
2575 | | } |
2576 | | all::ProcessOne::WarpSyncFinished { |
2577 | 0 | sync, |
2578 | 0 | finalized_body: Some(finalized_body), |
2579 | 0 | finalized_block_runtime, |
2580 | 0 | .. |
2581 | 0 | } => { |
2582 | 0 | self.sync = sync; |
2583 | 0 |
|
2584 | 0 | // Destroy all existing subscriptions due to the gap in the chain.
2585 | 0 | self.pending_notification = None; |
2586 | 0 | self.blocks_notifications.clear(); |
2587 | 0 |
|
2588 | 0 | self.finalized_runtime = Arc::new(finalized_block_runtime); |
2589 | 0 | let finalized_block_header = self |
2590 | 0 | .sync |
2591 | 0 | .as_chain_information() |
2592 | 0 | .as_ref() |
2593 | 0 | .finalized_block_header |
2594 | 0 | .scale_encoding_vec(self.sync.block_number_bytes()); |
2595 | 0 | self.database |
2596 | 0 | .with_database(move |database| { |
2597 | 0 | database |
2598 | 0 | .reset( |
2599 | 0 | &finalized_block_header, |
2600 | 0 | finalized_body.iter().map(|e| &e[..]), Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground14process_blocks0s_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground14process_blocks0s_00Bc_ Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground14process_blocks0s_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground14process_blocks0s_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtBa_14SyncBackground14process_blocks0s_00Bc_ |
2601 | 0 | None, |
2602 | 0 | ) |
2603 | 0 | .unwrap(); |
2604 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s_0Ba_ |
2605 | 0 | .await; |
2606 | | // TODO: inserting what is known about the finalized storage into the database is currently done when a proof is downloaded; however, if the proof-download-finished code no longer inserts entries related to unknown blocks, then we should do it here instead
2607 | | |
2608 | 0 | if matches!( |
2609 | 0 | self.database_catch_up_download, |
2610 | | DatabaseCatchUpDownload::NothingToDownloadCache |
2611 | 0 | ) { |
2612 | 0 | self.database_catch_up_download = DatabaseCatchUpDownload::NoDownloadInProgress; |
2613 | 0 | } |
2614 | | |
2615 | 0 | (self, true) |
2616 | | } |
2617 | | all::ProcessOne::WarpSyncFinished { |
2618 | | finalized_body: None, |
2619 | | .. |
2620 | | } => { |
2621 | 0 | unreachable!() |
2622 | | } |
2623 | 0 | all::ProcessOne::VerifyBlock(verify) => { |
2624 | 0 | // TODO: ban peer in case of verification failure |
2625 | 0 | let when_verification_started = Instant::now(); |
2626 | 0 | let hash_to_verify = verify.hash(); |
2627 | 0 |
|
2628 | 0 | let _jaeger_span = self.jaeger_service.block_verify_span(&hash_to_verify); |
2629 | | |
2630 | 0 | let (is_new_best, header_verification_success) = |
2631 | 0 | match verify.verify_header(unix_time) { |
2632 | | all::HeaderVerifyOutcome::Success { |
2633 | 0 | is_new_best, |
2634 | 0 | success, |
2635 | 0 | } => (is_new_best, success), |
2636 | 0 | all::HeaderVerifyOutcome::Error { sync, error } => { |
2637 | 0 | // Print a separate warning because it is important for the user |
2638 | 0 | // to be aware of the verification failure. |
2639 | 0 | // `error` is last because it's quite big. |
2640 | 0 | self.log_callback.log( |
2641 | 0 | LogLevel::Warn, |
2642 | 0 | format!( |
2643 | 0 | "failed-block-verification; hash={}; error={}", |
2644 | 0 | HashDisplay(&hash_to_verify), |
2645 | 0 | error |
2646 | 0 | ), |
2647 | 0 | ); |
2648 | 0 | self.sync = sync; |
2649 | 0 | return (self, true); |
2650 | | } |
2651 | | }; |
2652 | | |
2653 | 0 | let parent_info = header_verification_success.parent_user_data().map(|b| { |
2654 | 0 | let NonFinalizedBlock::Verified { runtime } = b else { |
2655 | 0 | unreachable!() |
2656 | | }; |
2657 | 0 | runtime.clone() |
2658 | 0 | }); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s0_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s0_0Ba_ |
2659 | 0 | let parent_runtime_arc = parent_info |
2660 | 0 | .as_ref() |
2661 | 0 | .cloned() |
2662 | 0 | .unwrap_or_else(|| self.finalized_runtime.clone()); Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s1_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s1_0Ba_ |
2663 | 0 |
|
2664 | 0 | // TODO: check this block against the chain spec's badBlocks |
2665 | 0 |
|
2666 | 0 | let height = header_verification_success.height(); |
2667 | 0 | let scale_encoded_header = |
2668 | 0 | header_verification_success.scale_encoded_header().to_vec(); |
2669 | | |
2670 | 0 | let execute_block_success = match execute_block_and_insert( |
2671 | 0 | &self.database, |
2672 | 0 | (*parent_runtime_arc).clone(), |
2673 | 0 | &header_verification_success.parent_hash(), |
2674 | 0 | header_verification_success.scale_encoded_header(), |
2675 | 0 | block_number_bytes, |
2676 | 0 | header_verification_success |
2677 | 0 | .scale_encoded_extrinsics() |
2678 | 0 | .unwrap(), |
2679 | 0 | unix_time, |
2680 | 0 | is_new_best, |
2681 | 0 | ) |
2682 | 0 | .await |
2683 | | { |
2684 | 0 | Ok(success) => success, |
2685 | | Err(ExecuteBlockError::VerificationFailure( |
2686 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { |
2687 | | error: full_sqlite::StorageAccessError::IncompleteStorage, |
2688 | 0 | context, |
2689 | 0 | }, |
2690 | 0 | )) => { |
2691 | 0 | // The block verification failed because the storage of the parent |
2692 | 0 | // is still being downloaded from the network. |
2693 | 0 | self.log_callback.log( |
2694 | 0 | LogLevel::Debug, |
2695 | 0 | format!( |
2696 | 0 | "block-verification-incomplete-storage; hash={}; height={}; \ |
2697 | 0 | time_before_interrupt={:?}", |
2698 | 0 | HashDisplay(&hash_to_verify), |
2699 | 0 | header_verification_success.height(), |
2700 | 0 | when_verification_started.elapsed() |
2701 | 0 | ), |
2702 | 0 | ); |
2703 | 0 |
|
2704 | 0 | debug_assert!(matches!( |
2705 | 0 | self.database_catch_up_download_block_verification, |
2706 | | DatabaseCatchUpDownloadBlockVerification::None |
2707 | | )); |
2708 | 0 | match context { |
2709 | 0 | ExecuteBlockDatabaseAccessFailureContext::ParentRuntimeAccess => { |
2710 | 0 | self.database_catch_up_download_block_verification = DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { |
2711 | 0 | block_hash: *header_verification_success.parent_hash(), |
2712 | 0 | block_number: header_verification_success.height() - 1, |
2713 | 0 | }; |
2714 | 0 | } |
2715 | | ExecuteBlockDatabaseAccessFailureContext::FunctionCall { |
2716 | 0 | function_name, |
2717 | 0 | parameter, |
2718 | 0 | } => { |
2719 | 0 | self.database_catch_up_download_block_verification = |
2720 | 0 | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { |
2721 | 0 | block_hash: *header_verification_success.parent_hash(), |
2722 | 0 | block_number: header_verification_success.height() - 1, |
2723 | 0 | function_name: function_name.to_owned(), |
2724 | 0 | parameter, |
2725 | 0 | }; |
2726 | 0 | } |
2727 | | } |
2728 | | |
2729 | 0 | self.sync = header_verification_success.cancel(); |
2730 | 0 | return (self, true); |
2731 | | } |
2732 | | Err(ExecuteBlockError::VerificationFailure( |
2733 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { |
2734 | 0 | error, .. |
2735 | 0 | }, |
2736 | 0 | )) => { |
2737 | 0 | panic!("corrupted database: {error}") |
2738 | | } |
2739 | | Err(ExecuteBlockError::VerificationFailure( |
2740 | | ExecuteBlockVerificationFailureError::ParentCodeEmptyInDatabase |
2741 | | | ExecuteBlockVerificationFailureError::InvaliParentHeapPagesInDatabase(_) |
2742 | | | ExecuteBlockVerificationFailureError::DatabaseInvalidStateTrieVersion, |
2743 | 0 | )) => panic!("corrupted database"), |
2744 | 0 | Err(error) => { |
2745 | 0 | // Print a separate warning because it is important for the user |
2746 | 0 | // to be aware of the verification failure. |
2747 | 0 | // `error` is last because it's quite big. |
2748 | 0 | self.log_callback.log( |
2749 | 0 | LogLevel::Warn, |
2750 | 0 | format!( |
2751 | 0 | "failed-block-verification; hash={}; height={}; \ |
2752 | 0 | total_duration={:?}; error={error}", |
2753 | 0 | HashDisplay(&hash_to_verify), |
2754 | 0 | header_verification_success.height(), |
2755 | 0 | when_verification_started.elapsed() |
2756 | 0 | ), |
2757 | 0 | ); |
2758 | 0 | // Note that some errors are verification failures errors that shouldn't |
2759 | 0 | // lead to the block being marked as bad. However, there's not much else |
2760 | 0 | // we can do here as trying to verify the block again would likely lead to |
2761 | 0 | // the same error again. Marking the block as bad is a reasonable solution. |
2762 | 0 | self.sync = header_verification_success.reject_bad_block(); |
2763 | 0 | return (self, true); |
2764 | | } |
2765 | | }; |
2766 | | |
2767 | 0 | self.log_callback.log( |
2768 | 0 | LogLevel::Debug, |
2769 | 0 | format!( |
2770 | 0 | "block-verification-success; hash={}; height={}; \ |
2771 | 0 | total_duration={:?}; database_accesses_duration={:?}; \ |
2772 | 0 | runtime_build_duration={:?}; is_new_best={:?}", |
2773 | 0 | HashDisplay(&hash_to_verify), |
2774 | 0 | height, |
2775 | 0 | when_verification_started.elapsed(), |
2776 | 0 | execute_block_success.database_accesses_duration, |
2777 | 0 | execute_block_success.runtime_build_duration, |
2778 | 0 | is_new_best |
2779 | 0 | ), |
2780 | 0 | ); |
2781 | | |
2782 | 0 | match execute_block_success.block_insertion { |
2783 | 0 | Ok(()) => {} |
2784 | 0 | Err(full_sqlite::InsertError::Duplicate) => {} // TODO: this should be an error ; right now we silence them because non-finalized blocks aren't loaded from the database at startup, resulting in them being downloaded again |
2785 | 0 | Err(error) => panic!("failed to insert block in database: {error}"), |
2786 | | } |
2787 | | |
2788 | | // Notify the subscribers. |
2789 | 0 | debug_assert!(self.pending_notification.is_none()); |
2790 | 0 | self.pending_notification = Some(Notification::Block { |
2791 | 0 | block: BlockNotification { |
2792 | 0 | is_new_best, |
2793 | 0 | scale_encoded_header: scale_encoded_header.clone(), |
2794 | 0 | block_hash: header_verification_success.hash(), |
2795 | 0 | runtime_update: execute_block_success |
2796 | 0 | .new_runtime |
2797 | 0 | .as_ref() |
2798 | 0 | .map(|new_runtime| Arc::new(new_runtime.clone())), Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s2_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s2_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s2_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s2_0Ba_ |
2799 | 0 | parent_hash: *header_verification_success.parent_hash(), |
2800 | 0 | }, |
2801 | 0 | storage_changes: execute_block_success.storage_changes.clone(), |
2802 | 0 | }); |
2803 | 0 |
|
2804 | 0 | // Processing has made a step forward. |
2805 | 0 |
|
2806 | 0 | self.sync = header_verification_success.finish(NonFinalizedBlock::NotVerified); |
2807 | | |
2808 | | // Store the storage of the children. |
2809 | 0 | self.sync[(height, &hash_to_verify)] = NonFinalizedBlock::Verified { |
2810 | 0 | runtime: if let Some(new_runtime) = execute_block_success.new_runtime { |
2811 | 0 | Arc::new(new_runtime) |
2812 | | } else { |
2813 | 0 | parent_runtime_arc |
2814 | | }, |
2815 | | }; |
2816 | | |
2817 | 0 | if is_new_best { |
2818 | 0 | // Update the networking. |
2819 | 0 | self.network_local_chain_update_needed = true; |
2820 | 0 | // Reset the block authoring, in order to potentially build a |
2821 | 0 | // block on top of this new best. |
2822 | 0 | self.block_authoring = None; |
2823 | 0 | } |
2824 | | |
2825 | | // Announce the newly-verified block to all the sources that might |
2826 | | // not be aware of it. |
2827 | 0 | debug_assert!(self.pending_block_announce.is_none()); |
2828 | 0 | self.pending_block_announce = Some((scale_encoded_header, hash_to_verify, height)); |
2829 | 0 |
|
2830 | 0 | return (self, true); |
2831 | | } |
2832 | | |
2833 | 0 | all::ProcessOne::VerifyFinalityProof(verify) => { |
2834 | 0 | let sender = verify |
2835 | 0 | .sender() |
2836 | 0 | .1 |
2837 | 0 | .as_ref() |
2838 | 0 | .map(|s| s.peer_id.clone()) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s3_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s3_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s3_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s3_0Ba_ |
2839 | 0 | .unwrap(); |
2840 | 0 |
|
2841 | 0 | match verify.perform(rand::random()) { |
2842 | | ( |
2843 | 0 | sync_out, |
2844 | 0 | all::FinalityProofVerifyOutcome::NewFinalized { |
2845 | 0 | finalized_blocks_newest_to_oldest, |
2846 | 0 | pruned_blocks, |
2847 | 0 | updates_best_block, |
2848 | 0 | }, |
2849 | 0 | ) => { |
2850 | 0 | self.sync = sync_out; |
2851 | 0 |
|
2852 | 0 | let new_finalized_hash = finalized_blocks_newest_to_oldest |
2853 | 0 | .first() |
2854 | 0 | .unwrap() |
2855 | 0 | .block_hash; |
2856 | 0 | self.log_callback.log( |
2857 | 0 | LogLevel::Debug, |
2858 | 0 | format!( |
2859 | 0 | "finality-proof-verification; outcome=success, sender={sender}, new-finalized={}", |
2860 | 0 | HashDisplay(&new_finalized_hash) |
2861 | 0 | ), |
2862 | 0 | ); |
2863 | 0 |
|
2864 | 0 | if updates_best_block { |
2865 | 0 | // Update the networking. |
2866 | 0 | self.network_local_chain_update_needed = true; |
2867 | 0 | // Reset the block authoring, in order to potentially build a |
2868 | 0 | // block on top of this new best. |
2869 | 0 | self.block_authoring = None; |
2870 | 0 | } |
2871 | | |
2872 | 0 | self.finalized_runtime = |
2873 | 0 | match &finalized_blocks_newest_to_oldest.first().unwrap().user_data { |
2874 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), |
2875 | 0 | _ => unreachable!(), |
2876 | | }; |
2877 | | // TODO: what if best block changed? |
2878 | 0 | self.database |
2879 | 0 | .with_database_detached(move |database| { |
2880 | 0 | database.set_finalized(&new_finalized_hash).unwrap(); |
2881 | 0 | }) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s4_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s4_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s4_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s4_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s4_0Ba_ |
2882 | 0 | .await; |
2883 | | |
2884 | | // Notify the subscribers. |
2885 | 0 | debug_assert!(self.pending_notification.is_none()); |
2886 | 0 | self.pending_notification = Some(Notification::Finalized { |
2887 | 0 | finalized_blocks_newest_to_oldest: finalized_blocks_newest_to_oldest |
2888 | 0 | .iter() |
2889 | 0 | .map(|b| b.block_hash) Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s5_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s5_0Ba_ Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s5_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s5_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB8_14SyncBackground14process_blocks0s5_0Ba_ |
2890 | 0 | .collect::<Vec<_>>(), |
2891 | 0 | pruned_blocks_hashes: pruned_blocks.clone(), |
2892 | 0 | best_block_hash: *self.sync.best_block_hash(), |
2893 | 0 | }); |
2894 | 0 |
|
2895 | 0 | (self, true) |
2896 | | } |
2897 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitPending) => { |
2898 | 0 | self.log_callback.log( |
2899 | 0 | LogLevel::Debug, |
2900 | 0 | "finality-proof-verification; outcome=pending, sender={sender}" |
2901 | 0 | .to_string(), |
2902 | 0 | ); |
2903 | 0 | self.sync = sync_out; |
2904 | 0 | (self, true) |
2905 | | } |
2906 | 0 | (sync_out, all::FinalityProofVerifyOutcome::AlreadyFinalized) => { |
2907 | 0 | self.log_callback.log( |
2908 | 0 | LogLevel::Debug, |
2909 | 0 | "finality-proof-verification; outcome=already-finalized, sender={sender}".to_string(), |
2910 | 0 | ); |
2911 | 0 | self.sync = sync_out; |
2912 | 0 | (self, true) |
2913 | | } |
2914 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitError(error)) => { |
2915 | 0 | self.network_service |
2916 | 0 | .ban_and_disconnect( |
2917 | 0 | sender.clone(), |
2918 | 0 | self.network_chain_id, |
2919 | 0 | network_service::BanSeverity::High, |
2920 | 0 | "bad-warp-sync-fragment", |
2921 | 0 | ) |
2922 | 0 | .await; |
2923 | 0 | self.log_callback.log( |
2924 | 0 | LogLevel::Warn, |
2925 | 0 | format!( |
2926 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", |
2927 | 0 | error |
2928 | 0 | ), |
2929 | 0 | ); |
2930 | 0 | self.sync = sync_out; |
2931 | 0 | (self, true) |
2932 | | } |
2933 | 0 | (sync_out, all::FinalityProofVerifyOutcome::JustificationError(error)) => { |
2934 | | // Errors of type `JustificationEngineMismatch` indicate that the chain |
2935 | | // uses a finality engine that smoldot doesn't recognize. This is a benign |
2936 | | // error that shouldn't lead to a ban. |
2937 | 0 | if !matches!( |
2938 | 0 | error, |
2939 | | all::JustificationVerifyError::JustificationEngineMismatch |
2940 | | ) { |
2941 | 0 | self.network_service |
2942 | 0 | .ban_and_disconnect( |
2943 | 0 | sender.clone(), |
2944 | 0 | self.network_chain_id, |
2945 | 0 | network_service::BanSeverity::High, |
2946 | 0 | "bad-warp-sync-fragment", |
2947 | 0 | ) |
2948 | 0 | .await; |
2949 | 0 | } |
2950 | 0 | self.log_callback.log( |
2951 | 0 | LogLevel::Warn, |
2952 | 0 | format!( |
2953 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", |
2954 | 0 | error |
2955 | 0 | ), |
2956 | 0 | ); |
2957 | 0 | self.sync = sync_out; |
2958 | 0 | (self, true) |
2959 | | } |
2960 | | } |
2961 | | } |
2962 | | } |
2963 | 21 | } _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground14process_blocks0CsiLzmwikkc22_14json_rpc_basic Line | Count | Source | 2492 | 2 | async fn process_blocks(mut self) -> (Self, bool) { | 2493 | 2 | // The sync state machine can be in a few various states. At the time of writing: | 2494 | 2 | // idle, verifying header, verifying block, verifying grandpa warp sync proof, | 2495 | 2 | // verifying storage proof. | 2496 | 2 | // If the state is one of the "verifying" states, perform the actual verification and | 2497 | 2 | // loop again until the sync is in an idle state. | 2498 | 2 | let unix_time = SystemTime::now() | 2499 | 2 | .duration_since(SystemTime::UNIX_EPOCH) | 2500 | 2 | .unwrap(); | 2501 | 2 | | 2502 | 2 | // TODO: move this? | 2503 | 2 | let block_number_bytes = self.sync.block_number_bytes(); | 2504 | 2 | | 2505 | 2 | match self.sync.process_one() { | 2506 | 2 | all::ProcessOne::AllSync(idle) => { | 2507 | 2 | self.sync = idle; | 2508 | 2 | (self, false) | 2509 | | } | 2510 | 0 | all::ProcessOne::VerifyWarpSyncFragment(verify) => { | 2511 | 0 | let sender = verify | 2512 | 0 | .proof_sender() | 2513 | 0 | .map(|(_, s)| s.as_ref().unwrap().peer_id.clone()); | 2514 | 0 |
| 2515 | 0 | let (new_sync, outcome) = verify.perform(rand::random()); | 2516 | 0 | self.sync = new_sync; | 2517 | 0 | match outcome { | 2518 | 0 | Ok((fragment_hash, fragment_height)) => { | 2519 | 0 | self.log_callback.log( | 2520 | 0 | LogLevel::Debug, | 2521 | 0 | format!( | 2522 | 0 | "warp-sync-fragment-verification-success; peer_id={}, fragment-hash={}, fragment-height={fragment_height}", | 2523 | 0 | sender.map(|s| s.to_string()).unwrap_or_else(|| "unknown".to_owned()), | 2524 | 0 | hex::encode(&fragment_hash) | 2525 | 0 | ), | 2526 | 0 | ); | 2527 | 0 | } | 2528 | 0 | Err(err) => { | 2529 | 0 | if let Some(sender) = &sender { | 2530 | 0 | self.network_service | 2531 | 0 | .ban_and_disconnect( | 2532 | 0 | sender.clone(), | 2533 | 0 | self.network_chain_id, | 2534 | 0 | network_service::BanSeverity::High, | 2535 | 0 | "bad-warp-sync-fragment", | 2536 | 0 | ) | 2537 | 0 | .await; | 2538 | 0 | } | 2539 | | | 2540 | 0 | self.log_callback.log( | 2541 | 0 | LogLevel::Warn, | 2542 | 0 | format!( | 2543 | 0 | "failed-warp-sync-fragment-verification; peer_id={}, error={err}", | 2544 | 0 | sender | 2545 | 0 | .map(|s| s.to_string()) | 2546 | 0 | .unwrap_or_else(|| "unknown".to_owned()), | 2547 | 0 | ), | 2548 | 0 | ); | 2549 | | } | 2550 | | } | 2551 | 0 | (self, true) | 2552 | | } | 2553 | 0 | all::ProcessOne::WarpSyncBuildRuntime(build_runtime) => { | 2554 | 0 | let (new_sync, outcome) = | 2555 | 0 | build_runtime.build(all::ExecHint::ValidateAndCompile, true); | 2556 | 0 | self.sync = new_sync; | 2557 | 0 | if let Err(err) = outcome { | 2558 | 0 | self.log_callback.log( | 2559 | 0 | LogLevel::Warn, | 2560 | 0 | format!("failed-warp-sync-runtime-compilation; error={err}"), | 2561 | 0 | ); | 2562 | 0 | } | 2563 | 0 | (self, true) | 2564 | | } | 2565 | 0 | all::ProcessOne::WarpSyncBuildChainInformation(build_chain_information) => { | 2566 | 0 | let (new_sync, outcome) = build_chain_information.build(); | 2567 | 0 | self.sync = new_sync; | 2568 | 0 | if let Err(err) 
= outcome { | 2569 | 0 | self.log_callback.log( | 2570 | 0 | LogLevel::Warn, | 2571 | 0 | format!("failed-warp-sync-chain-information-build; error={err}"), | 2572 | 0 | ); | 2573 | 0 | } | 2574 | 0 | (self, true) | 2575 | | } | 2576 | | all::ProcessOne::WarpSyncFinished { | 2577 | 0 | sync, | 2578 | 0 | finalized_body: Some(finalized_body), | 2579 | 0 | finalized_block_runtime, | 2580 | 0 | .. | 2581 | 0 | } => { | 2582 | 0 | self.sync = sync; | 2583 | 0 |
| 2584 | 0 | // Destory all existing subscriptions due to the gap in the chain. | 2585 | 0 | self.pending_notification = None; | 2586 | 0 | self.blocks_notifications.clear(); | 2587 | 0 |
| 2588 | 0 | self.finalized_runtime = Arc::new(finalized_block_runtime); | 2589 | 0 | let finalized_block_header = self | 2590 | 0 | .sync | 2591 | 0 | .as_chain_information() | 2592 | 0 | .as_ref() | 2593 | 0 | .finalized_block_header | 2594 | 0 | .scale_encoding_vec(self.sync.block_number_bytes()); | 2595 | 0 | self.database | 2596 | 0 | .with_database(move |database| { | 2597 | | database | 2598 | | .reset( | 2599 | | &finalized_block_header, | 2600 | | finalized_body.iter().map(|e| &e[..]), | 2601 | | None, | 2602 | | ) | 2603 | | .unwrap(); | 2604 | 0 | }) | 2605 | 0 | .await; | 2606 | | // TODO: what is known about the finalized storage into the database is currently done when a proof is downloaded; however if the proof download finished code no longer inserts entries related to unknown blocks, then we should do it here instead | 2607 | | | 2608 | 0 | if matches!( | 2609 | 0 | self.database_catch_up_download, | 2610 | | DatabaseCatchUpDownload::NothingToDownloadCache | 2611 | 0 | ) { | 2612 | 0 | self.database_catch_up_download = DatabaseCatchUpDownload::NoDownloadInProgress; | 2613 | 0 | } | 2614 | | | 2615 | 0 | (self, true) | 2616 | | } | 2617 | | all::ProcessOne::WarpSyncFinished { | 2618 | | finalized_body: None, | 2619 | | .. | 2620 | | } => { | 2621 | 0 | unreachable!() | 2622 | | } | 2623 | 0 | all::ProcessOne::VerifyBlock(verify) => { | 2624 | 0 | // TODO: ban peer in case of verification failure | 2625 | 0 | let when_verification_started = Instant::now(); | 2626 | 0 | let hash_to_verify = verify.hash(); | 2627 | 0 |
| 2628 | 0 | let _jaeger_span = self.jaeger_service.block_verify_span(&hash_to_verify); | 2629 | | | 2630 | 0 | let (is_new_best, header_verification_success) = | 2631 | 0 | match verify.verify_header(unix_time) { | 2632 | | all::HeaderVerifyOutcome::Success { | 2633 | 0 | is_new_best, | 2634 | 0 | success, | 2635 | 0 | } => (is_new_best, success), | 2636 | 0 | all::HeaderVerifyOutcome::Error { sync, error } => { | 2637 | 0 | // Print a separate warning because it is important for the user | 2638 | 0 | // to be aware of the verification failure. | 2639 | 0 | // `error` is last because it's quite big. | 2640 | 0 | self.log_callback.log( | 2641 | 0 | LogLevel::Warn, | 2642 | 0 | format!( | 2643 | 0 | "failed-block-verification; hash={}; error={}", | 2644 | 0 | HashDisplay(&hash_to_verify), | 2645 | 0 | error | 2646 | 0 | ), | 2647 | 0 | ); | 2648 | 0 | self.sync = sync; | 2649 | 0 | return (self, true); | 2650 | | } | 2651 | | }; | 2652 | | | 2653 | 0 | let parent_info = header_verification_success.parent_user_data().map(|b| { | 2654 | | let NonFinalizedBlock::Verified { runtime } = b else { | 2655 | | unreachable!() | 2656 | | }; | 2657 | | runtime.clone() | 2658 | 0 | }); | 2659 | 0 | let parent_runtime_arc = parent_info | 2660 | 0 | .as_ref() | 2661 | 0 | .cloned() | 2662 | 0 | .unwrap_or_else(|| self.finalized_runtime.clone()); | 2663 | 0 |
| 2664 | 0 | // TODO: check this block against the chain spec's badBlocks | 2665 | 0 |
| 2666 | 0 | let height = header_verification_success.height(); | 2667 | 0 | let scale_encoded_header = | 2668 | 0 | header_verification_success.scale_encoded_header().to_vec(); | 2669 | | | 2670 | 0 | let execute_block_success = match execute_block_and_insert( | 2671 | 0 | &self.database, | 2672 | 0 | (*parent_runtime_arc).clone(), | 2673 | 0 | &header_verification_success.parent_hash(), | 2674 | 0 | header_verification_success.scale_encoded_header(), | 2675 | 0 | block_number_bytes, | 2676 | 0 | header_verification_success | 2677 | 0 | .scale_encoded_extrinsics() | 2678 | 0 | .unwrap(), | 2679 | 0 | unix_time, | 2680 | 0 | is_new_best, | 2681 | 0 | ) | 2682 | 0 | .await | 2683 | | { | 2684 | 0 | Ok(success) => success, | 2685 | | Err(ExecuteBlockError::VerificationFailure( | 2686 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { | 2687 | | error: full_sqlite::StorageAccessError::IncompleteStorage, | 2688 | 0 | context, | 2689 | 0 | }, | 2690 | 0 | )) => { | 2691 | 0 | // The block verification failed because the storage of the parent | 2692 | 0 | // is still being downloaded from the network. | 2693 | 0 | self.log_callback.log( | 2694 | 0 | LogLevel::Debug, | 2695 | 0 | format!( | 2696 | 0 | "block-verification-incomplete-storage; hash={}; height={}; \ | 2697 | 0 | time_before_interrupt={:?}", | 2698 | 0 | HashDisplay(&hash_to_verify), | 2699 | 0 | header_verification_success.height(), | 2700 | 0 | when_verification_started.elapsed() | 2701 | 0 | ), | 2702 | 0 | ); | 2703 | 0 |
| 2704 | 0 | debug_assert!(matches!( | 2705 | 0 | self.database_catch_up_download_block_verification, | 2706 | | DatabaseCatchUpDownloadBlockVerification::None | 2707 | | )); | 2708 | 0 | match context { | 2709 | 0 | ExecuteBlockDatabaseAccessFailureContext::ParentRuntimeAccess => { | 2710 | 0 | self.database_catch_up_download_block_verification = DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 2711 | 0 | block_hash: *header_verification_success.parent_hash(), | 2712 | 0 | block_number: header_verification_success.height() - 1, | 2713 | 0 | }; | 2714 | 0 | } | 2715 | | ExecuteBlockDatabaseAccessFailureContext::FunctionCall { | 2716 | 0 | function_name, | 2717 | 0 | parameter, | 2718 | 0 | } => { | 2719 | 0 | self.database_catch_up_download_block_verification = | 2720 | 0 | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 2721 | 0 | block_hash: *header_verification_success.parent_hash(), | 2722 | 0 | block_number: header_verification_success.height() - 1, | 2723 | 0 | function_name: function_name.to_owned(), | 2724 | 0 | parameter, | 2725 | 0 | }; | 2726 | 0 | } | 2727 | | } | 2728 | | | 2729 | 0 | self.sync = header_verification_success.cancel(); | 2730 | 0 | return (self, true); | 2731 | | } | 2732 | | Err(ExecuteBlockError::VerificationFailure( | 2733 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { | 2734 | 0 | error, .. 
| 2735 | 0 | }, | 2736 | 0 | )) => { | 2737 | 0 | panic!("corrupted database: {error}") | 2738 | | } | 2739 | | Err(ExecuteBlockError::VerificationFailure( | 2740 | | ExecuteBlockVerificationFailureError::ParentCodeEmptyInDatabase | 2741 | | | ExecuteBlockVerificationFailureError::InvaliParentHeapPagesInDatabase(_) | 2742 | | | ExecuteBlockVerificationFailureError::DatabaseInvalidStateTrieVersion, | 2743 | 0 | )) => panic!("corrupted database"), | 2744 | 0 | Err(error) => { | 2745 | 0 | // Print a separate warning because it is important for the user | 2746 | 0 | // to be aware of the verification failure. | 2747 | 0 | // `error` is last because it's quite big. | 2748 | 0 | self.log_callback.log( | 2749 | 0 | LogLevel::Warn, | 2750 | 0 | format!( | 2751 | 0 | "failed-block-verification; hash={}; height={}; \ | 2752 | 0 | total_duration={:?}; error={error}", | 2753 | 0 | HashDisplay(&hash_to_verify), | 2754 | 0 | header_verification_success.height(), | 2755 | 0 | when_verification_started.elapsed() | 2756 | 0 | ), | 2757 | 0 | ); | 2758 | 0 | // Note that some errors are verification failures errors that shouldn't | 2759 | 0 | // lead to the block being marked as bad. However, there's not much else | 2760 | 0 | // we can do here as trying to verify the block again would likely lead to | 2761 | 0 | // the same error again. Marking the block as bad is a reasonable solution. 
| 2762 | 0 | self.sync = header_verification_success.reject_bad_block(); | 2763 | 0 | return (self, true); | 2764 | | } | 2765 | | }; | 2766 | | | 2767 | 0 | self.log_callback.log( | 2768 | 0 | LogLevel::Debug, | 2769 | 0 | format!( | 2770 | 0 | "block-verification-success; hash={}; height={}; \ | 2771 | 0 | total_duration={:?}; database_accesses_duration={:?}; \ | 2772 | 0 | runtime_build_duration={:?}; is_new_best={:?}", | 2773 | 0 | HashDisplay(&hash_to_verify), | 2774 | 0 | height, | 2775 | 0 | when_verification_started.elapsed(), | 2776 | 0 | execute_block_success.database_accesses_duration, | 2777 | 0 | execute_block_success.runtime_build_duration, | 2778 | 0 | is_new_best | 2779 | 0 | ), | 2780 | 0 | ); | 2781 | | | 2782 | 0 | match execute_block_success.block_insertion { | 2783 | 0 | Ok(()) => {} | 2784 | 0 | Err(full_sqlite::InsertError::Duplicate) => {} // TODO: this should be an error ; right now we silence them because non-finalized blocks aren't loaded from the database at startup, resulting in them being downloaded again | 2785 | 0 | Err(error) => panic!("failed to insert block in database: {error}"), | 2786 | | } | 2787 | | | 2788 | | // Notify the subscribers. | 2789 | 0 | debug_assert!(self.pending_notification.is_none()); | 2790 | 0 | self.pending_notification = Some(Notification::Block { | 2791 | 0 | block: BlockNotification { | 2792 | 0 | is_new_best, | 2793 | 0 | scale_encoded_header: scale_encoded_header.clone(), | 2794 | 0 | block_hash: header_verification_success.hash(), | 2795 | 0 | runtime_update: execute_block_success | 2796 | 0 | .new_runtime | 2797 | 0 | .as_ref() | 2798 | 0 | .map(|new_runtime| Arc::new(new_runtime.clone())), | 2799 | 0 | parent_hash: *header_verification_success.parent_hash(), | 2800 | 0 | }, | 2801 | 0 | storage_changes: execute_block_success.storage_changes.clone(), | 2802 | 0 | }); | 2803 | 0 |
| 2804 | 0 | // Processing has made a step forward. | 2805 | 0 |
| 2806 | 0 | self.sync = header_verification_success.finish(NonFinalizedBlock::NotVerified); | 2807 | | | 2808 | | // Store the storage of the children. | 2809 | 0 | self.sync[(height, &hash_to_verify)] = NonFinalizedBlock::Verified { | 2810 | 0 | runtime: if let Some(new_runtime) = execute_block_success.new_runtime { | 2811 | 0 | Arc::new(new_runtime) | 2812 | | } else { | 2813 | 0 | parent_runtime_arc | 2814 | | }, | 2815 | | }; | 2816 | | | 2817 | 0 | if is_new_best { | 2818 | 0 | // Update the networking. | 2819 | 0 | self.network_local_chain_update_needed = true; | 2820 | 0 | // Reset the block authoring, in order to potentially build a | 2821 | 0 | // block on top of this new best. | 2822 | 0 | self.block_authoring = None; | 2823 | 0 | } | 2824 | | | 2825 | | // Announce the newly-verified block to all the sources that might | 2826 | | // not be aware of it. | 2827 | 0 | debug_assert!(self.pending_block_announce.is_none()); | 2828 | 0 | self.pending_block_announce = Some((scale_encoded_header, hash_to_verify, height)); | 2829 | 0 |
| 2830 | 0 | return (self, true); | 2831 | | } | 2832 | | | 2833 | 0 | all::ProcessOne::VerifyFinalityProof(verify) => { | 2834 | 0 | let sender = verify | 2835 | 0 | .sender() | 2836 | 0 | .1 | 2837 | 0 | .as_ref() | 2838 | 0 | .map(|s| s.peer_id.clone()) | 2839 | 0 | .unwrap(); | 2840 | 0 |
| 2841 | 0 | match verify.perform(rand::random()) { | 2842 | | ( | 2843 | 0 | sync_out, | 2844 | 0 | all::FinalityProofVerifyOutcome::NewFinalized { | 2845 | 0 | finalized_blocks_newest_to_oldest, | 2846 | 0 | pruned_blocks, | 2847 | 0 | updates_best_block, | 2848 | 0 | }, | 2849 | 0 | ) => { | 2850 | 0 | self.sync = sync_out; | 2851 | 0 |
| 2852 | 0 | let new_finalized_hash = finalized_blocks_newest_to_oldest | 2853 | 0 | .first() | 2854 | 0 | .unwrap() | 2855 | 0 | .block_hash; | 2856 | 0 | self.log_callback.log( | 2857 | 0 | LogLevel::Debug, | 2858 | 0 | format!( | 2859 | 0 | "finality-proof-verification; outcome=success, sender={sender}, new-finalized={}", | 2860 | 0 | HashDisplay(&new_finalized_hash) | 2861 | 0 | ), | 2862 | 0 | ); | 2863 | 0 |
| 2864 | 0 | if updates_best_block { | 2865 | 0 | // Update the networking. | 2866 | 0 | self.network_local_chain_update_needed = true; | 2867 | 0 | // Reset the block authoring, in order to potentially build a | 2868 | 0 | // block on top of this new best. | 2869 | 0 | self.block_authoring = None; | 2870 | 0 | } | 2871 | | | 2872 | 0 | self.finalized_runtime = | 2873 | 0 | match &finalized_blocks_newest_to_oldest.first().unwrap().user_data { | 2874 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), | 2875 | 0 | _ => unreachable!(), | 2876 | | }; | 2877 | | // TODO: what if best block changed? | 2878 | 0 | self.database | 2879 | 0 | .with_database_detached(move |database| { | 2880 | | database.set_finalized(&new_finalized_hash).unwrap(); | 2881 | 0 | }) | 2882 | 0 | .await; | 2883 | | | 2884 | | // Notify the subscribers. | 2885 | 0 | debug_assert!(self.pending_notification.is_none()); | 2886 | 0 | self.pending_notification = Some(Notification::Finalized { | 2887 | 0 | finalized_blocks_newest_to_oldest: finalized_blocks_newest_to_oldest | 2888 | 0 | .iter() | 2889 | 0 | .map(|b| b.block_hash) | 2890 | 0 | .collect::<Vec<_>>(), | 2891 | 0 | pruned_blocks_hashes: pruned_blocks.clone(), | 2892 | 0 | best_block_hash: *self.sync.best_block_hash(), | 2893 | 0 | }); | 2894 | 0 |
| 2895 | 0 | (self, true) | 2896 | | } | 2897 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitPending) => { | 2898 | 0 | self.log_callback.log( | 2899 | 0 | LogLevel::Debug, | 2900 | 0 | "finality-proof-verification; outcome=pending, sender={sender}" | 2901 | 0 | .to_string(), | 2902 | 0 | ); | 2903 | 0 | self.sync = sync_out; | 2904 | 0 | (self, true) | 2905 | | } | 2906 | 0 | (sync_out, all::FinalityProofVerifyOutcome::AlreadyFinalized) => { | 2907 | 0 | self.log_callback.log( | 2908 | 0 | LogLevel::Debug, | 2909 | 0 | "finality-proof-verification; outcome=already-finalized, sender={sender}".to_string(), | 2910 | 0 | ); | 2911 | 0 | self.sync = sync_out; | 2912 | 0 | (self, true) | 2913 | | } | 2914 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitError(error)) => { | 2915 | 0 | self.network_service | 2916 | 0 | .ban_and_disconnect( | 2917 | 0 | sender.clone(), | 2918 | 0 | self.network_chain_id, | 2919 | 0 | network_service::BanSeverity::High, | 2920 | 0 | "bad-warp-sync-fragment", | 2921 | 0 | ) | 2922 | 0 | .await; | 2923 | 0 | self.log_callback.log( | 2924 | 0 | LogLevel::Warn, | 2925 | 0 | format!( | 2926 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", | 2927 | 0 | error | 2928 | 0 | ), | 2929 | 0 | ); | 2930 | 0 | self.sync = sync_out; | 2931 | 0 | (self, true) | 2932 | | } | 2933 | 0 | (sync_out, all::FinalityProofVerifyOutcome::JustificationError(error)) => { | 2934 | | // Errors of type `JustificationEngineMismatch` indicate that the chain | 2935 | | // uses a finality engine that smoldot doesn't recognize. This is a benign | 2936 | | // error that shouldn't lead to a ban. 
| 2937 | 0 | if !matches!( | 2938 | 0 | error, | 2939 | | all::JustificationVerifyError::JustificationEngineMismatch | 2940 | | ) { | 2941 | 0 | self.network_service | 2942 | 0 | .ban_and_disconnect( | 2943 | 0 | sender.clone(), | 2944 | 0 | self.network_chain_id, | 2945 | 0 | network_service::BanSeverity::High, | 2946 | 0 | "bad-warp-sync-fragment", | 2947 | 0 | ) | 2948 | 0 | .await; | 2949 | 0 | } | 2950 | 0 | self.log_callback.log( | 2951 | 0 | LogLevel::Warn, | 2952 | 0 | format!( | 2953 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", | 2954 | 0 | error | 2955 | 0 | ), | 2956 | 0 | ); | 2957 | 0 | self.sync = sync_out; | 2958 | 0 | (self, true) | 2959 | | } | 2960 | | } | 2961 | | } | 2962 | | } | 2963 | 2 | } |
Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground14process_blocks0B8_ Unexecuted instantiation: _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground14process_blocks0CscDgN54JpMGG_6author _RNCNvMs_NtCsiUjFBJteJ7x_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground14process_blocks0CsibGXYHQB8Ea_25json_rpc_general_requests Line | Count | Source | 2492 | 19 | async fn process_blocks(mut self) -> (Self, bool) { | 2493 | 19 | // The sync state machine can be in a few various states. At the time of writing: | 2494 | 19 | // idle, verifying header, verifying block, verifying grandpa warp sync proof, | 2495 | 19 | // verifying storage proof. | 2496 | 19 | // If the state is one of the "verifying" states, perform the actual verification and | 2497 | 19 | // loop again until the sync is in an idle state. | 2498 | 19 | let unix_time = SystemTime::now() | 2499 | 19 | .duration_since(SystemTime::UNIX_EPOCH) | 2500 | 19 | .unwrap(); | 2501 | 19 | | 2502 | 19 | // TODO: move this? | 2503 | 19 | let block_number_bytes = self.sync.block_number_bytes(); | 2504 | 19 | | 2505 | 19 | match self.sync.process_one() { | 2506 | 19 | all::ProcessOne::AllSync(idle) => { | 2507 | 19 | self.sync = idle; | 2508 | 19 | (self, false) | 2509 | | } | 2510 | 0 | all::ProcessOne::VerifyWarpSyncFragment(verify) => { | 2511 | 0 | let sender = verify | 2512 | 0 | .proof_sender() | 2513 | 0 | .map(|(_, s)| s.as_ref().unwrap().peer_id.clone()); | 2514 | 0 |
| 2515 | 0 | let (new_sync, outcome) = verify.perform(rand::random()); | 2516 | 0 | self.sync = new_sync; | 2517 | 0 | match outcome { | 2518 | 0 | Ok((fragment_hash, fragment_height)) => { | 2519 | 0 | self.log_callback.log( | 2520 | 0 | LogLevel::Debug, | 2521 | 0 | format!( | 2522 | 0 | "warp-sync-fragment-verification-success; peer_id={}, fragment-hash={}, fragment-height={fragment_height}", | 2523 | 0 | sender.map(|s| s.to_string()).unwrap_or_else(|| "unknown".to_owned()), | 2524 | 0 | hex::encode(&fragment_hash) | 2525 | 0 | ), | 2526 | 0 | ); | 2527 | 0 | } | 2528 | 0 | Err(err) => { | 2529 | 0 | if let Some(sender) = &sender { | 2530 | 0 | self.network_service | 2531 | 0 | .ban_and_disconnect( | 2532 | 0 | sender.clone(), | 2533 | 0 | self.network_chain_id, | 2534 | 0 | network_service::BanSeverity::High, | 2535 | 0 | "bad-warp-sync-fragment", | 2536 | 0 | ) | 2537 | 0 | .await; | 2538 | 0 | } | 2539 | | | 2540 | 0 | self.log_callback.log( | 2541 | 0 | LogLevel::Warn, | 2542 | 0 | format!( | 2543 | 0 | "failed-warp-sync-fragment-verification; peer_id={}, error={err}", | 2544 | 0 | sender | 2545 | 0 | .map(|s| s.to_string()) | 2546 | 0 | .unwrap_or_else(|| "unknown".to_owned()), | 2547 | 0 | ), | 2548 | 0 | ); | 2549 | | } | 2550 | | } | 2551 | 0 | (self, true) | 2552 | | } | 2553 | 0 | all::ProcessOne::WarpSyncBuildRuntime(build_runtime) => { | 2554 | 0 | let (new_sync, outcome) = | 2555 | 0 | build_runtime.build(all::ExecHint::ValidateAndCompile, true); | 2556 | 0 | self.sync = new_sync; | 2557 | 0 | if let Err(err) = outcome { | 2558 | 0 | self.log_callback.log( | 2559 | 0 | LogLevel::Warn, | 2560 | 0 | format!("failed-warp-sync-runtime-compilation; error={err}"), | 2561 | 0 | ); | 2562 | 0 | } | 2563 | 0 | (self, true) | 2564 | | } | 2565 | 0 | all::ProcessOne::WarpSyncBuildChainInformation(build_chain_information) => { | 2566 | 0 | let (new_sync, outcome) = build_chain_information.build(); | 2567 | 0 | self.sync = new_sync; | 2568 | 0 | if let Err(err) 
= outcome { | 2569 | 0 | self.log_callback.log( | 2570 | 0 | LogLevel::Warn, | 2571 | 0 | format!("failed-warp-sync-chain-information-build; error={err}"), | 2572 | 0 | ); | 2573 | 0 | } | 2574 | 0 | (self, true) | 2575 | | } | 2576 | | all::ProcessOne::WarpSyncFinished { | 2577 | 0 | sync, | 2578 | 0 | finalized_body: Some(finalized_body), | 2579 | 0 | finalized_block_runtime, | 2580 | 0 | .. | 2581 | 0 | } => { | 2582 | 0 | self.sync = sync; | 2583 | 0 |
| 2584 | 0 | // Destory all existing subscriptions due to the gap in the chain. | 2585 | 0 | self.pending_notification = None; | 2586 | 0 | self.blocks_notifications.clear(); | 2587 | 0 |
| 2588 | 0 | self.finalized_runtime = Arc::new(finalized_block_runtime); | 2589 | 0 | let finalized_block_header = self | 2590 | 0 | .sync | 2591 | 0 | .as_chain_information() | 2592 | 0 | .as_ref() | 2593 | 0 | .finalized_block_header | 2594 | 0 | .scale_encoding_vec(self.sync.block_number_bytes()); | 2595 | 0 | self.database | 2596 | 0 | .with_database(move |database| { | 2597 | | database | 2598 | | .reset( | 2599 | | &finalized_block_header, | 2600 | | finalized_body.iter().map(|e| &e[..]), | 2601 | | None, | 2602 | | ) | 2603 | | .unwrap(); | 2604 | 0 | }) | 2605 | 0 | .await; | 2606 | | // TODO: what is known about the finalized storage into the database is currently done when a proof is downloaded; however if the proof download finished code no longer inserts entries related to unknown blocks, then we should do it here instead | 2607 | | | 2608 | 0 | if matches!( | 2609 | 0 | self.database_catch_up_download, | 2610 | | DatabaseCatchUpDownload::NothingToDownloadCache | 2611 | 0 | ) { | 2612 | 0 | self.database_catch_up_download = DatabaseCatchUpDownload::NoDownloadInProgress; | 2613 | 0 | } | 2614 | | | 2615 | 0 | (self, true) | 2616 | | } | 2617 | | all::ProcessOne::WarpSyncFinished { | 2618 | | finalized_body: None, | 2619 | | .. | 2620 | | } => { | 2621 | 0 | unreachable!() | 2622 | | } | 2623 | 0 | all::ProcessOne::VerifyBlock(verify) => { | 2624 | 0 | // TODO: ban peer in case of verification failure | 2625 | 0 | let when_verification_started = Instant::now(); | 2626 | 0 | let hash_to_verify = verify.hash(); | 2627 | 0 |
| 2628 | 0 | let _jaeger_span = self.jaeger_service.block_verify_span(&hash_to_verify); | 2629 | | | 2630 | 0 | let (is_new_best, header_verification_success) = | 2631 | 0 | match verify.verify_header(unix_time) { | 2632 | | all::HeaderVerifyOutcome::Success { | 2633 | 0 | is_new_best, | 2634 | 0 | success, | 2635 | 0 | } => (is_new_best, success), | 2636 | 0 | all::HeaderVerifyOutcome::Error { sync, error } => { | 2637 | 0 | // Print a separate warning because it is important for the user | 2638 | 0 | // to be aware of the verification failure. | 2639 | 0 | // `error` is last because it's quite big. | 2640 | 0 | self.log_callback.log( | 2641 | 0 | LogLevel::Warn, | 2642 | 0 | format!( | 2643 | 0 | "failed-block-verification; hash={}; error={}", | 2644 | 0 | HashDisplay(&hash_to_verify), | 2645 | 0 | error | 2646 | 0 | ), | 2647 | 0 | ); | 2648 | 0 | self.sync = sync; | 2649 | 0 | return (self, true); | 2650 | | } | 2651 | | }; | 2652 | | | 2653 | 0 | let parent_info = header_verification_success.parent_user_data().map(|b| { | 2654 | | let NonFinalizedBlock::Verified { runtime } = b else { | 2655 | | unreachable!() | 2656 | | }; | 2657 | | runtime.clone() | 2658 | 0 | }); | 2659 | 0 | let parent_runtime_arc = parent_info | 2660 | 0 | .as_ref() | 2661 | 0 | .cloned() | 2662 | 0 | .unwrap_or_else(|| self.finalized_runtime.clone()); | 2663 | 0 |
| 2664 | 0 | // TODO: check this block against the chain spec's badBlocks | 2665 | 0 |
| 2666 | 0 | let height = header_verification_success.height(); | 2667 | 0 | let scale_encoded_header = | 2668 | 0 | header_verification_success.scale_encoded_header().to_vec(); | 2669 | | | 2670 | 0 | let execute_block_success = match execute_block_and_insert( | 2671 | 0 | &self.database, | 2672 | 0 | (*parent_runtime_arc).clone(), | 2673 | 0 | &header_verification_success.parent_hash(), | 2674 | 0 | header_verification_success.scale_encoded_header(), | 2675 | 0 | block_number_bytes, | 2676 | 0 | header_verification_success | 2677 | 0 | .scale_encoded_extrinsics() | 2678 | 0 | .unwrap(), | 2679 | 0 | unix_time, | 2680 | 0 | is_new_best, | 2681 | 0 | ) | 2682 | 0 | .await | 2683 | | { | 2684 | 0 | Ok(success) => success, | 2685 | | Err(ExecuteBlockError::VerificationFailure( | 2686 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { | 2687 | | error: full_sqlite::StorageAccessError::IncompleteStorage, | 2688 | 0 | context, | 2689 | 0 | }, | 2690 | 0 | )) => { | 2691 | 0 | // The block verification failed because the storage of the parent | 2692 | 0 | // is still being downloaded from the network. | 2693 | 0 | self.log_callback.log( | 2694 | 0 | LogLevel::Debug, | 2695 | 0 | format!( | 2696 | 0 | "block-verification-incomplete-storage; hash={}; height={}; \ | 2697 | 0 | time_before_interrupt={:?}", | 2698 | 0 | HashDisplay(&hash_to_verify), | 2699 | 0 | header_verification_success.height(), | 2700 | 0 | when_verification_started.elapsed() | 2701 | 0 | ), | 2702 | 0 | ); | 2703 | 0 |
| 2704 | 0 | debug_assert!(matches!( | 2705 | 0 | self.database_catch_up_download_block_verification, | 2706 | | DatabaseCatchUpDownloadBlockVerification::None | 2707 | | )); | 2708 | 0 | match context { | 2709 | 0 | ExecuteBlockDatabaseAccessFailureContext::ParentRuntimeAccess => { | 2710 | 0 | self.database_catch_up_download_block_verification = DatabaseCatchUpDownloadBlockVerification::CodeStorageProofDesired { | 2711 | 0 | block_hash: *header_verification_success.parent_hash(), | 2712 | 0 | block_number: header_verification_success.height() - 1, | 2713 | 0 | }; | 2714 | 0 | } | 2715 | | ExecuteBlockDatabaseAccessFailureContext::FunctionCall { | 2716 | 0 | function_name, | 2717 | 0 | parameter, | 2718 | 0 | } => { | 2719 | 0 | self.database_catch_up_download_block_verification = | 2720 | 0 | DatabaseCatchUpDownloadBlockVerification::CallProofDesired { | 2721 | 0 | block_hash: *header_verification_success.parent_hash(), | 2722 | 0 | block_number: header_verification_success.height() - 1, | 2723 | 0 | function_name: function_name.to_owned(), | 2724 | 0 | parameter, | 2725 | 0 | }; | 2726 | 0 | } | 2727 | | } | 2728 | | | 2729 | 0 | self.sync = header_verification_success.cancel(); | 2730 | 0 | return (self, true); | 2731 | | } | 2732 | | Err(ExecuteBlockError::VerificationFailure( | 2733 | | ExecuteBlockVerificationFailureError::DatabaseParentAccess { | 2734 | 0 | error, .. 
| 2735 | 0 | }, | 2736 | 0 | )) => { | 2737 | 0 | panic!("corrupted database: {error}") | 2738 | | } | 2739 | | Err(ExecuteBlockError::VerificationFailure( | 2740 | | ExecuteBlockVerificationFailureError::ParentCodeEmptyInDatabase | 2741 | | | ExecuteBlockVerificationFailureError::InvaliParentHeapPagesInDatabase(_) | 2742 | | | ExecuteBlockVerificationFailureError::DatabaseInvalidStateTrieVersion, | 2743 | 0 | )) => panic!("corrupted database"), | 2744 | 0 | Err(error) => { | 2745 | 0 | // Print a separate warning because it is important for the user | 2746 | 0 | // to be aware of the verification failure. | 2747 | 0 | // `error` is last because it's quite big. | 2748 | 0 | self.log_callback.log( | 2749 | 0 | LogLevel::Warn, | 2750 | 0 | format!( | 2751 | 0 | "failed-block-verification; hash={}; height={}; \ | 2752 | 0 | total_duration={:?}; error={error}", | 2753 | 0 | HashDisplay(&hash_to_verify), | 2754 | 0 | header_verification_success.height(), | 2755 | 0 | when_verification_started.elapsed() | 2756 | 0 | ), | 2757 | 0 | ); | 2758 | 0 | // Note that some errors are verification failures errors that shouldn't | 2759 | 0 | // lead to the block being marked as bad. However, there's not much else | 2760 | 0 | // we can do here as trying to verify the block again would likely lead to | 2761 | 0 | // the same error again. Marking the block as bad is a reasonable solution. 
| 2762 | 0 | self.sync = header_verification_success.reject_bad_block(); | 2763 | 0 | return (self, true); | 2764 | | } | 2765 | | }; | 2766 | | | 2767 | 0 | self.log_callback.log( | 2768 | 0 | LogLevel::Debug, | 2769 | 0 | format!( | 2770 | 0 | "block-verification-success; hash={}; height={}; \ | 2771 | 0 | total_duration={:?}; database_accesses_duration={:?}; \ | 2772 | 0 | runtime_build_duration={:?}; is_new_best={:?}", | 2773 | 0 | HashDisplay(&hash_to_verify), | 2774 | 0 | height, | 2775 | 0 | when_verification_started.elapsed(), | 2776 | 0 | execute_block_success.database_accesses_duration, | 2777 | 0 | execute_block_success.runtime_build_duration, | 2778 | 0 | is_new_best | 2779 | 0 | ), | 2780 | 0 | ); | 2781 | | | 2782 | 0 | match execute_block_success.block_insertion { | 2783 | 0 | Ok(()) => {} | 2784 | 0 | Err(full_sqlite::InsertError::Duplicate) => {} // TODO: this should be an error ; right now we silence them because non-finalized blocks aren't loaded from the database at startup, resulting in them being downloaded again | 2785 | 0 | Err(error) => panic!("failed to insert block in database: {error}"), | 2786 | | } | 2787 | | | 2788 | | // Notify the subscribers. | 2789 | 0 | debug_assert!(self.pending_notification.is_none()); | 2790 | 0 | self.pending_notification = Some(Notification::Block { | 2791 | 0 | block: BlockNotification { | 2792 | 0 | is_new_best, | 2793 | 0 | scale_encoded_header: scale_encoded_header.clone(), | 2794 | 0 | block_hash: header_verification_success.hash(), | 2795 | 0 | runtime_update: execute_block_success | 2796 | 0 | .new_runtime | 2797 | 0 | .as_ref() | 2798 | 0 | .map(|new_runtime| Arc::new(new_runtime.clone())), | 2799 | 0 | parent_hash: *header_verification_success.parent_hash(), | 2800 | 0 | }, | 2801 | 0 | storage_changes: execute_block_success.storage_changes.clone(), | 2802 | 0 | }); | 2803 | 0 |
| 2804 | 0 | // Processing has made a step forward. | 2805 | 0 |
| 2806 | 0 | self.sync = header_verification_success.finish(NonFinalizedBlock::NotVerified); | 2807 | | | 2808 | | // Store the storage of the children. | 2809 | 0 | self.sync[(height, &hash_to_verify)] = NonFinalizedBlock::Verified { | 2810 | 0 | runtime: if let Some(new_runtime) = execute_block_success.new_runtime { | 2811 | 0 | Arc::new(new_runtime) | 2812 | | } else { | 2813 | 0 | parent_runtime_arc | 2814 | | }, | 2815 | | }; | 2816 | | | 2817 | 0 | if is_new_best { | 2818 | 0 | // Update the networking. | 2819 | 0 | self.network_local_chain_update_needed = true; | 2820 | 0 | // Reset the block authoring, in order to potentially build a | 2821 | 0 | // block on top of this new best. | 2822 | 0 | self.block_authoring = None; | 2823 | 0 | } | 2824 | | | 2825 | | // Announce the newly-verified block to all the sources that might | 2826 | | // not be aware of it. | 2827 | 0 | debug_assert!(self.pending_block_announce.is_none()); | 2828 | 0 | self.pending_block_announce = Some((scale_encoded_header, hash_to_verify, height)); | 2829 | 0 |
| 2830 | 0 | return (self, true); | 2831 | | } | 2832 | | | 2833 | 0 | all::ProcessOne::VerifyFinalityProof(verify) => { | 2834 | 0 | let sender = verify | 2835 | 0 | .sender() | 2836 | 0 | .1 | 2837 | 0 | .as_ref() | 2838 | 0 | .map(|s| s.peer_id.clone()) | 2839 | 0 | .unwrap(); | 2840 | 0 |
| 2841 | 0 | match verify.perform(rand::random()) { | 2842 | | ( | 2843 | 0 | sync_out, | 2844 | 0 | all::FinalityProofVerifyOutcome::NewFinalized { | 2845 | 0 | finalized_blocks_newest_to_oldest, | 2846 | 0 | pruned_blocks, | 2847 | 0 | updates_best_block, | 2848 | 0 | }, | 2849 | 0 | ) => { | 2850 | 0 | self.sync = sync_out; | 2851 | 0 |
| 2852 | 0 | let new_finalized_hash = finalized_blocks_newest_to_oldest | 2853 | 0 | .first() | 2854 | 0 | .unwrap() | 2855 | 0 | .block_hash; | 2856 | 0 | self.log_callback.log( | 2857 | 0 | LogLevel::Debug, | 2858 | 0 | format!( | 2859 | 0 | "finality-proof-verification; outcome=success, sender={sender}, new-finalized={}", | 2860 | 0 | HashDisplay(&new_finalized_hash) | 2861 | 0 | ), | 2862 | 0 | ); | 2863 | 0 |
| 2864 | 0 | if updates_best_block { | 2865 | 0 | // Update the networking. | 2866 | 0 | self.network_local_chain_update_needed = true; | 2867 | 0 | // Reset the block authoring, in order to potentially build a | 2868 | 0 | // block on top of this new best. | 2869 | 0 | self.block_authoring = None; | 2870 | 0 | } | 2871 | | | 2872 | 0 | self.finalized_runtime = | 2873 | 0 | match &finalized_blocks_newest_to_oldest.first().unwrap().user_data { | 2874 | 0 | NonFinalizedBlock::Verified { runtime } => runtime.clone(), | 2875 | 0 | _ => unreachable!(), | 2876 | | }; | 2877 | | // TODO: what if best block changed? | 2878 | 0 | self.database | 2879 | 0 | .with_database_detached(move |database| { | 2880 | | database.set_finalized(&new_finalized_hash).unwrap(); | 2881 | 0 | }) | 2882 | 0 | .await; | 2883 | | | 2884 | | // Notify the subscribers. | 2885 | 0 | debug_assert!(self.pending_notification.is_none()); | 2886 | 0 | self.pending_notification = Some(Notification::Finalized { | 2887 | 0 | finalized_blocks_newest_to_oldest: finalized_blocks_newest_to_oldest | 2888 | 0 | .iter() | 2889 | 0 | .map(|b| b.block_hash) | 2890 | 0 | .collect::<Vec<_>>(), | 2891 | 0 | pruned_blocks_hashes: pruned_blocks.clone(), | 2892 | 0 | best_block_hash: *self.sync.best_block_hash(), | 2893 | 0 | }); | 2894 | 0 |
| 2895 | 0 | (self, true) | 2896 | | } | 2897 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitPending) => { | 2898 | 0 | self.log_callback.log( | 2899 | 0 | LogLevel::Debug, | 2900 | 0 | "finality-proof-verification; outcome=pending, sender={sender}" | 2901 | 0 | .to_string(), | 2902 | 0 | ); | 2903 | 0 | self.sync = sync_out; | 2904 | 0 | (self, true) | 2905 | | } | 2906 | 0 | (sync_out, all::FinalityProofVerifyOutcome::AlreadyFinalized) => { | 2907 | 0 | self.log_callback.log( | 2908 | 0 | LogLevel::Debug, | 2909 | 0 | "finality-proof-verification; outcome=already-finalized, sender={sender}".to_string(), | 2910 | 0 | ); | 2911 | 0 | self.sync = sync_out; | 2912 | 0 | (self, true) | 2913 | | } | 2914 | 0 | (sync_out, all::FinalityProofVerifyOutcome::GrandpaCommitError(error)) => { | 2915 | 0 | self.network_service | 2916 | 0 | .ban_and_disconnect( | 2917 | 0 | sender.clone(), | 2918 | 0 | self.network_chain_id, | 2919 | 0 | network_service::BanSeverity::High, | 2920 | 0 | "bad-warp-sync-fragment", | 2921 | 0 | ) | 2922 | 0 | .await; | 2923 | 0 | self.log_callback.log( | 2924 | 0 | LogLevel::Warn, | 2925 | 0 | format!( | 2926 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", | 2927 | 0 | error | 2928 | 0 | ), | 2929 | 0 | ); | 2930 | 0 | self.sync = sync_out; | 2931 | 0 | (self, true) | 2932 | | } | 2933 | 0 | (sync_out, all::FinalityProofVerifyOutcome::JustificationError(error)) => { | 2934 | | // Errors of type `JustificationEngineMismatch` indicate that the chain | 2935 | | // uses a finality engine that smoldot doesn't recognize. This is a benign | 2936 | | // error that shouldn't lead to a ban. 
| 2937 | 0 | if !matches!( | 2938 | 0 | error, | 2939 | | all::JustificationVerifyError::JustificationEngineMismatch | 2940 | | ) { | 2941 | 0 | self.network_service | 2942 | 0 | .ban_and_disconnect( | 2943 | 0 | sender.clone(), | 2944 | 0 | self.network_chain_id, | 2945 | 0 | network_service::BanSeverity::High, | 2946 | 0 | "bad-warp-sync-fragment", | 2947 | 0 | ) | 2948 | 0 | .await; | 2949 | 0 | } | 2950 | 0 | self.log_callback.log( | 2951 | 0 | LogLevel::Warn, | 2952 | 0 | format!( | 2953 | 0 | "finality-proof-verification-failure; sender={sender}, error={}", | 2954 | 0 | error | 2955 | 0 | ), | 2956 | 0 | ); | 2957 | 0 | self.sync = sync_out; | 2958 | 0 | (self, true) | 2959 | | } | 2960 | | } | 2961 | | } | 2962 | | } | 2963 | 19 | } |
Unexecuted instantiation: _RNCNvMs_NtCshBwayKnNXDT_17smoldot_full_node17consensus_serviceNtB6_14SyncBackground14process_blocks0B8_ |
2964 | | } |
2965 | | |
2966 | | /// Executes the given block. On success, inserts it and its storage into the database. |
2967 | | // TODO: use a config struct for the parameters? |
2968 | 0 | pub async fn execute_block_and_insert( |
2969 | 0 | database: &database_thread::DatabaseThread, |
2970 | 0 | mut parent_runtime: host::HostVmPrototype, |
2971 | 0 | parent_block_hash: &[u8; 32], |
2972 | 0 | block_header: &[u8], |
2973 | 0 | block_number_bytes: usize, |
2974 | 0 | block_body: impl ExactSizeIterator<Item = impl AsRef<[u8]> + Clone> + Clone, |
2975 | 0 | now_from_unix_epoch: Duration, |
2976 | 0 | is_new_best: bool, |
2977 | 0 | ) -> Result<ExecuteBlockSuccess, ExecuteBlockError> { Unexecuted instantiation: _RINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1l_EECsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppEB4_ Unexecuted instantiation: _RINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1l_EECscDgN54JpMGG_6author Unexecuted instantiation: _RINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1l_EECsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppEB4_ |
2978 | 0 | let mut database_accesses_duration = Duration::new(0, 0); |
2979 | 0 | let mut runtime_build_duration = Duration::new(0, 0); |
2980 | 0 |
|
2981 | 0 | let mut storage_changes = runtime_call::StorageChanges::empty(); |
2982 | 0 | let mut state_trie_version = runtime_call::TrieEntryVersion::V0; // TODO: shouldn't have to be initialized |
2983 | 0 | for (call_function, call_parameter) in [ |
2984 | 0 | ( |
2985 | 0 | body_only::CHECK_INHERENTS_FUNCTION_NAME, |
2986 | 0 | body_only::check_inherents_parameter( |
2987 | 0 | block_header, |
2988 | 0 | block_number_bytes, |
2989 | 0 | block_body.clone(), |
2990 | 0 | now_from_unix_epoch, |
2991 | 0 | ) |
2992 | 0 | .unwrap() |
2993 | 0 | .fold(Vec::new(), |mut a, b| { |
2994 | 0 | a.extend_from_slice(b.as_ref()); |
2995 | 0 | a |
2996 | 0 | }), Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE00B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE00B8_ |
2997 | 0 | ), |
2998 | 0 | ( |
2999 | 0 | body_only::EXECUTE_BLOCK_FUNCTION_NAME, |
3000 | 0 | body_only::execute_block_parameter( |
3001 | 0 | block_header, |
3002 | 0 | block_number_bytes, |
3003 | 0 | block_body.clone(), |
3004 | 0 | ) |
3005 | 0 | .unwrap() |
3006 | 0 | .fold(Vec::new(), |mut a, b| { |
3007 | 0 | a.extend_from_slice(b.as_ref()); |
3008 | 0 | a |
3009 | 0 | }), Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s_0B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s_0B8_ |
3010 | 0 | ), |
3011 | | ] { |
3012 | 0 | match runtime_call( |
3013 | 0 | database, |
3014 | 0 | parent_block_hash, |
3015 | 0 | parent_runtime, |
3016 | 0 | call_function, |
3017 | 0 | &call_parameter, |
3018 | 0 | runtime_call::StorageProofSizeBehavior::Unimplemented, |
3019 | 0 | storage_changes, |
3020 | 0 | ) |
3021 | 0 | .await |
3022 | | { |
3023 | 0 | Ok(success) => { |
3024 | | // TODO: a bit crappy to compare function names, however I got insanely weird borrowck errors if putting a fn() in the for loop above |
3025 | 0 | let error = if call_function == body_only::CHECK_INHERENTS_FUNCTION_NAME { |
3026 | 0 | body_only::check_check_inherents_output(&success.output) |
3027 | 0 | .map_err(ExecuteBlockInvalidBlockError::CheckInherentsOutputError) |
3028 | 0 | .err() |
3029 | | } else { |
3030 | 0 | body_only::check_execute_block_output(&success.output) |
3031 | 0 | .map_err(ExecuteBlockInvalidBlockError::ExecuteBlockOutputError) |
3032 | 0 | .err() |
3033 | | }; |
3034 | 0 | if let Some(error) = error { |
3035 | 0 | return Err(ExecuteBlockError::InvalidBlock(error)); |
3036 | 0 | } |
3037 | 0 |
|
3038 | 0 | parent_runtime = success.runtime; |
3039 | 0 | storage_changes = success.storage_changes; |
3040 | 0 | state_trie_version = success.state_trie_version; |
3041 | 0 | database_accesses_duration += success.database_accesses_duration; |
3042 | | } |
3043 | 0 | Err(RuntimeCallError::RuntimeStartError(error)) => { |
3044 | 0 | return Err(ExecuteBlockError::VerificationFailure( |
3045 | 0 | ExecuteBlockVerificationFailureError::RuntimeStartError(error), |
3046 | 0 | )) |
3047 | | } |
3048 | 0 | Err(RuntimeCallError::RuntimeExecutionError(error)) => { |
3049 | 0 | return Err(ExecuteBlockError::InvalidBlock( |
3050 | 0 | ExecuteBlockInvalidBlockError::RuntimeExecutionError(error), |
3051 | 0 | )) |
3052 | | } |
3053 | 0 | Err(RuntimeCallError::DatabaseParentAccess(error)) => { |
3054 | 0 | return Err(ExecuteBlockError::VerificationFailure( |
3055 | 0 | ExecuteBlockVerificationFailureError::DatabaseParentAccess { |
3056 | 0 | error, |
3057 | 0 | context: ExecuteBlockDatabaseAccessFailureContext::FunctionCall { |
3058 | 0 | function_name: call_function, |
3059 | 0 | parameter: call_parameter.to_owned(), |
3060 | 0 | }, |
3061 | 0 | }, |
3062 | 0 | )) |
3063 | | } |
3064 | | Err(RuntimeCallError::ForbiddenHostFunction) => { |
3065 | 0 | return Err(ExecuteBlockError::VerificationFailure( |
3066 | 0 | ExecuteBlockVerificationFailureError::ForbiddenHostFunction, |
3067 | 0 | )) |
3068 | | } |
3069 | | Err(RuntimeCallError::DatabaseInvalidStateTrieVersion) => { |
3070 | 0 | return Err(ExecuteBlockError::VerificationFailure( |
3071 | 0 | ExecuteBlockVerificationFailureError::DatabaseInvalidStateTrieVersion, |
3072 | 0 | )) |
3073 | | } |
3074 | | } |
3075 | | } |
3076 | | |
3077 | | // If the block performs a runtime upgrade, compile the new runtime. |
3078 | | // The block is rejected if the new runtime doesn't successfully compile. |
3079 | 0 | let new_runtime: Option<executor::host::HostVmPrototype> = match ( |
3080 | 0 | storage_changes.main_trie_diff_get(b":code"), |
3081 | 0 | storage_changes.main_trie_diff_get(b":heappages"), |
3082 | | ) { |
3083 | 0 | (None, None) => None, |
3084 | 0 | (new_code, new_heap_pages) => { |
3085 | 0 | let new_code = match new_code { |
3086 | 0 | Some(Some(c)) => Cow::Borrowed(c), |
3087 | | Some(None) => { |
3088 | 0 | return Err(ExecuteBlockError::InvalidBlock( |
3089 | 0 | ExecuteBlockInvalidBlockError::EmptyCode, |
3090 | 0 | )) |
3091 | | } |
3092 | | None => { |
3093 | 0 | let parent_block_hash = *parent_block_hash; |
3094 | 0 | let access = database |
3095 | 0 | .with_database(move |db| { |
3096 | 0 | db.block_storage_get( |
3097 | 0 | &parent_block_hash, |
3098 | 0 | iter::empty::<iter::Empty<_>>(), |
3099 | 0 | trie::bytes_to_nibbles(b":code".into_iter().copied()).map(u8::from), |
3100 | 0 | ) |
3101 | 0 | }) Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s0_0B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s0_0B8_ |
3102 | 0 | .await; |
3103 | 0 | match access { |
3104 | 0 | Ok(Some((v, _))) => Cow::Owned(v), |
3105 | | Ok(None) => { |
3106 | 0 | return Err(ExecuteBlockError::VerificationFailure( |
3107 | 0 | ExecuteBlockVerificationFailureError::ParentCodeEmptyInDatabase, |
3108 | 0 | )) |
3109 | | } |
3110 | 0 | Err(error) => return Err(ExecuteBlockError::VerificationFailure( |
3111 | 0 | ExecuteBlockVerificationFailureError::DatabaseParentAccess { |
3112 | 0 | error, |
3113 | 0 | context: |
3114 | 0 | ExecuteBlockDatabaseAccessFailureContext::ParentRuntimeAccess, |
3115 | 0 | }, |
3116 | 0 | )), |
3117 | | } |
3118 | | } |
3119 | | }; |
3120 | | |
3121 | 0 | let new_heap_pages = match new_heap_pages { |
3122 | 0 | Some(c) => executor::storage_heap_pages_to_value(c) |
3123 | 0 | .map_err(ExecuteBlockInvalidBlockError::InvalidNewHeapPages)?, |
3124 | | None => executor::storage_heap_pages_to_value( |
3125 | | { |
3126 | 0 | let parent_block_hash = *parent_block_hash; |
3127 | 0 | let access = database |
3128 | 0 | .with_database(move |db| { |
3129 | 0 | db.block_storage_get( |
3130 | 0 | &parent_block_hash, |
3131 | 0 | iter::empty::<iter::Empty<_>>(), |
3132 | 0 | trie::bytes_to_nibbles(b":heappages".into_iter().copied()) |
3133 | 0 | .map(u8::from), |
3134 | 0 | ) |
3135 | 0 | }) Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s1_0B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s1_0B8_ |
3136 | 0 | .await; |
3137 | 0 | match access { |
3138 | 0 | Ok(Some((v, _))) => Some(v), |
3139 | 0 | Ok(None) => None, |
3140 | 0 | Err(error) => return Err(ExecuteBlockError::VerificationFailure( |
3141 | 0 | ExecuteBlockVerificationFailureError::DatabaseParentAccess { |
3142 | 0 | error, |
3143 | 0 | context: |
3144 | 0 | ExecuteBlockDatabaseAccessFailureContext::ParentRuntimeAccess, |
3145 | 0 | }, |
3146 | 0 | )), |
3147 | | } |
3148 | | } |
3149 | 0 | .as_deref(), |
3150 | 0 | ) |
3151 | 0 | .map_err(ExecuteBlockVerificationFailureError::InvaliParentHeapPagesInDatabase)?, |
3152 | | }; |
3153 | | |
3154 | 0 | let before_runtime_build = Instant::now(); |
3155 | 0 | let vm = host::HostVmPrototype::new(host::Config { |
3156 | 0 | module: &new_code, |
3157 | 0 | heap_pages: new_heap_pages, |
3158 | 0 | exec_hint: executor::vm::ExecHint::ValidateAndCompile, |
3159 | 0 | allow_unresolved_imports: false, |
3160 | 0 | }) |
3161 | 0 | .map_err(ExecuteBlockInvalidBlockError::InvalidNewRuntime)?; |
3162 | 0 | runtime_build_duration += before_runtime_build.elapsed(); |
3163 | 0 | Some(vm) |
3164 | | } |
3165 | | }; |
3166 | | |
3167 | 0 | let storage_changes = Arc::new(storage_changes); |
3168 | 0 |
|
3169 | 0 | // Insert the block in the database. |
3170 | 0 | let when_database_access_started = Instant::now(); |
3171 | 0 | let block_insertion = database |
3172 | 0 | .with_database({ |
3173 | 0 | let parent_block_hash = *parent_block_hash; |
3174 | 0 | let storage_changes = storage_changes.clone(); |
3175 | 0 | let block_header = block_header.to_owned(); |
3176 | 0 | let block_body = block_body |
3177 | 0 | .map(|tx| tx.as_ref().to_owned()) Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s2_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s2_0B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s2_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s2_0B8_ |
3178 | 0 | .collect::<Vec<_>>(); |
3179 | 0 | move |database| { |
3180 | 0 | database.insert(&block_header, is_new_best, block_body.into_iter())?; |
3181 | | |
3182 | 0 | let trie_nodes = storage_changes |
3183 | 0 | .trie_changes_iter_ordered() |
3184 | 0 | .unwrap() |
3185 | 0 | .filter_map(|(_child_trie, key, change)| { |
3186 | | let runtime_call::TrieChange::InsertUpdate { |
3187 | 0 | new_merkle_value, |
3188 | 0 | partial_key, |
3189 | 0 | children_merkle_values, |
3190 | 0 | new_storage_value, |
3191 | 0 | } = &change |
3192 | | else { |
3193 | 0 | return None; |
3194 | | }; |
3195 | | |
3196 | | // TODO: this punches through abstraction layers; maybe add some code to runtime_call to indicate this? |
3197 | 0 | let references_merkle_value = key |
3198 | 0 | .iter() |
3199 | 0 | .copied() |
3200 | 0 | .zip(trie::bytes_to_nibbles(b":child_storage:".iter().copied())) |
3201 | 0 | .all(|(a, b)| a == b); Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_000CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_000Bc_ Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_000CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_000CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_000Bc_ |
3202 | 0 |
|
3203 | 0 | Some(full_sqlite::InsertTrieNode { |
3204 | 0 | merkle_value: (&new_merkle_value[..]).into(), |
3205 | 0 | children_merkle_values: array::from_fn(|n| { |
3206 | 0 | children_merkle_values[n] |
3207 | 0 | .as_ref() |
3208 | 0 | .map(|v| From::from(&v[..])) Unexecuted instantiation: _RNCNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1v_EE0s3_00s_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s_00Be_ Unexecuted instantiation: _RNCNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1v_EE0s3_00s_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1v_EE0s3_00s_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s_00Be_ |
3209 | 0 | }), Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s_0Bc_ Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s_0Bc_ |
3210 | 0 | storage_value: match new_storage_value { |
3211 | | runtime_call::TrieChangeStorageValue::Modified { |
3212 | 0 | new_value: Some(value), |
3213 | 0 | } => full_sqlite::InsertTrieNodeStorageValue::Value { |
3214 | 0 | value: Cow::Borrowed(value), |
3215 | 0 | references_merkle_value, |
3216 | 0 | }, |
3217 | | runtime_call::TrieChangeStorageValue::Modified { |
3218 | | new_value: None, |
3219 | 0 | } => full_sqlite::InsertTrieNodeStorageValue::NoValue, |
3220 | | runtime_call::TrieChangeStorageValue::Unmodified => { |
3221 | | // TODO: overhead, and no child trie support |
3222 | 0 | if let Some((value_in_parent, _)) = database |
3223 | 0 | .block_storage_get( |
3224 | 0 | &parent_block_hash, |
3225 | 0 | iter::empty::<iter::Empty<_>>(), |
3226 | 0 | key.iter().map(|n| u8::from(*n)), Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s0_0Bc_ Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s0_0Bc_ |
3227 | 0 | ) |
3228 | 0 | .unwrap() |
3229 | | { |
3230 | 0 | full_sqlite::InsertTrieNodeStorageValue::Value { |
3231 | 0 | value: Cow::Owned(value_in_parent), |
3232 | 0 | references_merkle_value, |
3233 | 0 | } |
3234 | | } else { |
3235 | 0 | full_sqlite::InsertTrieNodeStorageValue::NoValue |
3236 | | } |
3237 | | } |
3238 | | }, |
3239 | 0 | partial_key_nibbles: partial_key |
3240 | 0 | .iter() |
3241 | 0 | .map(|n| u8::from(*n)) Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s1_0Bc_ Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1t_EE0s3_00s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00s1_0Bc_ |
3242 | 0 | .collect::<Vec<_>>() |
3243 | 0 | .into(), |
3244 | | }) |
3245 | 0 | }) Unexecuted instantiation: _RNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1r_EE0s3_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00Ba_ Unexecuted instantiation: _RNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1r_EE0s3_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1r_EE0s3_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_00Ba_ |
3246 | 0 | .collect::<Vec<_>>(); |
3247 | 0 |
|
3248 | 0 | database |
3249 | 0 | .insert_trie_nodes(trie_nodes.into_iter(), u8::from(state_trie_version)) |
3250 | 0 | .map_err(full_sqlite::InsertError::Corrupted) |
3251 | 0 | } Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s3_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_0B8_ Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s3_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1p_EE0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0s3_0B8_ |
3252 | 0 | }) |
3253 | 0 | .await; |
3254 | 0 | database_accesses_duration += when_database_access_started.elapsed(); |
3255 | 0 |
|
3256 | 0 | Ok(ExecuteBlockSuccess { |
3257 | 0 | block_insertion, |
3258 | 0 | new_runtime, |
3259 | 0 | storage_changes, |
3260 | 0 | database_accesses_duration, |
3261 | 0 | runtime_build_duration, |
3262 | 0 | }) |
3263 | 0 | } Unexecuted instantiation: _RNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1n_EE0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertppE0B6_ Unexecuted instantiation: _RNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1n_EE0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCINvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service24execute_block_and_insertRINtNtCsdZExvAaxgia_5alloc3vec3VechEINtNtNtCsaYZPK01V26L_4core5slice4iter4IterB1n_EE0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCINvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service24execute_block_and_insertppE0B6_ |
3264 | | |
/// Returned by [`execute_block_and_insert`] in case of success.
#[derive(Debug)]
pub struct ExecuteBlockSuccess {
    /// Whether the block was successfully inserted in the database.
    ///
    /// Note that the verification itself has succeeded at this point; only the database
    /// insertion can have failed.
    pub block_insertion: Result<(), full_sqlite::InsertError>,

    /// If the block modifies the runtime, this contains the new runtime.
    ///
    /// The new runtime has already been compiled
    /// (see [`ExecuteBlockSuccess::runtime_build_duration`]).
    pub new_runtime: Option<host::HostVmPrototype>,

    /// Changes to the storage performed during the execution.
    pub storage_changes: Arc<runtime_call::StorageChanges>,

    /// Total time the database accesses combined took.
    pub database_accesses_duration: Duration,

    /// Total time compiling the new runtime took. Equal to 0 if no runtime upgrade happened.
    pub runtime_build_duration: Duration,
}
3283 | | |
/// Error returned by [`execute_block_and_insert`].
#[derive(Debug, derive_more::Display, derive_more::From)]
pub enum ExecuteBlockError {
    /// Failed to verify block.
    ///
    /// This doesn't necessarily mean that the block is bad; only that the verification
    /// couldn't be carried out to the end (e.g. a database access error).
    VerificationFailure(ExecuteBlockVerificationFailureError),
    /// Block has been verified as being invalid.
    InvalidBlock(ExecuteBlockInvalidBlockError),
}
3292 | | |
/// See [`ExecuteBlockError::VerificationFailure`].
#[derive(Debug, derive_more::Display)]
pub enum ExecuteBlockVerificationFailureError {
    /// Error starting the runtime execution.
    RuntimeStartError(executor::host::StartErr),
    /// Error while accessing the parent block in the database.
    #[display(fmt = "Error while accessing the parent block in the database: {error}")]
    DatabaseParentAccess {
        /// Error that happened.
        error: full_sqlite::StorageAccessError,
        /// In which context the error happened.
        context: ExecuteBlockDatabaseAccessFailureContext,
    },
    /// State trie version stored in database is invalid.
    DatabaseInvalidStateTrieVersion,
    /// Runtime has tried to call a forbidden host function.
    ForbiddenHostFunction,
    /// The `:code` of the parent block in database is empty.
    ParentCodeEmptyInDatabase,
    /// The `:heappages` of the parent block in the database is invalid.
    // NOTE(review): variant name contains a typo (`Invali` instead of `Invalid`); kept as-is
    // because renaming it would break the public API.
    InvaliParentHeapPagesInDatabase(executor::InvalidHeapPagesError),
}
3315 | | |
/// See [`ExecuteBlockVerificationFailureError::DatabaseParentAccess`].
///
/// Indicates what the verification was doing when the database access failed.
#[derive(Debug)]
pub enum ExecuteBlockDatabaseAccessFailureContext {
    /// Error while accessing the `:code` or `:heappages` key of the parent.
    ParentRuntimeAccess,
    /// Error while performing a runtime call.
    FunctionCall {
        /// Name of the function that was being called during the access error.
        function_name: &'static str,
        /// Parameter to the function that was being called during the access error.
        parameter: Vec<u8>,
    },
}
3329 | | |
/// See [`ExecuteBlockError::InvalidBlock`].
///
/// Contrary to [`ExecuteBlockVerificationFailureError`], these errors indicate that the block
/// itself is bad.
#[derive(Debug, derive_more::Display)]
pub enum ExecuteBlockInvalidBlockError {
    /// Error while executing the runtime.
    RuntimeExecutionError(runtime_call::ErrorDetail),
    /// Error in the output of `BlockBuilder_check_inherents`.
    #[display(fmt = "Error in the output of BlockBuilder_check_inherents: {_0}")]
    CheckInherentsOutputError(body_only::InherentsOutputError),
    /// Error in the output of `Core_execute_block`.
    #[display(fmt = "Error in the output of Core_execute_block: {_0}")]
    ExecuteBlockOutputError(body_only::ExecuteBlockOutputError),
    /// The new `:code` after the execution is empty.
    EmptyCode,
    /// The `:heappages` after the execution is invalid.
    InvalidNewHeapPages(executor::InvalidHeapPagesError),
    /// Failed to compile the new runtime that the block upgrades to.
    InvalidNewRuntime(host::NewErr),
}
3348 | | |
3349 | | /// Perform a runtime call, using the database as the source for storage data. |
3350 | 0 | pub async fn runtime_call( |
3351 | 0 | database: &database_thread::DatabaseThread, |
3352 | 0 | storage_block_hash: &[u8; 32], |
3353 | 0 | runtime: host::HostVmPrototype, |
3354 | 0 | function_to_call: &str, |
3355 | 0 | parameter: &[u8], |
3356 | 0 | storage_proof_size_behavior: runtime_call::StorageProofSizeBehavior, |
3357 | 0 | initial_storage_changes: runtime_call::StorageChanges, |
3358 | 0 | ) -> Result<RuntimeCallSuccess, RuntimeCallError> { Unexecuted instantiation: _RNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call Unexecuted instantiation: _RNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call |
3359 | 0 | let mut call = runtime_call::run(runtime_call::Config { |
3360 | 0 | virtual_machine: runtime, |
3361 | 0 | function_to_call, |
3362 | 0 | parameter: iter::once(¶meter), |
3363 | 0 | storage_proof_size_behavior, |
3364 | 0 | storage_main_trie_changes: initial_storage_changes.into_main_trie_diff(), |
3365 | 0 | max_log_level: 0, |
3366 | 0 | calculate_trie_changes: true, |
3367 | 0 | }) |
3368 | 0 | .map_err(|(err, _)| RuntimeCallError::RuntimeStartError(err))?; Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call00B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call00B7_ |
3369 | | |
3370 | 0 | let mut database_accesses_duration = Duration::new(0, 0); |
3371 | | |
3372 | | loop { |
3373 | 0 | match call { |
3374 | 0 | runtime_call::RuntimeCall::Finished(Err(error)) => { |
3375 | 0 | return Err(RuntimeCallError::RuntimeExecutionError(error.detail)); |
3376 | | } |
3377 | | runtime_call::RuntimeCall::Finished(Ok(runtime_call::Success { |
3378 | 0 | virtual_machine, |
3379 | 0 | storage_changes, |
3380 | 0 | state_trie_version, |
3381 | 0 | .. |
3382 | 0 | })) => { |
3383 | 0 | let output = virtual_machine.value().as_ref().to_owned(); |
3384 | 0 | return Ok(RuntimeCallSuccess { |
3385 | 0 | output, |
3386 | 0 | runtime: virtual_machine.into_prototype(), |
3387 | 0 | storage_changes, |
3388 | 0 | state_trie_version, |
3389 | 0 | database_accesses_duration, |
3390 | 0 | }); |
3391 | | } |
3392 | | |
3393 | 0 | runtime_call::RuntimeCall::StorageGet(req) => { |
3394 | 0 | let when_database_access_started = Instant::now(); |
3395 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
3396 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
3397 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
3398 | 0 | .map(u8::from) |
3399 | 0 | .collect::<Vec<_>>() |
3400 | 0 | }); Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s_0B7_ |
3401 | 0 | let key = trie::bytes_to_nibbles(req.key().as_ref().iter().copied()) |
3402 | 0 | .map(u8::from) |
3403 | 0 | .collect::<Vec<_>>(); |
3404 | 0 | let storage_block_hash = *storage_block_hash; |
3405 | 0 | let value = database |
3406 | 0 | .with_database(move |db| { |
3407 | 0 | db.block_storage_get( |
3408 | 0 | &storage_block_hash, |
3409 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_00B9_ Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s0_00B9_ |
3410 | 0 | key.iter().copied(), |
3411 | 0 | ) |
3412 | 0 | }) Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s0_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s0_0B7_ |
3413 | 0 | .await; |
3414 | 0 | let value = match value { |
3415 | 0 | Ok(Some((ref val, vers))) => Some(( |
3416 | 0 | iter::once(&val[..]), |
3417 | 0 | runtime_call::TrieEntryVersion::try_from(vers) |
3418 | 0 | .map_err(|_| RuntimeCallError::DatabaseInvalidStateTrieVersion)?, Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s1_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s1_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s1_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s1_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s1_0B7_ |
3419 | | )), |
3420 | 0 | Ok(None) => None, |
3421 | 0 | Err(error) => return Err(RuntimeCallError::DatabaseParentAccess(error)), |
3422 | | }; |
3423 | | |
3424 | 0 | database_accesses_duration += when_database_access_started.elapsed(); |
3425 | 0 | call = req.inject_value(value); |
3426 | | } |
3427 | 0 | runtime_call::RuntimeCall::ClosestDescendantMerkleValue(req) => { |
3428 | 0 | let when_database_access_started = Instant::now(); |
3429 | 0 |
|
3430 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
3431 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
3432 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
3433 | 0 | .map(u8::from) |
3434 | 0 | .collect::<Vec<_>>() |
3435 | 0 | }); Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s2_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s2_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s2_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s2_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s2_0B7_ |
3436 | 0 | let key_nibbles = req.key().map(u8::from).collect::<Vec<_>>(); |
3437 | 0 |
|
3438 | 0 | let storage_block_hash = *storage_block_hash; |
3439 | 0 | let merkle_value = database |
3440 | 0 | .with_database(move |db| { |
3441 | 0 | db.block_storage_closest_descendant_merkle_value( |
3442 | 0 | &storage_block_hash, |
3443 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_00B9_ Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s3_00B9_ |
3444 | 0 | key_nibbles.iter().copied(), |
3445 | 0 | ) |
3446 | 0 | }) Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s3_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s3_0B7_ |
3447 | 0 | .await; |
3448 | 0 | let merkle_value = match merkle_value { |
3449 | 0 | Ok(mv) => mv, |
3450 | 0 | Err(error) => return Err(RuntimeCallError::DatabaseParentAccess(error)), |
3451 | | }; |
3452 | | |
3453 | 0 | database_accesses_duration += when_database_access_started.elapsed(); |
3454 | 0 | call = req.inject_merkle_value(merkle_value.as_ref().map(|v| &v[..])); Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s4_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s4_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s4_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s4_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s4_0B7_ |
3455 | | } |
3456 | 0 | runtime_call::RuntimeCall::NextKey(req) => { |
3457 | 0 | let when_database_access_started = Instant::now(); |
3458 | 0 |
|
3459 | 0 | let parent_paths = req.child_trie().map(|child_trie| { |
3460 | 0 | trie::bytes_to_nibbles(b":child_storage:default:".iter().copied()) |
3461 | 0 | .chain(trie::bytes_to_nibbles(child_trie.as_ref().iter().copied())) |
3462 | 0 | .map(u8::from) |
3463 | 0 | .collect::<Vec<_>>() |
3464 | 0 | }); Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s5_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s5_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s5_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s5_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s5_0B7_ |
3465 | 0 | let key_nibbles = req |
3466 | 0 | .key() |
3467 | 0 | .map(u8::from) |
3468 | 0 | .chain(if req.or_equal() { None } else { Some(0u8) }) |
3469 | 0 | .collect::<Vec<_>>(); |
3470 | 0 | let prefix_nibbles = req.prefix().map(u8::from).collect::<Vec<_>>(); |
3471 | 0 |
|
3472 | 0 | let branch_nodes = req.branch_nodes(); |
3473 | 0 | let storage_block_hash = *storage_block_hash; |
3474 | 0 | let next_key = database |
3475 | 0 | .with_database(move |db| { |
3476 | 0 | db.block_storage_next_key( |
3477 | 0 | &storage_block_hash, |
3478 | 0 | parent_paths.into_iter().map(|p| p.into_iter()), Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_00B9_ Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s6_00B9_ |
3479 | 0 | key_nibbles.iter().copied(), |
3480 | 0 | prefix_nibbles.iter().copied(), |
3481 | 0 | branch_nodes, |
3482 | 0 | ) |
3483 | 0 | }) Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s6_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s6_0B7_ |
3484 | 0 | .await; |
3485 | 0 | let next_key = match next_key { |
3486 | 0 | Ok(k) => k, |
3487 | 0 | Err(error) => return Err(RuntimeCallError::DatabaseParentAccess(error)), |
3488 | | }; |
3489 | | |
3490 | 0 | database_accesses_duration += when_database_access_started.elapsed(); |
3491 | 0 | call = req.inject_key( |
3492 | 0 | next_key.map(|k| k.into_iter().map(|b| trie::Nibble::try_from(b).unwrap())), Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_0B7_ Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s7_0B7_ Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_00CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_00B9_ Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_00CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNCNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0s7_00CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNCNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0s7_00B9_ |
3493 | 0 | ); |
3494 | | } |
3495 | 0 | runtime_call::RuntimeCall::OffchainStorageSet(req) => { |
3496 | 0 | // Ignore offchain storage writes at the moment. |
3497 | 0 | call = req.resume(); |
3498 | 0 | } |
3499 | 0 | runtime_call::RuntimeCall::LogEmit(req) => { |
3500 | 0 | // Logs are ignored. |
3501 | 0 | call = req.resume(); |
3502 | 0 | } |
3503 | 0 | runtime_call::RuntimeCall::SignatureVerification(sig) => { |
3504 | 0 | call = sig.verify_and_resume(); |
3505 | 0 | } |
3506 | | runtime_call::RuntimeCall::Offchain(_) => { |
3507 | | // Offchain storage calls are forbidden. |
3508 | 0 | return Err(RuntimeCallError::ForbiddenHostFunction); |
3509 | | } |
3510 | | } |
3511 | | } |
3512 | 0 | } Unexecuted instantiation: _RNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0CsiLzmwikkc22_14json_rpc_basic Unexecuted instantiation: _RNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0B5_ Unexecuted instantiation: _RNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0CscDgN54JpMGG_6author Unexecuted instantiation: _RNCNvNtCsiUjFBJteJ7x_17smoldot_full_node17consensus_service12runtime_call0CsibGXYHQB8Ea_25json_rpc_general_requests Unexecuted instantiation: _RNCNvNtCshBwayKnNXDT_17smoldot_full_node17consensus_service12runtime_call0B5_ |
3513 | | |
/// Returned by [`runtime_call()`] in case of success.
///
/// Bundles together everything produced by a successful runtime execution so
/// the caller can persist the storage changes and reuse the runtime.
#[derive(Debug)]
pub struct RuntimeCallSuccess {
    /// Output of the runtime call.
    pub output: Vec<u8>,

    /// Runtime that was provided as input to [`runtime_call()`].
    /// Handed back so the caller can reuse it for further calls instead of
    /// re-instantiating the virtual machine.
    pub runtime: host::HostVmPrototype,

    /// Changes to the storage performed during the execution.
    pub storage_changes: runtime_call::StorageChanges,

    /// Version of the trie entries of the storage changes.
    pub state_trie_version: runtime_call::TrieEntryVersion,

    /// Total time the database accesses combined took.
    /// Useful for diagnostics: lets the caller distinguish time spent in the
    /// database from time spent executing Wasm.
    pub database_accesses_duration: Duration,
}
3532 | | |
/// Error returned by [`runtime_call()`].
// `derive_more::From` lets the wrapped error types be converted into this enum
// with `?`; the `#[display(fmt = "{_0}")]` attributes delegate the `Display`
// implementation to the inner error.
#[derive(Debug, derive_more::Display, derive_more::From)]
pub enum RuntimeCallError {
    /// Error starting the runtime execution.
    #[display(fmt = "{_0}")]
    RuntimeStartError(executor::host::StartErr),
    /// Error while executing the runtime.
    #[display(fmt = "{_0}")]
    RuntimeExecutionError(runtime_call::ErrorDetail),
    /// Error while accessing the parent block in the database.
    #[display(fmt = "{_0}")]
    DatabaseParentAccess(full_sqlite::StorageAccessError),
    /// State trie version stored in database is invalid.
    DatabaseInvalidStateTrieVersion,
    /// Runtime has tried to call a forbidden host function.
    // Emitted for example when the runtime attempts an offchain-worker host
    // call, which this execution context does not allow.
    ForbiddenHostFunction,
}