/__w/smoldot/smoldot/repo/lib/src/executor/allocator.rs
// This file is part of Substrate.

// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module implements a freeing-bump allocator.
//!
//! The heap is a continuous linear memory and chunks are allocated using a bump allocator.
//!
//! ```ignore
//! +-------------+-------------------------------------------------+
//! | <allocated> | <unallocated>                                   |
//! +-------------+-------------------------------------------------+
//!               ^
//!               |_ bumper
//! ```
//!
//! Only allocations with power-of-two sizes are supported. If an incoming request has a size
//! that is not a power of two, it is rounded up to the nearest power of two. The exponent of
//! that power of two is referred to as **an order**.
//!
//! Each allocation has a header immediately preceding it. The header is always 8 bytes and can
//! be of two types: free and occupied.
//!
//! To implement freeing, we maintain a linked list for each order. Since the maximum supported
//! allocation size is capped, the number of orders, and thus of linked lists, is limited as
//! well. Currently, the maximum size of an allocation is 32 MiB.
//!
//! When the allocator serves an allocation request, it first checks the linked list for the
//! respective order. If it doesn't contain any free chunks, the allocator requests memory from
//! the bump allocator. In either case, the order is stored in the header of the allocation.
//!
//! Upon deallocation, we read the order of the allocation from its header and then add that
//! allocation to the linked list for the respective order.
//!
//! # Caveats
//!
//! This is a fast allocator, but it is also dumb. There are specifically two main shortcomings
//! that the user should keep in mind:
//!
//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This
//! means that it's possible to end up in a situation where there are no live allocations, yet a
//! new allocation will fail.
//!
//! Let's look at an example. Given a heap of 32 MiB, the user makes a 32 MiB allocation that we
//! call `X`. Now the heap is full. Then the user deallocates `X`. Since all the space in the
//! bump allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB
//! will fail.
//!
//! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of
//! 2.00001 MiB will be put into the bucket of 4 MiB. Therefore, any allocation of size `(N, 2N]`
//! takes up `2N` bytes; assuming a uniform distribution of allocation sizes, the average amount
//! in use of a `2N` space on the heap will be `(3N + ε) / 2`. So the average utilization is
//! around `75%` (`(3N + ε) / 2 / 2N`), meaning that around `25%` of the space in an allocation
//! is wasted. This is more pronounced (in terms of absolute heap amounts) with larger
//! allocation sizes.
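//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore` and not compiled here; it relies only on the items
//! defined in this module, using the `Memory` implementation for `[u8]` found below):
//!
//! ```ignore
//! let mut mem = vec![0u8; 64 * 1024];
//! let mut alloc = FreeingBumpHeapAllocator::new(0);
//!
//! // Requesting 24 bytes rounds the size up to the next power of two (32 bytes); the
//! // returned pointer points just past the 8-byte header.
//! let ptr = alloc.allocate(&mut mem[..], 24).unwrap();
//!
//! // Freeing pushes the chunk onto the free list of its order, so a later allocation
//! // of a similar size can reuse it instead of bumping further.
//! alloc.deallocate(&mut mem[..], ptr).unwrap();
//! ```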

#![allow(clippy::all)] // TODO: since this code has been copy-pasted from Substrate, we simply silence clippy warnings

use core::{
    mem,
    ops::{Index, IndexMut, Range},
};

/// Error that can happen in the memory allocator.
#[derive(Debug)]
pub enum Error {
    /// Someone tried to allocate more memory than the allowed maximum per allocation.
    RequestedAllocationTooLarge,
    /// Allocator ran out of space.
    AllocatorOutOfSpace,
    /// The client passed a memory instance which is smaller than previously observed.
    MemoryShrinked,
    // TODO: wtf is "Other"?
    Other(&'static str),
}

/// The maximum number of bytes that can be allocated at one time.
// The maximum possible allocation size was chosen rather arbitrarily; 32 MiB should be enough
// for everybody.
pub const MAX_POSSIBLE_ALLOCATION: u32 = 33_554_432; // 2^25 bytes, 32 MiB

/// The minimal alignment guaranteed by this allocator.
///
/// The alignment of 8 is chosen because it is the maximum size of a primitive type supported by
/// the target version of `wasm32`: `i64`'s natural alignment is 8.
const ALIGNMENT: u32 = 8;

// Each pointer is prefixed with 8 bytes, which identify the list index
// to which it belongs.
const HEADER_SIZE: u32 = 8;
/// Create an allocator error.
fn error(msg: &'static str) -> Error {
    Error::Other(msg)
}
// The minimum possible allocation size is chosen to be 8 bytes because in that case we would
// have an easier time providing the guaranteed alignment of 8.
//
// The maximum possible allocation size is set in the primitives to 32MiB.
//
// N_ORDERS - represents the number of orders supported.
//
// This number corresponds to the number of powers between the minimum possible allocation and
// maximum possible allocation, or: 2^3...2^25 (both ends inclusive, hence 23).
const N_ORDERS: usize = 23;
const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes

/// The exponent for the power of two sized block adjusted to the minimum size.
///
/// This way, if `MIN_POSSIBLE_ALLOCATION == 8`, we would get:
///
/// `power_of_two_size` | order
///                   8 | 0
///                  16 | 1
///                  32 | 2
///                  64 | 3
///                 ... | ...
///            16777216 | 21
///            33554432 | 22
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Order(u32);
impl Order {
    /// Create `Order` object from a raw order.
    ///
    /// Returns `Err` if it is greater than the maximum supported order.
    fn from_raw(order: u32) -> Result<Self, Error> {
        if order < N_ORDERS as u32 {
            Ok(Self(order))
        } else {
            Err(error("invalid order"))
        }
    }

    /// Compute the order from the given size.
    ///
    /// The size is clamped, so that the following holds:
    ///
    /// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION`
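    ///
    /// For illustration, a sketch that follows from the constants above:
    ///
    /// ```ignore
    /// // 20 bytes is rounded up to 32 = 2^5, i.e. order 5 - 3 = 2.
    /// assert_eq!(Order::from_size(20).unwrap(), Order(2));
    /// // 4 bytes is first clamped up to the 8-byte minimum, i.e. order 0.
    /// assert_eq!(Order::from_size(4).unwrap(), Order(0));
    /// ```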
    fn from_size(size: u32) -> Result<Self, Error> {
        let clamped_size = if size > MAX_POSSIBLE_ALLOCATION {
            return Err(Error::RequestedAllocationTooLarge);
        } else if size < MIN_POSSIBLE_ALLOCATION {
            MIN_POSSIBLE_ALLOCATION
        } else {
            size
        };

        // Round the clamped size to the next power of two.
        //
        // It returns the unchanged value if the value is already a power of two.
        let power_of_two_size = clamped_size.next_power_of_two();

        // Compute the number of trailing zeroes to get the order. We adjust it by the number of
        // trailing zeroes in the minimum possible allocation.
        let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();

        Ok(Self(order))
    }

    /// Returns the corresponding size in bytes for this order.
    ///
    /// Note that it is always a power of two.
    fn size(&self) -> u32 {
        MIN_POSSIBLE_ALLOCATION << self.0
    }

    /// Extract the order as `u32`.
    fn into_raw(self) -> u32 {
        self.0
    }
}

/// A special magic value for a pointer in a link that denotes the end of the linked list.
const NIL_MARKER: u32 = u32::MAX;

/// A link between headers in the free list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Link {
    /// Nil, denotes that there is no next element.
    Nil,
    /// Link to the next element, represented as a pointer to a header.
    Ptr(u32),
}

impl Link {
    /// Creates a link from a raw value.
    fn from_raw(raw: u32) -> Self {
        if raw != NIL_MARKER {
            Self::Ptr(raw)
        } else {
            Self::Nil
        }
    }

    /// Converts this link into a raw `u32`.
    fn into_raw(self) -> u32 {
        match self {
            Self::Nil => NIL_MARKER,
            Self::Ptr(ptr) => ptr,
        }
    }
}

/// A header of an allocation.
///
/// The header is encoded in memory as follows.
///
/// ## Free header
///
/// ```ignore
/// 64             32                          0
/// +--------------+---------------------------+
/// |      0       |     next element link     |
/// +--------------+---------------------------+
/// ```
///
/// ## Occupied header
///
/// ```ignore
/// 64             32                          0
/// +--------------+---------------------------+
/// |      1       |           order           |
/// +--------------+---------------------------+
/// ```
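///
/// For example (a direct consequence of the masks used in `write_into` below), an occupied
/// header of order 3 is stored as the `u64` value `0x0000_0001_0000_0003`, written to memory
/// in little-endian byte order: bit 32 marks the allocation as occupied and the low 32 bits
/// carry the order.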
#[derive(Clone, Debug, PartialEq, Eq)]
enum Header {
    /// A free header contains a link to the next element to form a free linked list.
    Free(Link),
    /// An occupied header has an attached order to know in which free list we should put the
    /// allocation upon deallocation.
    Occupied(Order),
}

impl Header {
    /// Reads a header from memory.
    ///
    /// Returns an error if the `header_ptr` is out of bounds of the linear memory or if the read
    /// header is corrupted (e.g. the order is incorrect).
    fn read_from<M: Memory + ?Sized>(memory: &M, header_ptr: u32) -> Result<Self, Error> {
        let raw_header = memory.read_le_u64(header_ptr)?;

        // Check if the header represents an occupied or free allocation and extract the header
        // data by trimming (and discarding) the high bits.
        let occupied = raw_header & 0x00000001_00000000 != 0;
        let header_data = raw_header as u32;

        Ok(if occupied {
            Self::Occupied(Order::from_raw(header_data)?)
        } else {
            Self::Free(Link::from_raw(header_data))
        })
    }

    /// Write out this header to memory.
    ///
    /// Returns an error if the `header_ptr` is out of bounds of the linear memory.
    fn write_into<M: Memory + ?Sized>(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> {
        let (header_data, occupied_mask) = match *self {
            Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
            Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
        };
        let raw_header = header_data as u64 | occupied_mask;
        memory.write_le_u64(header_ptr, raw_header)?;
        Ok(())
    }

    /// Returns the order of the allocation if this is an occupied header.
    fn into_occupied(self) -> Option<Order> {
        match self {
            Self::Occupied(order) => Some(order),
            _ => None,
        }
    }

    /// Returns the link to the next element in the free list if this is a free header.
    fn into_free(self) -> Option<Link> {
        match self {
            Self::Free(link) => Some(link),
            _ => None,
        }
    }
}

/// This struct represents a collection of intrusive linked lists for each order.
struct FreeLists {
    heads: [Link; N_ORDERS],
}

impl FreeLists {
    /// Creates the empty free lists.
    fn new() -> Self {
        Self {
            heads: [Link::Nil; N_ORDERS],
        }
    }

    /// Replaces a given link for the specified order and returns the old one.
    fn replace(&mut self, order: Order, new: Link) -> Link {
        let prev = self[order];
        self[order] = new;
        prev
    }
}

impl Index<Order> for FreeLists {
    type Output = Link;
    fn index(&self, index: Order) -> &Link {
        &self.heads[index.0 as usize]
    }
}

impl IndexMut<Order> for FreeLists {
    fn index_mut(&mut self, index: Order) -> &mut Link {
        &mut self.heads[index.0 as usize]
    }
}

/// An implementation of a freeing-bump allocator.
///
/// Refer to the module-level documentation for further details.
pub struct FreeingBumpHeapAllocator {
    bumper: u32,
    free_lists: FreeLists,
    total_size: u32,
    poisoned: bool,
    max_total_size: u32,
    max_bumper: u32,
    last_observed_memory_size: u32,
}

impl FreeingBumpHeapAllocator {
    /// Creates a new allocation heap which follows a freeing-bump strategy.
    ///
    /// # Arguments
    ///
    /// - `heap_base` - the offset from the beginning of the linear memory where the heap starts.
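    ///
    /// Note that `heap_base` is rounded up to the next multiple of 8 so that the first
    /// allocation is aligned; for example, a `heap_base` of 13 yields an initial bumper of 16
    /// (a direct consequence of the formula below).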
    pub fn new(heap_base: u32) -> Self {
        let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;

        FreeingBumpHeapAllocator {
            bumper: aligned_heap_base,
            free_lists: FreeLists::new(),
            total_size: 0,
            poisoned: false,
            max_total_size: 0,
            max_bumper: aligned_heap_base,
            last_observed_memory_size: 0,
        }
    }

    /// Gets the requested number of bytes to allocate and returns a pointer.
    /// The maximum size which can be allocated at once is 32 MiB.
    /// There is no minimum size, but whatever size is passed into
    /// this function is rounded to the next power of two. If the requested
    /// size is below 8 bytes it will be rounded up to 8 bytes.
    ///
    /// The identity or the type of the passed memory object does not matter. However, the size
    /// of memory cannot shrink compared to the memory passed in previous invocations.
    ///
    /// NOTE: Once the allocator has returned an error, all subsequent requests will return an
    /// error.
    ///
    /// # Arguments
    ///
    /// - `mem` - a slice representing the linear memory on which this allocator operates.
    /// - `size` - size in bytes of the allocation request
    pub fn allocate<M: Memory + ?Sized>(&mut self, mem: &mut M, size: u32) -> Result<u32, Error> {
        if self.poisoned {
            return Err(error("the allocator has been poisoned"));
        }

        let bomb = PoisonBomb {
            poisoned: &mut self.poisoned,
        };

        Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;
        let order = Order::from_size(size)?;

        let header_ptr: u32 = match self.free_lists[order] {
            Link::Ptr(header_ptr) => {
                assert!(
                    header_ptr + order.size() + HEADER_SIZE <= mem.size(),
                    "Pointer is looked up in list of free entries, into which
                    only valid values are inserted; qed"
                );

                // Remove this header from the free list.
                let next_free = Header::read_from(mem, header_ptr)?
                    .into_free()
                    .ok_or_else(|| error("free list points to an occupied header"))?;
                self.free_lists[order] = next_free;

                header_ptr
            }
            Link::Nil => {
                // Corresponding free list is empty. Allocate a new item.
                Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem.size())?
            }
        };

        // Write the order in the occupied header.
        Header::Occupied(order).write_into(mem, header_ptr)?;

        self.total_size += order.size() + HEADER_SIZE;

        // Update trackers if needed.
        if self.total_size > self.max_total_size {
            self.max_total_size = self.total_size;
        }
        if self.bumper > self.max_bumper {
            self.max_bumper = self.bumper;
        }

        bomb.disarm();
        Ok(header_ptr + HEADER_SIZE)
    }

    /// Deallocates the space which was allocated for a pointer.
    ///
    /// The identity or the type of the passed memory object does not matter. However, the size
    /// of memory cannot shrink compared to the memory passed in previous invocations.
    ///
    /// NOTE: Once the allocator has returned an error, all subsequent requests will return an
    /// error.
    ///
    /// # Arguments
    ///
    /// - `mem` - a slice representing the linear memory on which this allocator operates.
    /// - `ptr` - pointer to the allocated chunk
    pub fn deallocate<M: Memory + ?Sized>(&mut self, mem: &mut M, ptr: u32) -> Result<(), Error> {
        if self.poisoned {
            return Err(error("the allocator has been poisoned"));
        }

        let bomb = PoisonBomb {
            poisoned: &mut self.poisoned,
        };

        Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;

        let header_ptr = u32::from(ptr)
            .checked_sub(HEADER_SIZE)
            .ok_or_else(|| error("Invalid pointer for deallocation"))?;

        let order = Header::read_from(mem, header_ptr)?
            .into_occupied()
            .ok_or_else(|| error("the allocation points to an empty header"))?;

        // Update the just-freed header and knit it back to the free list.
        let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
        Header::Free(prev_head).write_into(mem, header_ptr)?;

        // Do the total_size bookkeeping.
        self.total_size = self
            .total_size
            .checked_sub(order.size() + HEADER_SIZE)
            .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?;

        bomb.disarm();
        Ok(())
    }

    /// Increases the `bumper` by `size`.
    ///
    /// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace`
    /// if the operation would exhaust the heap.
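    ///
    /// For example (a sketch following directly from the code below), with `bumper == 16`,
    /// `size == 40`, and `heap_end == 64`, this returns `16` and leaves the bumper at `56`;
    /// with `heap_end == 48` it would instead fail with `AllocatorOutOfSpace`.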
    fn bump(bumper: &mut u32, size: u32, heap_end: u32) -> Result<u32, Error> {
        if *bumper + size > heap_end {
            return Err(Error::AllocatorOutOfSpace);
        }

        let res = *bumper;
        *bumper += size;
        Ok(res)
    }
490 | | |
491 | 33.8k | fn observe_memory_size<M: Memory + ?Sized>( |
492 | 33.8k | last_observed_memory_size: &mut u32, |
493 | 33.8k | mem: &mut M, |
494 | 33.8k | ) -> Result<(), Error> { |
495 | 33.8k | if mem.size() < *last_observed_memory_size { |
496 | 1 | return Err(Error::MemoryShrinked); |
497 | 33.8k | } |
498 | 33.8k | *last_observed_memory_size = mem.size(); |
499 | 33.8k | Ok(()) |
500 | 33.8k | } _RINvMs4_NtNtCsN16ciHI6Qf_7smoldot8executor9allocatorNtB6_24FreeingBumpHeapAllocator19observe_memory_sizeNtNtB8_4host9MemAccessEBa_ Line | Count | Source | 491 | 28.7k | fn observe_memory_size<M: Memory + ?Sized>( | 492 | 28.7k | last_observed_memory_size: &mut u32, | 493 | 28.7k | mem: &mut M, | 494 | 28.7k | ) -> Result<(), Error> { | 495 | 28.7k | if mem.size() < *last_observed_memory_size { | 496 | 0 | return Err(Error::MemoryShrinked); | 497 | 28.7k | } | 498 | 28.7k | *last_observed_memory_size = mem.size(); | 499 | 28.7k | Ok(()) | 500 | 28.7k | } |
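`observe_memory_size` enforces the invariant documented on the `Memory` trait below: the wasm linear memory may grow between calls into the allocator, but may never shrink. A minimal sketch of the same check over a plain size value (a string error standing in for `Error::MemoryShrinked`):

```rust
/// Sketch of the grow-only check; a string error stands in for
/// the module's `Error::MemoryShrinked`.
fn observe(last_observed: &mut u32, current_size: u32) -> Result<(), &'static str> {
    if current_size < *last_observed {
        return Err("memory shrinked");
    }
    *last_observed = current_size;
    Ok(())
}

fn main() {
    let mut last = 0;
    assert!(observe(&mut last, 64).is_ok()); // growth accepted
    assert!(observe(&mut last, 64).is_ok()); // unchanged size accepted
    assert!(observe(&mut last, 32).is_err()); // shrinking rejected
}
```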
501 | | } |
502 | | |
503 | | /// A trait abstracting accesses to a wasm linear memory. Used to read or modify the
504 | | /// allocation prefixes. |
505 | | /// |
506 | | /// A wasm linear memory behaves similarly to a vector. The address space doesn't have holes and is |
507 | | /// accessible up to the reported size. |
508 | | /// |
509 | | /// The linear memory can grow in size with the wasm page granularity (`64KiB`), but it cannot shrink. |
510 | | pub trait Memory { |
511 | | /// Read a `u64` from the heap in LE form. Returns an error if any of the bytes read are out of |
512 | | /// bounds. |
513 | | fn read_le_u64(&self, ptr: u32) -> Result<u64, Error>; |
514 | | /// Write a `u64` to the heap in LE form. Returns an error if any of the bytes written are out of |
515 | | /// bounds. |
516 | | fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>; |
517 | | /// Returns the full size of the memory in bytes. |
518 | | fn size(&self) -> u32; |
519 | | } |
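All header manipulation goes through these three methods, so any byte-addressable store can back the allocator. As a hedged sketch (the helper below is hypothetical, not part of the module), a caller generic over `Memory` might round-trip a header word like this:

```rust
/// Hypothetical helper, for illustration: write a little-endian u64
/// through any `Memory` implementation and read it back.
fn roundtrip<M: Memory + ?Sized>(mem: &mut M, ptr: u32, val: u64) -> Result<u64, Error> {
    mem.write_le_u64(ptr, val)?;
    mem.read_le_u64(ptr)
}
```

The `impl Memory for [u8]` that follows makes any byte slice usable here, e.g. `roundtrip(&mut mem[..], 40, 4_480_113)`, mirroring the `should_read_and_write_u64_correctly` test further down.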
520 | | |
521 | | impl Memory for [u8] { |
522 | 41 | fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> { |
523 | 41 | let range = |
524 | 41 | heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?;
525 | 41 | let bytes = self[range] |
526 | 41 | .try_into() |
527 | 41 | .expect("[u8] slice of length 8 must be convertible to [u8; 8]"); |
528 | 41 | Ok(u64::from_le_bytes(bytes)) |
529 | 41 | }
530 | 70 | fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> { |
531 | 70 | let range = |
532 | 70 | heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?;
533 | 70 | let bytes = val.to_le_bytes(); |
534 | 70 | self[range].copy_from_slice(&bytes[..]); |
535 | 70 | Ok(()) |
536 | 70 | }
537 | 186 | fn size(&self) -> u32 { |
538 | 186 | u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed") |
539 | 186 | }
540 | | } |
541 | | |
542 | 111 | fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> { |
543 | 111 | let start = offset as usize; |
544 | 111 | let end = offset.checked_add(length)? as usize;
545 | 111 | if end <= heap_len { |
546 | 111 | Some(start..end) |
547 | | } else { |
548 | 0 | None |
549 | | } |
550 | 111 | }
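`heap_range` is the single bounds check shared by both accessors: `checked_add` catches `offset + length` overflowing `u32`, and the `end <= heap_len` comparison catches accesses past the reported size. For illustration (as if called from within this module, since the helper is private):

```rust
// Illustration of the three cases handled by heap_range above.
fn main() {
    assert_eq!(heap_range(0, 8, 16), Some(0..8)); // 8-byte header read, in bounds
    assert_eq!(heap_range(12, 8, 16), None); // would end past the heap
    assert_eq!(heap_range(u32::MAX, 8, 16), None); // offset + length overflows u32
}
```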
551 | | |
552 | | /// A guard that will raise the poisoned flag on drop unless disarmed. |
553 | | struct PoisonBomb<'a> { |
554 | | poisoned: &'a mut bool, |
555 | | } |
556 | | |
557 | | impl<'a> PoisonBomb<'a> { |
558 | 33.8k | fn disarm(self) { |
559 | 33.8k | mem::forget(self); |
560 | 33.8k | }
561 | | } |
562 | | |
563 | | impl<'a> Drop for PoisonBomb<'a> { |
564 | 6 | fn drop(&mut self) { |
565 | 6 | *self.poisoned = true; |
566 | 6 | }
567 | | } |
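`PoisonBomb` is a drop guard: arm it before a sequence of fallible memory accesses, and any early return (for instance via `?`) drops the bomb and raises the poisoned flag, while the success path calls `disarm`, which `mem::forget`s the guard so `Drop` never runs. A minimal sketch of the pattern (the closure is a hypothetical stand-in for the allocator's header reads and writes):

```rust
// Sketch of the disarm pattern, as if written inside this module.
fn guarded(poisoned: &mut bool, work: impl FnOnce() -> Result<(), ()>) -> Result<(), ()> {
    let bomb = PoisonBomb { poisoned };
    work()?; // on error, `bomb` is dropped here and sets the flag
    bomb.disarm(); // success: forget the guard, the flag stays clear
    Ok(())
}

fn main() {
    let mut poisoned = false;
    assert!(guarded(&mut poisoned, || Err(())).is_err());
    assert!(poisoned); // the failed operation poisoned the allocator
}
```

The `poison_oom` test near the end of this file exercises exactly this behavior: a failed allocation poisons the allocator, and every subsequent call errors out.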
568 | | |
569 | | #[cfg(test)] |
570 | | mod tests { |
571 | | use super::*; |
572 | | |
573 | | const PAGE_SIZE: u32 = 65536; |
574 | | |
575 | | /// Makes a pointer out of the given address. |
576 | 15 | fn to_pointer(address: u32) -> u32 { |
577 | 15 | address |
578 | 15 | } |
579 | | |
580 | | #[test] |
581 | 1 | fn should_allocate_properly() { |
582 | 1 | // given |
583 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
584 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
585 | 1 | |
586 | 1 | // when |
587 | 1 | let ptr = heap.allocate(&mut mem[..], 1).unwrap(); |
588 | 1 | |
589 | 1 | // then |
590 | 1 | // returned pointer must start right after `HEADER_SIZE` |
591 | 1 | assert_eq!(ptr, to_pointer(HEADER_SIZE)); |
592 | 1 | } |
593 | | |
594 | | #[test] |
595 | 1 | fn should_always_align_pointers_to_multiples_of_8() { |
596 | 1 | // given |
597 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
598 | 1 | let mut heap = FreeingBumpHeapAllocator::new(13); |
599 | 1 | |
600 | 1 | // when |
601 | 1 | let ptr = heap.allocate(&mut mem[..], 1).unwrap(); |
602 | 1 | |
603 | 1 | // then |
604 | 1 | // the pointer must start at the next multiple of 8 from 13 |
605 | 1 | // + the prefix of 8 bytes. |
606 | 1 | assert_eq!(ptr, to_pointer(24)); |
607 | 1 | } |
608 | | |
609 | | #[test] |
610 | 1 | fn should_increment_pointers_properly() { |
611 | 1 | // given |
612 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
613 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
614 | 1 | |
615 | 1 | // when |
616 | 1 | let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); |
617 | 1 | let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); |
618 | 1 | let ptr3 = heap.allocate(&mut mem[..], 1).unwrap(); |
619 | 1 | |
620 | 1 | // then |
621 | 1 | // a prefix of 8 bytes is prepended to each pointer |
622 | 1 | assert_eq!(ptr1, to_pointer(HEADER_SIZE)); |
623 | | |
624 | | // the prefix of 8 bytes + the content of ptr1 padded to the lowest possible |
625 | | // item size of 8 bytes + the prefix of ptr1 |
626 | 1 | assert_eq!(ptr2, to_pointer(24)); |
627 | | |
628 | | // ptr2 + its content of 16 bytes + the prefix of 8 bytes |
629 | 1 | assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE)); |
630 | 1 | } |
631 | | |
632 | | #[test] |
633 | 1 | fn should_free_properly() { |
634 | 1 | // given |
635 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
636 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
637 | 1 | let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); |
638 | 1 | // the prefix of 8 bytes is prepended to the pointer |
639 | 1 | assert_eq!(ptr1, to_pointer(HEADER_SIZE)); |
640 | | |
641 | 1 | let ptr2 = heap.allocate(&mut mem[..], 1).unwrap(); |
642 | 1 | // the prefix of 8 bytes + the content of ptr1 is prepended to the pointer
643 | 1 | assert_eq!(ptr2, to_pointer(24)); |
644 | | |
645 | | // when |
646 | 1 | heap.deallocate(&mut mem[..], ptr2).unwrap(); |
647 | 1 | |
648 | 1 | // then |
649 | 1 | // then the heads table should contain a pointer to the |
650 | 1 | // prefix of ptr2 in the leftmost entry |
651 | 1 | assert_eq!( |
652 | 1 | heap.free_lists.heads[0], |
653 | 1 | Link::Ptr(u32::from(ptr2) - HEADER_SIZE) |
654 | 1 | ); |
655 | 1 | } |
656 | | |
657 | | #[test] |
658 | 1 | fn should_deallocate_and_reallocate_properly() { |
659 | 1 | // given |
660 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
661 | 1 | let padded_offset = 16; |
662 | 1 | let mut heap = FreeingBumpHeapAllocator::new(13); |
663 | 1 | |
664 | 1 | let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); |
665 | 1 | // the prefix of 8 bytes is prepended to the pointer |
666 | 1 | assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE)); |
667 | | |
668 | 1 | let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); |
669 | 1 | // the padded_offset + the previously allocated ptr (8 bytes prefix + |
670 | 1 | // 8 bytes content) + the prefix of 8 bytes which is prepended to the |
671 | 1 | // current pointer |
672 | 1 | assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE)); |
673 | | |
674 | | // when |
675 | 1 | heap.deallocate(&mut mem[..], ptr2).unwrap(); |
676 | 1 | let ptr3 = heap.allocate(&mut mem[..], 9).unwrap(); |
677 | 1 | |
678 | 1 | // then |
679 | 1 | // should have re-allocated |
680 | 1 | assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE)); |
681 | 1 | assert_eq!(heap.free_lists.heads, [Link::Nil; N_ORDERS]); |
682 | 1 | } |
683 | | |
684 | | #[test] |
685 | 1 | fn should_build_linked_list_of_free_areas_properly() { |
686 | 1 | // given |
687 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
688 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
689 | 1 | |
690 | 1 | let ptr1 = heap.allocate(&mut mem[..], 8).unwrap(); |
691 | 1 | let ptr2 = heap.allocate(&mut mem[..], 8).unwrap(); |
692 | 1 | let ptr3 = heap.allocate(&mut mem[..], 8).unwrap(); |
693 | 1 | |
694 | 1 | // when |
695 | 1 | heap.deallocate(&mut mem[..], ptr1).unwrap(); |
696 | 1 | heap.deallocate(&mut mem[..], ptr2).unwrap(); |
697 | 1 | heap.deallocate(&mut mem[..], ptr3).unwrap(); |
698 | 1 | |
699 | 1 | // then |
700 | 1 | assert_eq!( |
701 | 1 | heap.free_lists.heads[0], |
702 | 1 | Link::Ptr(u32::from(ptr3) - HEADER_SIZE) |
703 | 1 | ); |
704 | | |
705 | 1 | let ptr4 = heap.allocate(&mut mem[..], 8).unwrap(); |
706 | 1 | assert_eq!(ptr4, ptr3); |
707 | | |
708 | 1 | assert_eq!( |
709 | 1 | heap.free_lists.heads[0], |
710 | 1 | Link::Ptr(u32::from(ptr2) - HEADER_SIZE) |
711 | 1 | ); |
712 | 1 | } |
713 | | |
714 | | #[test] |
715 | 1 | fn should_not_allocate_if_too_large() { |
716 | 1 | // given |
717 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
718 | 1 | let mut heap = FreeingBumpHeapAllocator::new(13); |
719 | 1 | |
720 | 1 | // when |
721 | 1 | let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13); |
722 | 1 | |
723 | 1 | // then |
724 | 1 | match ptr.unwrap_err() { |
725 | 1 | Error::AllocatorOutOfSpace => {} |
726 | 0 | e => panic!("Expected allocator out of space error, got: {:?}", e), |
727 | | } |
728 | 1 | } |
729 | | |
730 | | #[test] |
731 | 1 | fn should_not_allocate_if_full() { |
732 | 1 | // given |
733 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
734 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
735 | 1 | let ptr1 = heap |
736 | 1 | .allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE) |
737 | 1 | .unwrap(); |
738 | 1 | assert_eq!(ptr1, to_pointer(HEADER_SIZE)); |
739 | | |
740 | | // when |
741 | 1 | let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2); |
742 | 1 | |
743 | 1 | // then |
744 | 1 | // there is no room for another half page incl. its 8 byte prefix |
745 | 1 | match ptr2.unwrap_err() { |
746 | 1 | Error::AllocatorOutOfSpace => {} |
747 | 0 | e => panic!("Expected allocator out of space error, got: {:?}", e), |
748 | | } |
749 | 1 | } |
750 | | |
751 | | #[test] |
752 | 1 | fn should_allocate_max_possible_allocation_size() { |
753 | 1 | // given |
754 | 1 | let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize]; |
755 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
756 | 1 | |
757 | 1 | // when |
758 | 1 | let ptr = heap |
759 | 1 | .allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION) |
760 | 1 | .unwrap(); |
761 | 1 | |
762 | 1 | // then |
763 | 1 | assert_eq!(ptr, to_pointer(HEADER_SIZE)); |
764 | 1 | } |
765 | | |
766 | | #[test] |
767 | 1 | fn should_not_allocate_if_requested_size_too_large() { |
768 | 1 | // given |
769 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
770 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
771 | 1 | |
772 | 1 | // when |
773 | 1 | let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1); |
774 | 1 | |
775 | 1 | // then |
776 | 1 | match ptr.unwrap_err() { |
777 | 1 | Error::RequestedAllocationTooLarge => {} |
778 | 0 | e => panic!("Expected allocation size too large error, got: {:?}", e), |
779 | | } |
780 | 1 | } |
781 | | |
782 | | #[test] |
783 | 1 | fn should_return_error_when_bumper_greater_than_heap_size() { |
784 | 1 | // given |
785 | 1 | let mut mem = [0u8; 64]; |
786 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
787 | 1 | |
788 | 1 | let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); |
789 | 1 | assert_eq!(ptr1, to_pointer(HEADER_SIZE)); |
790 | 1 | heap.deallocate(&mut mem[..], ptr1) |
791 | 1 | .expect("failed freeing ptr1"); |
792 | 1 | assert_eq!(heap.total_size, 0); |
793 | 1 | assert_eq!(heap.bumper, 40); |
794 | | |
795 | 1 | let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); |
796 | 1 | assert_eq!(ptr2, to_pointer(48)); |
797 | 1 | heap.deallocate(&mut mem[..], ptr2) |
798 | 1 | .expect("failed freeing ptr2"); |
799 | 1 | assert_eq!(heap.total_size, 0); |
800 | 1 | assert_eq!(heap.bumper, 64); |
801 | | |
802 | | // when |
803 | | // the `bumper` value is equal to the heap size here and any
804 | | // further allocation which would increment the bumper must fail. |
805 | | // we try to allocate 8 bytes here, which will increment the |
806 | | // bumper since no 8 byte item has been allocated+freed before. |
807 | 1 | let ptr = heap.allocate(&mut mem[..], 8); |
808 | 1 | |
809 | 1 | // then |
810 | 1 | match ptr.unwrap_err() { |
811 | 1 | Error::AllocatorOutOfSpace => {} |
812 | 0 | e => panic!("Expected allocator out of space error, got: {:?}", e), |
813 | | } |
814 | 1 | } |
815 | | |
816 | | #[test] |
817 | 1 | fn should_include_prefixes_in_total_heap_size() { |
818 | 1 | // given |
819 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
820 | 1 | let mut heap = FreeingBumpHeapAllocator::new(1); |
821 | 1 | |
822 | 1 | // when |
823 | 1 | // an item size of 16 must be used then |
824 | 1 | heap.allocate(&mut mem[..], 9).unwrap(); |
825 | 1 | |
826 | 1 | // then |
827 | 1 | assert_eq!(heap.total_size, HEADER_SIZE + 16); |
828 | 1 | } |
829 | | |
830 | | #[test] |
831 | 1 | fn should_calculate_total_heap_size_to_zero() { |
832 | 1 | // given |
833 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
834 | 1 | let mut heap = FreeingBumpHeapAllocator::new(13); |
835 | 1 | |
836 | 1 | // when |
837 | 1 | let ptr = heap.allocate(&mut mem[..], 42).unwrap(); |
838 | 1 | assert_eq!(ptr, to_pointer(16 + HEADER_SIZE)); |
839 | 1 | heap.deallocate(&mut mem[..], ptr).unwrap(); |
840 | 1 | |
841 | 1 | // then |
842 | 1 | assert_eq!(heap.total_size, 0); |
843 | 1 | } |
844 | | |
845 | | #[test] |
846 | 1 | fn should_calculate_total_size_of_zero() { |
847 | 1 | // given |
848 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
849 | 1 | let mut heap = FreeingBumpHeapAllocator::new(19); |
850 | | |
851 | | // when |
852 | 10 | for _ in 1..10 { |
853 | 9 | let ptr = heap.allocate(&mut mem[..], 42).unwrap(); |
854 | 9 | heap.deallocate(&mut mem[..], ptr).unwrap(); |
855 | 9 | } |
856 | | |
857 | | // then |
858 | 1 | assert_eq!(heap.total_size, 0); |
859 | 1 | } |
860 | | |
861 | | #[test] |
862 | 1 | fn should_read_and_write_u64_correctly() { |
863 | 1 | // given |
864 | 1 | let mut mem = [0u8; PAGE_SIZE as usize]; |
865 | 1 | |
866 | 1 | // when |
867 | 1 | Memory::write_le_u64(mem.as_mut(), 40, 4_480_113).unwrap(); |
868 | 1 | |
869 | 1 | // then |
870 | 1 | let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap(); |
871 | 1 | assert_eq!(value, 4_480_113); |
872 | 1 | } |
873 | | |
874 | | #[test] |
875 | 1 | fn should_get_item_size_from_order() { |
876 | 1 | // given |
877 | 1 | let raw_order = 0; |
878 | 1 | |
879 | 1 | // when |
880 | 1 | let item_size = Order::from_raw(raw_order).unwrap().size(); |
881 | 1 | |
882 | 1 | // then |
883 | 1 | assert_eq!(item_size, 8); |
884 | 1 | } |
885 | | |
886 | | #[test] |
887 | 1 | fn should_get_max_item_size_from_index() { |
888 | 1 | // given |
889 | 1 | let raw_order = 22; |
890 | 1 | |
891 | 1 | // when |
892 | 1 | let item_size = Order::from_raw(raw_order).unwrap().size(); |
893 | 1 | |
894 | 1 | // then |
895 | 1 | assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION); |
896 | 1 | } |
897 | | |
898 | | #[test] |
899 | 1 | fn deallocate_needs_to_maintain_linked_list() { |
900 | 1 | let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize]; |
901 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
902 | 1 | |
903 | 1 | // Allocate and free some pointers |
904 | 1 | let ptrs = (0..4) |
905 | 4 | .map(|_| heap.allocate(&mut mem[..], 8).unwrap()) |
906 | 1 | .collect::<Vec<_>>(); |
907 | 1 | ptrs.into_iter() |
908 | 4 | .for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap()); |
909 | 1 | |
910 | 1 | // Second time we should be able to allocate all of them again. |
911 | 1 | let _ = (0..4) |
912 | 4 | .map(|_| heap.allocate(&mut mem[..], 8).unwrap()) |
913 | 1 | .collect::<Vec<_>>(); |
914 | 1 | } |
915 | | |
916 | | #[test] |
917 | 1 | fn header_read_write() { |
918 | 5 | let roundtrip = |header: Header| { |
919 | 5 | let mut memory = [0u8; 32]; |
920 | 5 | header.write_into(memory.as_mut(), 0).unwrap(); |
921 | 5 | |
922 | 5 | let read_header = Header::read_from(memory.as_mut(), 0).unwrap(); |
923 | 5 | assert_eq!(header, read_header); |
924 | 5 | }; |
925 | 1 | |
926 | 1 | roundtrip(Header::Occupied(Order(0))); |
927 | 1 | roundtrip(Header::Occupied(Order(1))); |
928 | 1 | roundtrip(Header::Free(Link::Nil)); |
929 | 1 | roundtrip(Header::Free(Link::Ptr(0))); |
930 | 1 | roundtrip(Header::Free(Link::Ptr(4))); |
931 | 1 | } |
932 | | |
933 | | #[test] |
934 | 1 | fn poison_oom() { |
935 | 1 | // given |
936 | 1 | // a heap of 32 bytes. Should be enough for two allocations. |
937 | 1 | let mut mem = [0u8; 32]; |
938 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
939 | 1 | |
940 | 1 | // when |
941 | 1 | assert!(heap.allocate(mem.as_mut(), 8).is_ok()); |
942 | 1 | let alloc_ptr = heap.allocate(mem.as_mut(), 8).unwrap(); |
943 | 1 | assert!(heap.allocate(mem.as_mut(), 8).is_err()); |
944 | | |
945 | | // then |
946 | 1 | assert!(heap.poisoned); |
947 | 1 | assert!(heap.deallocate(mem.as_mut(), alloc_ptr).is_err()); |
948 | 1 | } |
949 | | |
950 | | #[test] |
951 | 1 | fn test_n_orders() { |
952 | 1 | // Test that N_ORDERS is consistent with min and max possible allocation. |
953 | 1 | assert_eq!( |
954 | 1 | MIN_POSSIBLE_ALLOCATION * 2u32.pow(N_ORDERS as u32 - 1), |
955 | 1 | MAX_POSSIBLE_ALLOCATION |
956 | 1 | ); |
957 | 1 | } |
958 | | |
959 | | #[test] |
960 | 1 | fn accepts_growing_memory() { |
961 | 1 | const ITEM_SIZE: u32 = 16; |
962 | 1 | const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize; |
963 | 1 | |
964 | 1 | let mut mem = vec![0u8; ITEM_ON_HEAP_SIZE * 2]; |
965 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
966 | 1 | |
967 | 1 | let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); |
968 | 1 | let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); |
969 | 1 | |
970 | 1 | mem.extend_from_slice(&[0u8; ITEM_ON_HEAP_SIZE]); |
971 | 1 | |
972 | 1 | let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); |
973 | 1 | } |
974 | | |
975 | | #[test] |
976 | 1 | fn doesnt_accept_shrinking_memory() { |
977 | 1 | const ITEM_SIZE: u32 = 16; |
978 | 1 | const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize; |
979 | 1 | |
980 | 1 | let initial_size = ITEM_ON_HEAP_SIZE * 3; |
981 | 1 | let mut mem = vec![0u8; initial_size]; |
982 | 1 | let mut heap = FreeingBumpHeapAllocator::new(0); |
983 | 1 | |
984 | 1 | let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); |
985 | 1 | |
986 | 1 | mem.truncate(initial_size - 1); |
987 | 1 | |
988 | 1 | match heap.allocate(&mut mem[..], ITEM_SIZE).unwrap_err() { |
989 | 1 | Error::MemoryShrinked => (), |
990 | 0 | _ => panic!(), |
991 | | } |
992 | 1 | } |
993 | | } |