/build/source/src/execution/linear_memory.rs
use core::{cell::UnsafeCell, mem};

use alloc::vec::Vec;

use crate::{
    core::{indices::MemIdx, little_endian::LittleEndianBytes},
    rw_spinlock::RwSpinLock,
    RuntimeError,
};

/// Implementation of the linear memory suitable for concurrent access
///
/// Implements the base for the instructions described in
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
///
/// This linear memory implementation internally relies on a `Vec<UnsafeCell<u8>>`. Thus, the atomic
/// unit of information for it is a byte (`u8`). All access to the linear memory internally occurs
/// through pointers, avoiding the creation of shared and mutable references to the internal data
/// entirely. This avoids undefined behavior, except for the race condition inherent to concurrent
/// writes. Because of this, the [`LinearMemory::store`] function does not require `&mut self` --
/// `&self` suffices.
///
/// # Notes on overflowing
///
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
/// perform bounds checking. Thus they always have to ensure that `n + index <= linear_memory.len()`
/// holds true (i.e. `n + index - 1` must be a valid index into `linear_memory`). However, writing
/// that check as-is bears the danger of an overflow: assuming that `n`, `index` and
/// `linear_memory.len()` are of the same integer type, `n + index` can overflow, resulting in
/// the check passing despite the access being out of bounds!
///
/// To avoid this, the bounds checks are carefully ordered to avoid any overflows (see the sketch
/// after this list):
///
/// - First we check that `n <= linear_memory.len()` holds true, ensuring that the amount of bytes
///   to be accessed is indeed smaller than or equal to the linear memory's size. If this does not
///   hold true, continuing the operation would yield an out of bounds access in any case.
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
///   avoid the overflow, as there is no addition. The subtraction on the right-hand side can not
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
///   `linear_memory.len()`).
///
/// Combined in the given order, these two checks enable bounds checking without risking any
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
/// integer type.
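///
/// A minimal sketch of this check ordering as a free-standing predicate (a hypothetical helper
/// for illustration only, not part of this module):
///
/// ```ignore
/// /// Returns whether accessing `n` bytes starting at `index` stays within `len` bytes.
/// fn access_in_bounds(n: usize, index: usize, len: usize) -> bool {
///     // First check: `n` alone must fit, which rules out the underflow below.
///     // Second check: no addition occurs, so no overflow is possible.
///     n <= len && index <= len - n
/// }
/// ```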
///
/// # Notes on locking
///
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
/// name, writes to the linear memory do not require the acquisition of a write lock. Writes are
/// implemented through a shared reference to the internal vector, with an `UnsafeCell` to achieve
/// interior mutability.
///
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a grow can
/// cause the vector's internal data buffer to be copied over to a bigger, fresh allocation.
/// The old buffer is then freed. Combined with concurrent mutable access, this can cause a
/// use-after-free. To avoid this, a grow operation of the linear memory acquires a write lock,
/// blocking all reads/writes to the linear memory in the meantime.
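///
/// The resulting locking discipline, as a sketch (method bodies elided; see the actual
/// implementations below):
///
/// ```ignore
/// fn store(&self, /* ... */) { let guard = self.inner_data.read(); /* raw pointer write */ }
/// fn load(&self, /* ... */)  { let guard = self.inner_data.read(); /* raw pointer read */ }
/// fn grow(&self, /* ... */)  { let mut guard = self.inner_data.write(); /* may reallocate */ }
/// ```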
///
/// # Unsafe Note
///
/// Raw pointer access is required, because concurrent mutation of the linear memory might happen
/// (consider the threading proposal for WASM, where multiple WASM threads access the same linear
/// memory at the same time). The inherent race condition results in UB w/r/t the state of the `u8`s
/// in the inner data. However, this is tolerable: avoiding race conditions on the state of the
/// linear memory can not be the task of the interpreter, but has to be fulfilled by the interpreted
/// bytecode itself.
// TODO if a memmap like operation is available, the linear memory implementation can be optimized
// brutally. Out-of-bounds access can be mapped to userspace-handled page faults, e.g. the MMU takes
// over the responsibility of catching out-of-bounds accesses. Grow can happen without copying of
// data, by mapping new pages consecutively after the current final page of the linear memory.
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
    inner_data: RwSpinLock<Vec<UnsafeCell<u8>>>,
}

/// Type to express the page count
pub type PageCountTy = u16;

impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
    /// Size of a page in the linear memory, measured in bytes
    ///
    /// The WASM specification demands a page size of 64 KiB, that is `65536` bytes:
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
    const PAGE_SIZE: usize = PAGE_SIZE;

    /// Create a new, empty [`LinearMemory`]
    pub fn new() -> Self {
        Self {
            inner_data: RwSpinLock::new(Vec::new()),
        }
    }

    /// Create a new [`LinearMemory`] with `pages` pages pre-allocated and zeroed
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
        let size_bytes = Self::PAGE_SIZE * pages as usize;
        let mut data = Vec::with_capacity(size_bytes);
        data.resize_with(size_bytes, || UnsafeCell::new(0));

        Self {
            inner_data: RwSpinLock::new(data),
        }
    }

    /// Grow the [`LinearMemory`] by a number of pages
    pub fn grow(&self, pages_to_add: PageCountTy) {
        let mut lock_guard = self.inner_data.write();
        let prior_length_bytes = lock_guard.len();
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * pages_to_add as usize;
        lock_guard.resize_with(new_length_bytes, || UnsafeCell::new(0));
    }

    /// Get the number of pages currently allocated to this [`LinearMemory`]
    pub fn pages(&self) -> PageCountTy {
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
    }

    /// Get the length in bytes currently allocated to this [`LinearMemory`]
    // TODO remove this op
    pub fn len(&self) -> usize {
        self.inner_data.read().len()
    }

    /// At a given index, store a datum in the [`LinearMemory`]
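    ///
    /// A hedged usage sketch (marked `ignore`; assumes an integer type implementing
    /// [`LittleEndianBytes`], as the tests below use):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// // `&self` suffices for writes, see the locking notes on the type.
    /// mem.store(0, 42u32)?;
    /// ```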
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
        value: T,
    ) -> Result<(), RuntimeError> {
        let value_size = mem::size_of::<T>();

        // Unless someone implements something wrong like `impl LittleEndianBytes<3> for f64`, this
        // check is already guaranteed at the type level. Therefore only a debug_assert.
        debug_assert_eq!(value_size, N, "value size must match const generic N");

        let lock_guard = self.inner_data.read();

        // A value must fit into the linear memory
        if value_size > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // The following statement must be true:
        // `index + value_size <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - value_size {
            error!("value write would extend beyond the end of the linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // TODO this unwrap can not fail, maybe use unwrap_unchecked?
        let ptr = lock_guard.get(index).unwrap().get();
        let bytes = value.to_le_bytes();

        // Safety argument:
        //
        // - nonoverlapping is guaranteed, because `src` is a pointer to a stack allocated array,
        //   while the destination is a heap allocated Vec
        // - the first check above guarantees that `src` fits into the destination
        // - the second check above guarantees that even with the offset in `index`, `src` does not
        //   extend beyond the destination's last `UnsafeCell<u8>`
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references
        unsafe { ptr.copy_from_nonoverlapping(bytes.as_ref().as_ptr(), value_size) }

        Ok(())
    }

    /// From a given index, load a datum in the [`LinearMemory`]
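    ///
    /// A hedged usage sketch (marked `ignore`; mirrors the turbofish style used by the tests
    /// below):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// let x = mem.load::<{ core::mem::size_of::<u32>() }, u32>(0)?;
    /// ```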
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
    ) -> Result<T, RuntimeError> {
        let value_size = mem::size_of::<T>();

        // Unless someone implements something wrong like `LittleEndianBytes<3> for i8`, this
        // check is already guaranteed at the type level. Therefore only a debug_assert.
        debug_assert_eq!(value_size, N, "value size must match const generic N");

        let lock_guard = self.inner_data.read();

        // A value must fit into the linear memory
        if value_size > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // The following statement must be true:
        // `index + value_size <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - value_size {
            error!("value read would extend beyond the end of the linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        let ptr = lock_guard.get(index).unwrap().get();
        let mut bytes = [0; N];

        // Safety argument:
        //
        // - nonoverlapping is guaranteed, because `dest` is a pointer to a stack allocated array,
        //   while the source is a heap allocated Vec
        // - the first check above guarantees that the source is at least as big as `dest`
        // - the second check above guarantees that even with the offset in `index`, `dest` does
        //   not extend beyond the source's last `UnsafeCell<u8>`
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references
        unsafe { ptr.copy_to_nonoverlapping(bytes.as_mut_ptr(), bytes.len()) };
        Ok(T::from_le_bytes(bytes))
    }

    /// Implementation of the behavior described in
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
    /// Note that the WASM spec defines the behavior by recursion, while our implementation uses
    /// the memset-like [`core::ptr::write_bytes`].
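    ///
    /// A hedged usage sketch (marked `ignore`):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// // Set the 64 bytes starting at index 0 to 0xAA.
    /// mem.fill(0, 0xAA, 64)?;
    /// ```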
    pub fn fill(&self, index: MemIdx, data_byte: u8, count: MemIdx) -> Result<(), RuntimeError> {
        if count == 0 {
            return Ok(());
        }

        let lock_guard = self.inner_data.read();

        if count > lock_guard.len() {
            error!("fill count is bigger than the linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        if index > lock_guard.len() - count {
            error!("fill extends beyond the linear memory's end");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        let ptr = lock_guard[index].get();
        unsafe {
            ptr.write_bytes(data_byte, count);
        }

        Ok(())
    }

    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
    /// different linear memory
    ///
    /// - Both regions may overlap
    /// - Copies the `count` bytes starting from `source_index`, overwriting the `count` bytes
    ///   starting from `destination_index`
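    ///
    /// A hedged usage sketch (marked `ignore`; note that `source_mem: &Self` forces both memories
    /// to share the same `PAGE_SIZE`):
    ///
    /// ```ignore
    /// let dst = LinearMemory::<65536>::new_with_initial_pages(1);
    /// let src = LinearMemory::<65536>::new_with_initial_pages(1);
    /// // Copy 16 bytes from src[32..48] into dst[0..16].
    /// dst.copy(0, &src, 32, 16)?;
    /// ```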
    pub fn copy(
        &self,
        destination_index: MemIdx,
        source_mem: &Self,
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        if count == 0 {
            return Ok(());
        }

        // self is the destination
        let lock_guard_self = self.inner_data.read();

        // other is the source
        let lock_guard_other = source_mem.inner_data.read();

        // check destination for out of bounds access
        if count > lock_guard_self.len() {
            error!("copy count is bigger than the destination linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        if destination_index > lock_guard_self.len() - count {
            error!("copy destination extends beyond the linear memory's end");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // check source for out of bounds access
        if count > lock_guard_other.len() {
            error!("copy count is bigger than the source linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        if source_index > lock_guard_other.len() - count {
            error!("copy source extends beyond the linear memory's end");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // acquire pointers
        let destination_ptr = lock_guard_self[destination_index].get();
        let source_ptr = lock_guard_other[source_index].get();

        // copy the data
        unsafe {
            // TODO investigate if it is worth to use a conditional `copy_from_nonoverlapping`
            // if the non-overlapping can be confirmed (and the count is bigger than a certain
            // threshold).
            destination_ptr.copy_from(source_ptr, count);
        }

        Ok(())
    }

    /// Copy `count` bytes from `source_data`, starting at `source_index`, into this
    /// [`LinearMemory`], starting at `destination_index`
    ///
    /// Rationale behind having `source_index` and `count` when the callsite could also just create
    /// a subslice for `source_data`: have all the index error checks in one place.
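    ///
    /// A hedged usage sketch (marked `ignore`):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// let data = [1u8, 2, 3, 4];
    /// // Write data[1..3], i.e. the bytes 2 and 3, to mem[10..12].
    /// mem.init(10, &data, 1, 2)?;
    /// ```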
    pub fn init(
        &self,
        destination_index: MemIdx,
        source_data: &[u8],
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        if count == 0 {
            return Ok(());
        }

        let lock_guard_self = self.inner_data.read();
        let data_len = source_data.len();

        // check destination for out of bounds access
        if count > lock_guard_self.len() {
            error!("init count is bigger than the linear memory");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        if destination_index > lock_guard_self.len() - count {
            error!("init extends beyond the linear memory's end");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // check source for out of bounds access
        if count > data_len {
            error!("init count is bigger than the data instance");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        if source_index > data_len - count {
            error!("init source extends beyond the data instance's end");
            return Err(RuntimeError::MemoryAccessOutOfBounds);
        }

        // acquire pointers
        let destination_ptr = lock_guard_self[destination_index].get();
        let source_ptr = &source_data[source_index];

        // copy the data
        unsafe {
            destination_ptr.copy_from_nonoverlapping(source_ptr, count);
        }

        Ok(())
    }
}

impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "LinearMemory {{ inner_data: [ ")?;
        let lock_guard = self.inner_data.read();
        let mut iter = lock_guard.iter();

        if let Some(first_byte_uc) = iter.next() {
            write!(f, "{}", unsafe { *first_byte_uc.get() })?;
        }

        for uc in iter {
            // Safety argument:
            //
            // TODO
            let byte = unsafe { *uc.get() };

            write!(f, ", {byte}")?;
        }
        write!(f, " ] }}")
    }
}

impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod test {
    use alloc::format;

    use super::*;

    const PAGE_SIZE: usize = 1 << 8;
    const PAGES: PageCountTy = 2;

    #[test]
    fn new_constructor() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        assert_eq!(lin_mem.pages(), 0);
    }

    #[test]
    fn new_grow() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        lin_mem.grow(1);
        assert_eq!(lin_mem.pages(), 1);
    }

    #[test]
    fn debug_print() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        assert_eq!(lin_mem.pages(), 1);

        let expected_length = "LinearMemory { inner_data: [ ] }".len() + PAGE_SIZE * "0, ".len();
        let tol = 2;

        let debug_repr = format!("{lin_mem:?}");
        let lower_bound = expected_length - tol;
        let upper_bound = expected_length + tol;
        assert!((lower_bound..upper_bound).contains(&debug_repr.len()));
    }

    #[test]
    fn roundtrip_normal_range_i8_neg127() {
        let x: i8 = -127;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f32_13() {
        let x: f32 = 13.0;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f32>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<f32>() }, f32>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_min() {
        let x: f64 = f64::MIN;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<f64>() }, f64>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_nan() {
        let x: f64 = f64::NAN;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert!(
                lin_mem
                    .load::<{ core::mem::size_of::<f64>() }, f64>(offset)
                    .unwrap()
                    .is_nan(),
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
    )]
    fn store_out_of_range_u128_max() {
        let x: u128 = u128::MAX;
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
    )]
    fn store_empty_linear_memory_u8() {
        let x: u8 = u8::MAX;
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
    )]
    fn load_out_of_range_u128_max() {
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
    )]
    fn load_empty_linear_memory_u8() {
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic]
    fn copy_out_of_bounds() {
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
    }
}