wasm/execution/store/linear_memory.rs

use core::{
    iter,
    sync::atomic::{AtomicU8, Ordering},
};

use alloc::vec::Vec;

use crate::{
    core::indices::MemIdx,
    execution::little_endian::LittleEndianBytes,
    rw_spinlock::{ReadLockGuard, RwSpinLock},
    RuntimeError, TrapError,
};

/// Implementation of the linear memory suitable for concurrent access
///
/// Implements the base for the instructions described in
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
///
/// This linear memory implementation internally relies on a [`Vec<AtomicU8>`]. Thus, its atomic unit
/// of information is a byte (`u8`). All accesses to the linear memory internally occur through
/// [`AtomicU8::load`] and [`AtomicU8::store`], avoiding the creation of shared and mutable
/// references to the internal data entirely. This avoids undefined behavior. Racy multibyte writes
/// to the same data may tear, however (i.e. for each byte, exactly one of the concurrent writes is
/// effectively written, so a multibyte value may end up mixed from several writes). Because of
/// this, the [`LinearMemory::store`] function does not require `&mut self` -- `&self` suffices.
///
/// The implementation of atomic stores to multibyte values requires a global write lock. Rust's
/// memory model considers partially overlapping atomic operations involving a write to be undefined
/// behavior. As there is no way to predict whether an atomic multibyte store operation might overlap
/// with another store or load operation, only a lock at runtime can avoid this cause of undefined
/// behavior.
// TODO does it pay off to have more fine-grained locking for multibyte stores than a single global write lock?
///
/// # Notes on overflowing
///
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
/// perform bounds checking. Thus, they always have to ensure that `n + index <= linear_memory.len()`
/// holds true (i.e. `n + index - 1` must be a valid index into `linear_memory`). However,
/// writing that check as-is bears the danger of an overflow: assuming that `n`, `index` and
/// `linear_memory.len()` are of the same integer type, `n + index` can overflow, resulting in
/// the check passing despite the access being out of bounds!
///
/// To avoid this, the bounds checks are carefully ordered so that no overflow can occur:
///
/// - First we check that `n <= linear_memory.len()` holds true, ensuring that the number of bytes
///   to be accessed is at most the linear memory's size. If this does not hold true, continuing
///   the operation would yield an out of bounds access in any case.
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
///   avoid the overflow, as there is no addition. The subtraction on the right-hand side can not
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
///   `linear_memory.len()`).
///
/// Combined in the given order, these two checks enable bounds checking without risking any
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
/// integer type.
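///
/// As a minimal sketch of this ordering (illustrative only; the helper name `in_bounds` is
/// hypothetical, the actual checks are written inline in the methods below):
///
/// ```ignore
/// fn in_bounds(index: usize, n: usize, len: usize) -> bool {
///     // First check: the number of accessed bytes must fit into the linear memory at all.
///     if n > len {
///         return false;
///     }
///     // Second check: the start index must leave room for `n` bytes. `len - n` cannot
///     // underflow because of the first check, and no addition is performed, so nothing
///     // can overflow.
///     index <= len - n
/// }
/// ```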
///
/// In addition, the Wasm specification requires a certain order of checks. For example, when a
/// `copy` instruction is emitted with a `count` of zero (i.e. no bytes to be copied), an out of
/// bounds index still has to cause a trap. To control the order of checks manually, use of slice
/// indexing is avoided altogether.
///
/// # Notes on locking
///
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
/// name, writes to the linear memory do not require acquisition of a write lock. Non-atomic
/// and atomic single-byte writes are implemented through a shared reference to the internal
/// vector, using [`AtomicU8`] to achieve interior mutability without undefined behavior.
///
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a `grow`
/// can result in the vector's internal data buffer being copied over to a bigger, fresh allocation.
/// The old buffer is then freed. Combined with concurrent access, this can cause a use-after-free.
/// To avoid this, a `grow` operation of the linear memory acquires a write lock, blocking all
/// reads and writes to the linear memory in the meantime.
///
/// # Unsafe Note
///
/// As the manual index checking ensures that all indices are valid, there is no need to re-check.
/// Therefore [`slice::get_unchecked`] is used to access the internal [`AtomicU8`]s in the vector
/// backing a [`LinearMemory`], which implies the use of `unsafe`.
///
/// To gain some confidence in the correctness of the unsafe code in this module, run `miri`:
///
/// ```bash
/// cargo miri test --test memory # quick
/// cargo miri test # thorough
/// ```
// TODO if a memmap-like operation is available, the linear memory implementation can be optimized brutally. Out-of-bounds accesses can be mapped to userspace-handled page faults, i.e. the MMU takes over the responsibility of catching out of bounds accesses. Grow can happen without copying of data, by mapping new pages consecutively after the current final page of the linear memory.
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
    inner_data: RwSpinLock<Vec<AtomicU8>>,
}

/// Type to express the page count
pub type PageCountTy = u16;

impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
    /// Size of a page in the linear memory, measured in bytes
    ///
    /// The WASM specification demands a page size of 64 KiB, that is `65536` bytes:
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
    const PAGE_SIZE: usize = PAGE_SIZE;

    /// Create a new, empty [`LinearMemory`]
    pub fn new() -> Self {
        Self {
            inner_data: RwSpinLock::new(Vec::new()),
        }
    }

    /// Create a new [`LinearMemory`] with the given number of initial pages, all bytes zeroed
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
        let size_bytes = Self::PAGE_SIZE * pages as usize;
        let mut data = Vec::with_capacity(size_bytes);
        data.resize_with(size_bytes, || AtomicU8::new(0));

        Self {
            inner_data: RwSpinLock::new(data),
        }
    }

    /// Grow the [`LinearMemory`] by a number of pages
    pub fn grow(&self, pages_to_add: PageCountTy) {
        let mut lock_guard = self.inner_data.write();
        let prior_length_bytes = lock_guard.len();
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * pages_to_add as usize;
        lock_guard.resize_with(new_length_bytes, || AtomicU8::new(0));
    }

    /// Get the number of pages currently allocated to this [`LinearMemory`]
    pub fn pages(&self) -> PageCountTy {
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
    }

    /// Get the length in bytes currently allocated to this [`LinearMemory`]
    // TODO remove this op
    pub fn len(&self) -> usize {
        self.inner_data.read().len()
    }

    /// At a given index, store a datum in the [`LinearMemory`]
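    ///
    /// A minimal usage sketch (illustrative only; assumes a 256-byte page size as in the tests
    /// below and that `u32` implements [`LittleEndianBytes`]):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<256>::new_with_initial_pages(1);
    /// // Store a `u32` in little-endian byte order at index 8 ...
    /// lin_mem.store(8, 0x1234_5678_u32).unwrap();
    /// // ... and load it back.
    /// let value: u32 = lin_mem.load(8).unwrap();
    /// assert_eq!(value, 0x1234_5678);
    /// ```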
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
        value: T,
    ) -> Result<(), RuntimeError> {
        self.store_bytes::<N>(index, value.to_le_bytes())
    }

    /// At a given index, store a number of bytes `N` in the [`LinearMemory`]
    pub fn store_bytes<const N: usize>(
        &self,
        index: MemIdx,
        bytes: [u8; N],
    ) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - N {
            error!("value write would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* do the store */
        for (i, byte) in bytes.into_iter().enumerate() {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that a `T` can fit into the
            //   `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, writing all of `value`'s bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let dst = unsafe { lock_guard.get_unchecked(i + index) };
            dst.store(byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// From a given index, load a datum from the [`LinearMemory`]
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
    ) -> Result<T, RuntimeError> {
        self.load_bytes::<N>(index).map(T::from_le_bytes)
    }

    /// From a given index, load a number of bytes `N` from the [`LinearMemory`]
    pub fn load_bytes<const N: usize>(&self, index: MemIdx) -> Result<[u8; N], RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check source for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - N {
            error!("value read would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        let mut bytes = [0; N];

        /* do the load */
        for (i, byte) in bytes.iter_mut().enumerate() {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that a `T` can fit into the
            //   `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, reading all `N` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let src = unsafe { lock_guard.get_unchecked(i + index) };
            *byte = src.load(Ordering::Relaxed);
        }

        Ok(bytes)
    }

    /// Implementation of the behavior described in
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
    ///
    /// Note that the WASM spec defines the behavior by recursion, while our implementation uses a
    /// memset-like loop of relaxed atomic byte stores.
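    ///
    /// A minimal usage sketch (illustrative only; assumes a 256-byte page size as in the tests
    /// below):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<256>::new_with_initial_pages(1);
    /// // Set the 16 bytes starting at index 16 to 0xAB.
    /// lin_mem.fill(16, 0xAB, 16).unwrap();
    /// assert_eq!(lin_mem.load_bytes::<4>(16).unwrap(), [0xAB; 4]);
    /// ```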
    pub fn fill(&self, index: MemIdx, data_byte: u8, count: MemIdx) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard.len() {
            error!("fill count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if index > lock_guard.len() - count {
            error!("fill extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* do the fill */
        // Specification step 14-21.
        for i in index..(index + count) {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` elements can fit
            //   into the `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, writing all `count` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let lin_mem_byte = unsafe { lock_guard.get_unchecked(i) };
            lin_mem_byte.store(data_byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
    /// different linear memory
    ///
    /// - Both regions may overlap
    /// - Copies the `count` bytes starting at `source_index` in `source_mem`, overwriting the
    ///   `count` bytes starting at `destination_index` in `self`
    ///
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-copy>
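    ///
    /// A minimal usage sketch of an overlapping copy within a single linear memory (illustrative
    /// only; assumes a 256-byte page size as in the tests below):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<256>::new_with_initial_pages(1);
    /// lin_mem.store_bytes(0, [1, 2, 3, 4]).unwrap();
    /// // Copy bytes 0..4 onto bytes 2..6 of the same memory. Because `destination_index`
    /// // is bigger than `source_index`, the copy proceeds backwards to handle the overlap.
    /// lin_mem.copy(2, &lin_mem, 0, 4).unwrap();
    /// assert_eq!(lin_mem.load_bytes::<6>(0).unwrap(), [1, 2, 1, 2, 3, 4]);
    /// ```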
    pub fn copy(
        &self,
        destination_index: MemIdx,
        source_mem: &Self,
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();

        // other is the source
        let lock_guard_other = source_mem.inner_data.read();

        /* check source for out of bounds access */
        // Specification step 12.
        if count > lock_guard_other.len() {
            error!("copy count is bigger than the source linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if source_index > lock_guard_other.len() - count {
            error!("copy source extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard_self.len() {
            error!("copy count is bigger than the destination linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if destination_index > lock_guard_self.len() - count {
            error!("copy destination extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* do the copy */
        let copy_one_byte = move |i| {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` elements can fit
            //   into the `LinearMemory` `source_mem`
            // - the second if statement in this function guarantees that even with the offset
            //   `source_index`, reading all `count` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `source_mem`
            let src_byte: &AtomicU8 = unsafe { lock_guard_other.get_unchecked(i + source_index) };

            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the third if statement in this function guarantees that `count` elements can fit
            //   into the `LinearMemory` `&self`
            // - the fourth if statement in this function guarantees that even with the offset
            //   `destination_index`, writing all `count` bytes does not extend beyond the last
            //   byte in the `LinearMemory` `&self`
            let dst_byte: &AtomicU8 =
                unsafe { lock_guard_self.get_unchecked(i + destination_index) };

            let byte = src_byte.load(Ordering::Relaxed);
            dst_byte.store(byte, Ordering::Relaxed);
        };

        // TODO investigate if it is worth it to only do a reverse-order copy if there is actual overlap

        // Specification step 14.
        if destination_index <= source_index {
            // if the source index is bigger than or equal to the destination index, a forward
            // processing copy handles overlaps just fine
            (0..count).for_each(copy_one_byte)
        }
        // Specification step 15.
        else {
            // if the source index is smaller than the destination index, backward processing is
            // required to avoid data loss on overlaps
            (0..count).rev().for_each(copy_one_byte)
        }

        Ok(())
    }

    // Rationale behind having `source_index` and `count` when the call site could also just pass a
    // subslice as `source_data`: this keeps all the index error checks in one place.
    //
    // <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-init-x>
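    //
    // A minimal usage sketch (illustrative only; assumes a 256-byte page size as in the tests
    // below):
    //
    //     let lin_mem = LinearMemory::<256>::new_with_initial_pages(1);
    //     let data: [u8; 4] = [10, 20, 30, 40];
    //     // Copy `data[1..3]` into the linear memory at index 5.
    //     lin_mem.init(5, &data, 1, 2).unwrap();
    //     assert_eq!(lin_mem.load_bytes::<2>(5).unwrap(), [20, 30]);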
    pub fn init(
        &self,
        destination_index: MemIdx,
        source_data: &[u8],
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();
        let data_len = source_data.len();

        /* check source for out of bounds access */
        // Specification step 16.
        if count > data_len {
            error!("init count is bigger than the data instance");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if source_index > data_len - count {
            error!("init source extends beyond the data instance's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 16.
        if count > lock_guard_self.len() {
            error!("init count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if destination_index > lock_guard_self.len() - count {
            error!("init extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 17.
        if count == 0 {
            return Ok(());
        }

        /* do the init */
        // Specification step 18-27.
        for i in 0..count {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` elements can fit
            //   into the data instance `source_data`
            // - the second if statement in this function guarantees that even with the offset
            //   `source_index`, reading all `count` bytes does not extend beyond the last byte in
            //   the data instance `source_data`
            let src_byte = unsafe { source_data.get_unchecked(i + source_index) };

            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the third if statement in this function guarantees that `count` elements can fit
            //   into the `LinearMemory` `&self`
            // - the fourth if statement in this function guarantees that even with the offset
            //   `destination_index`, writing all `count` bytes does not extend beyond the last
            //   byte in the `LinearMemory` `&self`
            let dst_byte = unsafe { lock_guard_self.get_unchecked(i + destination_index) };
            dst_byte.store(*src_byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Allows a given closure to temporarily access the entire memory as a
    /// `&mut [u8]`.
    ///
    /// # Note on locking
    ///
    /// This operation exclusively locks the entire linear memory for the
    /// duration of this function call. To acquire the lock, this function may
    /// also block until the lock is available.
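    ///
    /// A minimal usage sketch (illustrative only; assumes a 256-byte page size as in the tests
    /// below):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<256>::new_with_initial_pages(1);
    /// lin_mem.access_mut_slice(|bytes| {
    ///     // The whole linear memory is visible as one contiguous `&mut [u8]`.
    ///     bytes[0] = 42;
    /// });
    /// assert_eq!(lin_mem.load::<1, u8>(0).unwrap(), 42);
    /// ```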
    pub fn access_mut_slice<R>(&self, accessor: impl FnOnce(&mut [u8]) -> R) -> R {
        /// Converts an exclusively borrowed slice of atomic `u8`s to a slice of
        /// non-atomic `u8`s
        // TODO when `atomic_from_mut` is stabilized, replace this function with
        // `AtomicU8::get_mut_slice`
        fn atomic_u8_get_mut_slice(slice: &mut [AtomicU8]) -> &mut [u8] {
            // SAFETY: the exclusive reference guarantees unique access, and `AtomicU8` has the
            // same in-memory representation as `u8`
            unsafe { &mut *(slice as *mut [AtomicU8] as *mut [u8]) }
        }

        let mut write_lock_guard = self.inner_data.write();
        let non_atomic_slice = atomic_u8_get_mut_slice(&mut write_lock_guard);
        accessor(non_atomic_slice)
    }
}

impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        /// A helper struct for formatting a [`Vec<AtomicU8>`] which is guarded by a [`ReadLockGuard`].
        /// This formatter is able to detect and format byte repetitions in a compact way.
        struct RepetitionDetectingMemoryWriter<'a>(ReadLockGuard<'a, Vec<AtomicU8>>);
        impl core::fmt::Debug for RepetitionDetectingMemoryWriter<'_> {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                /// The number of repetitions required for successive elements to be grouped
                /// together.
                const MIN_REPETITIONS_FOR_GROUP: usize = 8;

                // First we create an iterator over all bytes
                let mut bytes = self.0.iter().map(|x| x.load(Ordering::Relaxed));

                // Then we iterate over all bytes and deduplicate repetitions. This produces an
                // iterator of pairs, consisting of the number of repetitions and the repeated byte
                // itself. `current_group` is captured by the iterator and used as state to track
                // the current group.
                let mut current_group: Option<(usize, u8)> = None;
                let deduplicated_with_count = iter::from_fn(|| {
                    for byte in bytes.by_ref() {
                        // If the next byte is different from the one being tracked currently...
                        if current_group.is_some() && current_group.unwrap().1 != byte {
                            // ...then end and emit the current group, but also start a new group
                            // for the next byte with an initial count of 1.
                            return current_group.replace((1, byte));
                        }
                        // Otherwise increment the current group's counter or start a new group if
                        // this was the first byte.
                        current_group.get_or_insert((0, byte)).0 += 1;
                    }
                    // In the end, when there are no more bytes to read, directly emit the last
                    // group.
                    current_group.take()
                });

                // Finally we use `DebugList` to print a list of all groups, while writing out all
                // elements from groups with less than `MIN_REPETITIONS_FOR_GROUP` elements.
                let mut list = f.debug_list();
                deduplicated_with_count.for_each(|(count, value)| {
                    if count < MIN_REPETITIONS_FOR_GROUP {
                        list.entries(iter::repeat(value).take(count));
                    } else {
                        list.entry(&format_args!("#{count} × {value}"));
                    }
                });
                list.finish()
            }
        }

        // Format the linear memory by using Rust's formatter helpers and the previously defined
        // `RepetitionDetectingMemoryWriter`
        f.debug_struct("LinearMemory")
            .field(
                "inner_data",
                &RepetitionDetectingMemoryWriter(self.inner_data.read()),
            )
            .finish()
    }
}

impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod test {
    use core::f64;

    use alloc::format;
    use core::mem;

    use crate::value::{F32, F64};

    use super::*;

    const PAGE_SIZE: usize = 1 << 8;
    const PAGES: PageCountTy = 2;

    #[test]
    fn new_constructor() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        assert_eq!(lin_mem.pages(), 0);
    }

    #[test]
    fn new_grow() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        lin_mem.grow(1);
        assert_eq!(lin_mem.pages(), 1);
    }

    #[test]
    fn debug_print_simple() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        assert_eq!(lin_mem.pages(), 1);

        let expected = format!("LinearMemory {{ inner_data: [#{PAGE_SIZE} × 0] }}");
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_complex() {
        let page_count = 2;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(page_count);
        assert_eq!(lin_mem.pages(), page_count);

        lin_mem.store(1, 0xffu8).unwrap();
        lin_mem.store(10, 1u8).unwrap();
        lin_mem.store(200, 0xffu8).unwrap();

        let expected = "LinearMemory { inner_data: [0, 255, #8 × 0, 1, #189 × 0, 255, #311 × 0] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_empty() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(0);
        assert_eq!(lin_mem.pages(), 0);

        let expected = "LinearMemory { inner_data: [] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn roundtrip_normal_range_i8_neg127() {
        let x: i8 = -127;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f32_13() {
        let x = F32(13.0);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F32>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F32>() }, F32>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_min() {
        let x = F64(f64::MIN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_nan() {
        let x = F64(f64::NAN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap()
                    .is_nan(),
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_out_of_range_u128_max() {
        let x: u128 = u128::MAX;
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_empty_linear_memory_u8() {
        let x: u8 = u8::MAX;
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_out_of_range_u128_max() {
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_empty_linear_memory_u8() {
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic]
    fn copy_out_of_bounds() {
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
    }
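
    // The following two tests are additional minimal examples, based directly on the `fill` and
    // `init` signatures above; they exercise paths that the tests above do not cover.

    #[test]
    fn fill_sets_requested_range_only() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);

        // Set the 16 bytes starting at index 16 to 0xAB.
        lin_mem.fill(16, 0xAB, 16).unwrap();

        assert_eq!(lin_mem.load_bytes::<16>(16).unwrap(), [0xAB; 16]);
        // Bytes just outside the filled range stay zeroed.
        assert_eq!(lin_mem.load::<1, u8>(15).unwrap(), 0);
        assert_eq!(lin_mem.load::<1, u8>(32).unwrap(), 0);
    }

    #[test]
    fn init_copies_subslice_of_data() {
        let data: [u8; 4] = [10, 20, 30, 40];
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);

        // Copy `data[1..3]` into the linear memory at index 5.
        lin_mem.init(5, &data, 1, 2).unwrap();

        assert_eq!(lin_mem.load_bytes::<2>(5).unwrap(), [20, 30]);
    }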
}