wasm/execution/store/linear_memory.rs

use core::{
    iter,
    sync::atomic::{AtomicU8, Ordering},
};

use alloc::vec::Vec;

use crate::{
    execution::little_endian::LittleEndianBytes,
    rw_spinlock::{ReadLockGuard, RwSpinLock},
    RuntimeError, TrapError,
};

/// Implementation of the linear memory suitable for concurrent access
///
/// Implements the base for the instructions described in
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
///
/// This linear memory implementation internally relies on a [`Vec<AtomicU8>`]. Thus, the atomic
/// unit of information is a byte (`u8`). All access to the linear memory internally occurs through
/// [`AtomicU8::load`] and [`AtomicU8::store`], avoiding the creation of shared and mutable
/// references to the internal data completely. This avoids undefined behavior. Racy multibyte
/// writes to the same data may still tear (i.e. the bytes of a multibyte value may stem from
/// different writes, while each individual byte atomically holds exactly one of the written
/// values). Because of this, the [`LinearMemory::store`] function does not require `&mut self` --
/// `&self` suffices.
///
/// The implementation of atomic stores to multibyte values requires a global write lock. Rust's
/// memory model considers partially overlapping atomic operations involving a write as undefined
/// behavior. As there is no way to predict whether an atomic multibyte store operation might
/// overlap with another store or load operation, only a lock at runtime can avoid this cause of
/// undefined behavior.
// TODO does it pay off to have more fine-granular locking for multibyte stores than a single global write lock?
///
/// # Notes on overflowing
///
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
/// perform bounds checking. Thus, they always have to ensure that `n + index <= linear_memory.len()`
/// holds true (i.e. `n + index - 1` must be a valid index into `linear_memory`). However,
/// writing that check as is bears the danger of an overflow: assuming that `n`, `index` and
/// `linear_memory.len()` are of the same integer type, `n + index` can overflow, resulting in
/// the check passing despite the access being out of bounds!
///
/// To avoid this, the bounds checks are carefully ordered to avoid any overflows:
///
/// - First we check that `n <= linear_memory.len()` holds true, ensuring that the number of bytes
///   to be accessed is indeed smaller than or equal to the linear memory's size. If this does not
///   hold true, continuing the operation would yield an out of bounds access in any case.
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
///   avoid the overflow, as there is no addition. The subtraction on the right-hand side can not
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
///   `linear_memory.len()`).
///
/// Combined in the given order, these two checks enable bounds checking without risking any
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
/// integer type.
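///
/// As a doc-test-style sketch (the standalone helper function is hypothetical, for illustration
/// only):
///
/// ```
/// fn in_bounds(len: usize, index: usize, n: usize) -> bool {
///     // check 1: `n` bytes fit into the linear memory at all
///     n <= len
///         // check 2: there is no addition that could overflow, and `len - n`
///         // can not underflow because check 1 established `n <= len`
///         && index <= len - n
/// }
/// assert!(in_bounds(10, 6, 4));   // bytes 6..10 are in bounds
/// assert!(!in_bounds(10, 7, 4));  // bytes 7..11 are out of bounds
/// // the naive check `index + n <= len` would wrap around here; this one does not
/// assert!(!in_bounds(10, usize::MAX, 4));
/// ```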
///
/// In addition, the Wasm specification requires a certain order of checks. For example, when a
/// `copy` instruction is emitted with a `count` of zero (i.e. no bytes to be copied), an out of
/// bounds index still has to cause a trap. To control the order of checks manually, use of slice
/// indexing is avoided altogether.
///
/// # Notes on locking
///
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
/// name, writes to the linear memory do not require the acquisition of a write lock. Non-atomic
/// or atomic single-byte writes are implemented through a shared ref to the internal vector, with
/// [`AtomicU8`] to achieve interior mutability without undefined behavior.
///
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a `grow`
/// can cause the vector's internal data buffer to be copied over to a bigger, fresh allocation.
/// The old buffer is then freed. Combined with concurrent access, this can cause a use-after-free.
/// To avoid this, a `grow` operation of the linear memory acquires a write lock, blocking all
/// reads/writes to the linear memory in the meantime.
///
/// # Unsafe Note
///
/// As the manual index checking ensures that all indices are valid, there is no need to re-check.
/// Therefore [`slice::get_unchecked`] is used to access the internal [`AtomicU8`]s in the vector
/// backing a [`LinearMemory`], necessitating the use of `unsafe`.
///
/// To gain some confidence in the correctness of the unsafe code in this module, run `miri`:
///
/// ```bash
/// cargo miri test --test memory # quick
/// cargo miri test # thorough
/// ```
// TODO if a memmap like operation is available, the linear memory implementation can be optimized brutally. Out-of-bounds accesses can be mapped to userspace-handled page faults, i.e. the MMU takes over the responsibility of catching out of bounds accesses. Grow can happen without copying of data, by mapping new pages consecutively after the current final page of the linear memory.
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
    inner_data: RwSpinLock<Vec<AtomicU8>>,
}

/// Type to express the page count
pub type PageCountTy = u16;

impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
    /// Size of a page in the linear memory, measured in bytes
    ///
    /// The Wasm specification demands a page size of 64 KiB, that is, `65536` bytes:
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
    const PAGE_SIZE: usize = PAGE_SIZE;

    /// Create a new, empty [`LinearMemory`]
    pub fn new() -> Self {
        Self {
            inner_data: RwSpinLock::new(Vec::new()),
        }
    }

    /// Create a new [`LinearMemory`] with a given number of initial pages, all zeroed
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
        let size_bytes = Self::PAGE_SIZE * usize::from(pages);
        let mut data = Vec::with_capacity(size_bytes);
        data.resize_with(size_bytes, || AtomicU8::new(0));

        Self {
            inner_data: RwSpinLock::new(data),
        }
    }

    /// Grow the [`LinearMemory`] by a number of pages
    pub fn grow(&self, pages_to_add: PageCountTy) {
        let mut lock_guard = self.inner_data.write();
        let prior_length_bytes = lock_guard.len();
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * usize::from(pages_to_add);
        lock_guard.resize_with(new_length_bytes, || AtomicU8::new(0));
    }

    /// Get the number of pages currently allocated to this [`LinearMemory`]
    pub fn pages(&self) -> PageCountTy {
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
    }

    /// Get the length in bytes currently allocated to this [`LinearMemory`]
    // TODO remove this op
    pub fn len(&self) -> usize {
        self.inner_data.read().len()
    }

    /// At a given index, store a datum in the [`LinearMemory`]
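    ///
    /// A usage sketch (values are hypothetical; the byte width `N` is inferred from the type's
    /// [`LittleEndianBytes`] impl):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// lin_mem.store(0, 0x1234_5678_9abc_def0_u128).unwrap();
    /// let x: u128 = lin_mem.load(0).unwrap();
    /// assert_eq!(x, 0x1234_5678_9abc_def0_u128);
    /// ```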
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: usize,
        value: T,
    ) -> Result<(), RuntimeError> {
        self.store_bytes::<N>(index, value.to_le_bytes())
    }

    /// At a given index, store a number of bytes `N` in the [`LinearMemory`]
    pub fn store_bytes<const N: usize>(
        &self,
        index: usize,
        bytes: [u8; N],
    ) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true:
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.

        if index > lock_guard.len() - N {
            error!("value write would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* do the store */
        for (i, byte) in bytes.into_iter().enumerate() {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `N` bytes can fit into
            //   the `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, writing all `N` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let dst = unsafe { lock_guard.get_unchecked(i + index) };
            dst.store(byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// From a given index, load a datum from the [`LinearMemory`]
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: usize,
    ) -> Result<T, RuntimeError> {
        self.load_bytes::<N>(index).map(T::from_le_bytes)
    }

    /// From a given index, load a number of bytes `N` from the [`LinearMemory`]
    pub fn load_bytes<const N: usize>(&self, index: usize) -> Result<[u8; N], RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check source for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true:
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.

        if index > lock_guard.len() - N {
            error!("value read would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        let mut bytes = [0; N];

        /* do the load */
        for (i, byte) in bytes.iter_mut().enumerate() {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `N` bytes can fit into
            //   the `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, reading all `N` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let src = unsafe { lock_guard.get_unchecked(i + index) };
            *byte = src.load(Ordering::Relaxed);
        }

        Ok(bytes)
    }

    /// Implementation of the behavior described in
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
    ///
    /// Note that the Wasm spec defines the behavior by recursion, while our implementation uses an
    /// iterative loop of atomic byte stores.
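    ///
    /// A usage sketch (values are hypothetical):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// lin_mem.fill(16, 0xAB, 4).unwrap();
    /// assert_eq!(lin_mem.load_bytes::<4>(16).unwrap(), [0xAB; 4]);
    /// // per the spec's check order, even a zero-length fill traps when its index is out of
    /// // bounds
    /// assert!(lin_mem.fill(65537, 0, 0).is_err());
    /// ```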
    pub fn fill(&self, index: usize, data_byte: u8, count: usize) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard.len() {
            error!("fill count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if index > lock_guard.len() - count {
            error!("fill extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* do the fill */
        // Specification steps 14-21.
        for i in index..(index + count) {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` bytes can fit
            //   into the `LinearMemory` `&self`
            // - the second if statement in this function guarantees that even with the offset
            //   `index`, writing all `count` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `&self`
            let lin_mem_byte = unsafe { lock_guard.get_unchecked(i) };
            lin_mem_byte.store(data_byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
    /// different linear memory
    ///
    /// - Both regions may overlap
    /// - Copies the `count` bytes starting from `source_index`, overwriting the `count` bytes
    ///   starting from `destination_index`
    ///
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-copy>
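    ///
    /// A sketch of an overlapping, in-place copy (values are hypothetical):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// lin_mem.store_bytes(0, [1, 2, 3, 4]).unwrap();
    /// // the destination region 2..6 overlaps the source region 0..4; because the destination
    /// // index is bigger, the bytes are copied in reverse order to avoid data loss
    /// lin_mem.copy(2, &lin_mem, 0, 4).unwrap();
    /// assert_eq!(lin_mem.load_bytes::<6>(0).unwrap(), [1, 2, 1, 2, 3, 4]);
    /// ```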
    pub fn copy(
        &self,
        destination_index: usize,
        source_mem: &Self,
        source_index: usize,
        count: usize,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();

        // other is the source
        let lock_guard_other = source_mem.inner_data.read();

        /* check source for out of bounds access */
        // Specification step 12.
        if count > lock_guard_other.len() {
            error!("copy count is bigger than the source linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if source_index > lock_guard_other.len() - count {
            error!("copy source extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard_self.len() {
            error!("copy count is bigger than the destination linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if destination_index > lock_guard_self.len() - count {
            error!("copy destination extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* do the copy */
        let copy_one_byte = move |i| {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` bytes can fit
            //   into the `LinearMemory` `source_mem`
            // - the second if statement in this function guarantees that even with the offset
            //   `source_index`, reading all `count` bytes does not extend beyond the last byte in
            //   the `LinearMemory` `source_mem`
            let src_byte: &AtomicU8 = unsafe { lock_guard_other.get_unchecked(i + source_index) };

            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the third if statement in this function guarantees that `count` bytes can fit
            //   into the `LinearMemory` `&self`
            // - the fourth if statement in this function guarantees that even with the offset
            //   `destination_index`, writing all `count` bytes does not extend beyond the last
            //   byte in the `LinearMemory` `&self`
            let dst_byte: &AtomicU8 =
                unsafe { lock_guard_self.get_unchecked(i + destination_index) };

            let byte = src_byte.load(Ordering::Relaxed);
            dst_byte.store(byte, Ordering::Relaxed);
        };

        // TODO investigate whether it is worth doing the reverse order copy only when the regions actually overlap

        // Specification step 14.
        if destination_index <= source_index {
            // if the source index is bigger than or equal to the destination index, a forward
            // processing copy handles overlaps just fine
            (0..count).for_each(copy_one_byte)
        }
        // Specification step 15.
        else {
            // if the source index is smaller than the destination index, backward processing is
            // required to avoid data loss on overlaps
            (0..count).rev().for_each(copy_one_byte)
        }

        Ok(())
    }

    /// Initialize a region of this [`LinearMemory`] with `count` bytes from `source_data`,
    /// starting at `source_index` in the data and `destination_index` in the memory
    ///
    /// Rationale behind having `source_index` and `count` when the callsite could also just
    /// create a subslice for `source_data`: this keeps all the index error checks in one place.
    ///
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-init-x>
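    ///
    /// A usage sketch, initializing the linear memory from a data segment (values are
    /// hypothetical):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// let segment = [10, 20, 30, 40];
    /// // copy the two bytes [20, 30] (starting at source index 1) to 8..10
    /// lin_mem.init(8, &segment, 1, 2).unwrap();
    /// assert_eq!(lin_mem.load_bytes::<2>(8).unwrap(), [20, 30]);
    /// ```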
    pub fn init(
        &self,
        destination_index: usize,
        source_data: &[u8],
        source_index: usize,
        count: usize,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();
        let data_len = source_data.len();

        /* check source for out of bounds access */
        // Specification step 16.
        if count > data_len {
            error!("init count is bigger than the data instance");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if source_index > data_len - count {
            error!("init source extends beyond the data instance's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 16.
        if count > lock_guard_self.len() {
            error!("init count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if destination_index > lock_guard_self.len() - count {
            error!("init extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 17.
        if count == 0 {
            return Ok(());
        }

        /* do the init */
        // Specification steps 18-27.
        for i in 0..count {
            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the first if statement in this function guarantees that `count` bytes can fit
            //   into the data slice `source_data`
            // - the second if statement in this function guarantees that even with the offset
            //   `source_index`, reading all `count` bytes does not extend beyond the last byte in
            //   the data slice `source_data`
            let src_byte = unsafe { source_data.get_unchecked(i + source_index) };

            // SAFETY:
            // The safety of this `unsafe` block depends on the index being valid, which it is
            // because:
            //
            // - the third if statement in this function guarantees that `count` bytes can fit
            //   into the `LinearMemory` `&self`
            // - the fourth if statement in this function guarantees that even with the offset
            //   `destination_index`, writing all `count` bytes does not extend beyond the last
            //   byte in the `LinearMemory` `&self`
            let dst_byte = unsafe { lock_guard_self.get_unchecked(i + destination_index) };
            dst_byte.store(*src_byte, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Allows a given closure to temporarily access the entire memory as a
    /// `&mut [u8]`.
    ///
    /// # Note on locking
    ///
    /// This operation exclusively locks the entire linear memory for the
    /// duration of this function call. To acquire the lock, this function may
    /// also block until the lock is available.
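    ///
    /// A usage sketch (values are hypothetical):
    ///
    /// ```ignore
    /// let lin_mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// let checksum: u32 = lin_mem.access_mut_slice(|bytes| {
    ///     bytes[0] = 42;
    ///     bytes.iter().map(|&b| u32::from(b)).sum()
    /// });
    /// assert_eq!(checksum, 42);
    /// ```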
    pub fn access_mut_slice<R>(&self, accessor: impl FnOnce(&mut [u8]) -> R) -> R {
        /// Converts an exclusively borrowed slice of atomic `u8`s to a slice of
        /// non-atomic `u8`s
        // TODO when `atomic_from_mut` is stabilized, replace this function with
        // `AtomicU8::get_mut_slice`
        fn atomic_u8_get_mut_slice(slice: &mut [AtomicU8]) -> &mut [u8] {
            // SAFETY: the exclusive reference guarantees unique access, and `AtomicU8` has the
            // same in-memory representation as `u8`
            unsafe { &mut *(slice as *mut [AtomicU8] as *mut [u8]) }
        }

        let mut write_lock_guard = self.inner_data.write();
        let non_atomic_slice = atomic_u8_get_mut_slice(&mut write_lock_guard);
        accessor(non_atomic_slice)
    }
}

impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        /// A helper struct for formatting a [`Vec<AtomicU8>`] which is guarded by a [`ReadLockGuard`].
        /// This formatter is able to detect and format byte repetitions in a compact way.
        struct RepetitionDetectingMemoryWriter<'a>(ReadLockGuard<'a, Vec<AtomicU8>>);
        impl core::fmt::Debug for RepetitionDetectingMemoryWriter<'_> {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                /// The number of repetitions required for successive elements to be grouped
                /// together.
                const MIN_REPETITIONS_FOR_GROUP: usize = 8;

                // First we create an iterator over all bytes
                let mut bytes = self.0.iter().map(|x| x.load(Ordering::Relaxed));

                // Then we iterate over all bytes and deduplicate repetitions. This produces an
                // iterator of pairs, consisting of the number of repetitions and the repeated byte
                // itself. `current_group` is captured by the iterator and used as state to track
                // the current group.
                let mut current_group: Option<(usize, u8)> = None;
                let deduplicated_with_count = iter::from_fn(|| {
                    for byte in bytes.by_ref() {
                        // If the next byte is different from the one being tracked currently...
                        if current_group.is_some() && current_group.unwrap().1 != byte {
                            // ...then end and emit the current group, but also start a new group
                            // for the next byte with an initial count of 1.
                            return current_group.replace((1, byte));
                        }
                        // Otherwise increment the current group's counter, or start a new group if
                        // this was the first byte.
                        current_group.get_or_insert((0, byte)).0 += 1;
                    }
                    // In the end, when there are no more bytes to read, directly emit the last
                    // group.
                    current_group.take()
                });

                // Finally we use `DebugList` to print a list of all groups, while writing out all
                // elements from groups with fewer than `MIN_REPETITIONS_FOR_GROUP` elements.
                let mut list = f.debug_list();
                deduplicated_with_count.for_each(|(count, value)| {
                    if count < MIN_REPETITIONS_FOR_GROUP {
                        list.entries(iter::repeat_n(value, count));
                    } else {
                        list.entry(&format_args!("#{count} × {value}"));
                    }
                });
                list.finish()
            }
        }

        // Format the linear memory by using Rust's formatter helpers and the previously defined
        // `RepetitionDetectingMemoryWriter`
        f.debug_struct("LinearMemory")
            .field(
                "inner_data",
                &RepetitionDetectingMemoryWriter(self.inner_data.read()),
            )
            .finish()
    }
}

impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod test {
    use core::{f64, mem};

    use alloc::format;

    use crate::value::{F32, F64};

    use super::*;

    const PAGE_SIZE: usize = 1 << 8;
    const PAGES: PageCountTy = 2;

    #[test]
    fn new_constructor() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        assert_eq!(lin_mem.pages(), 0);
    }

    #[test]
    fn new_grow() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        lin_mem.grow(1);
        assert_eq!(lin_mem.pages(), 1);
    }

    #[test]
    fn debug_print_simple() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        assert_eq!(lin_mem.pages(), 1);

        let expected = format!("LinearMemory {{ inner_data: [#{PAGE_SIZE} × 0] }}");
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_complex() {
        let page_count = 2;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(page_count);
        assert_eq!(lin_mem.pages(), page_count);

        lin_mem.store(1, 0xffu8).unwrap();
        lin_mem.store(10, 1u8).unwrap();
        lin_mem.store(200, 0xffu8).unwrap();

        let expected = "LinearMemory { inner_data: [0, 255, #8 × 0, 1, #189 × 0, 255, #311 × 0] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_empty() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(0);
        assert_eq!(lin_mem.pages(), 0);

        let expected = "LinearMemory { inner_data: [] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn roundtrip_normal_range_i8_neg127() {
        let x: i8 = -127;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
        for offset in 0..highest_legal_offset {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f32_13() {
        let x = F32(13.0);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F32>();
        for offset in 0..highest_legal_offset {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F32>() }, F32>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_min() {
        let x = F64(f64::MIN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F64>();
        for offset in 0..highest_legal_offset {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_nan() {
        let x = F64(f64::NAN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
        for offset in 0..highest_legal_offset {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap()
                    .is_nan(),
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_out_of_range_u128_max() {
        let x: u128 = u128::MAX;
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_empty_linear_memory_u8() {
        let x: u8 = u8::MAX;
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_out_of_range_u128_max() {
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_empty_linear_memory_u8() {
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic]
    fn copy_out_of_bounds() {
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
    }
}