// wasm/execution/linear_memory.rs

use core::{cell::UnsafeCell, iter, ptr};

use alloc::vec::Vec;

use crate::{
    core::indices::MemIdx,
    execution::little_endian::LittleEndianBytes,
    rw_spinlock::{ReadLockGuard, RwSpinLock},
    RuntimeError, TrapError,
};

/// Implementation of the linear memory suitable for concurrent access
///
/// Implements the base for the instructions described in
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
///
/// This linear memory implementation internally relies on a `Vec<UnsafeCell<u8>>`. Thus, the atomic
/// unit of information for it is a byte (`u8`). All access to the linear memory internally occurs
/// through pointers, completely avoiding the creation of shared and mutable references to the
/// internal data. This avoids undefined behavior, except for the race condition inherent to
/// concurrent writes. Because of this, the [`LinearMemory::store`] function does not require
/// `&mut self` -- `&self` suffices.
///
/// # Notes on overflowing
///
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
/// perform bounds checking. Thus they always have to ensure that `n + index <= linear_memory.len()`
/// holds true (i.e. `n + index - 1` must be a valid index into `linear_memory`). However, writing
/// that check as is bears the danger of an overflow: assuming that `n`, `index` and
/// `linear_memory.len()` are of the same integer type, `n + index` can overflow, resulting in
/// the check passing despite the access being out of bounds!
///
/// To avoid this, the bounds checks are carefully ordered to avoid any overflows:
///
/// - First we check that `n <= linear_memory.len()` holds true, ensuring that the number of bytes
///   to be accessed is indeed smaller than or equal to the linear memory's size. If this does not
///   hold true, continuing the operation would yield an out-of-bounds access in any case.
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
///   avoid the overflow, as there is no addition. The subtraction on the right-hand side can not
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
///   `linear_memory.len()`).
///
/// Combined in the given order, these two checks enable bounds checking without risking any
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
/// integer type.
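///
/// As a compact, standalone sketch of this pattern (illustrative only; the function name is a
/// placeholder, the real checks live in the `store`/`load`/`fill`/`copy`/`init` implementations
/// below):
///
/// ```rust
/// fn access_in_bounds(n: usize, index: usize, len: usize) -> bool {
///     // First check: `n` bytes fit into the memory at all. Second check: `len - n` can not
///     // underflow thanks to the first check, and there is no addition that could overflow.
///     n <= len && index <= len - n
/// }
///
/// assert!(access_in_bounds(4, 0, 4)); // exactly fills the memory
/// assert!(!access_in_bounds(4, 1, 4)); // would extend one byte past the end
/// assert!(!access_in_bounds(5, 0, 4)); // does not fit at all
/// ```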
///
/// # Notes on locking
///
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
/// name, writes to the linear memory do not require acquiring a write lock. Writes are
/// implemented through a shared ref to the internal vector, with an `UnsafeCell` to achieve
/// interior mutability.
///
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a grow can
/// cause the vector's internal data buffer to be copied over to a bigger, fresh allocation.
/// The old buffer is then freed. Combined with concurrent mutable access, this can cause a
/// use-after-free. To avoid this, a grow operation of the linear memory acquires a write lock,
/// blocking all reads from and writes to the linear memory in the meantime.
///
/// # Unsafe Note
///
/// Raw pointer access is required, because concurrent mutation of the linear memory might happen
/// (consider the threading proposal for WASM, where multiple WASM threads access the same linear
/// memory at the same time). The inherent race condition results in UB w/r/t the state of the
/// `u8`s in the inner data. However, this is tolerable: avoiding race conditions on the state of
/// the linear memory can not be the task of the interpreter, but has to be fulfilled by the
/// interpreted bytecode itself.
///
/// To gain some confidence in the correctness of the unsafe code in this module, run `miri`:
///
/// ```bash
/// cargo miri test --test memory # quick
/// cargo miri test # thorough
/// ```
// TODO if a memmap like operation is available, the linear memory implementation can be optimized
// brutally. Out-of-bounds access can be mapped to userspace handled page faults, i.e. the MMU
// takes over the responsibility of catching out-of-bounds accesses. Grow can happen without
// copying of data, by mapping new pages consecutively after the current final page of the linear
// memory.
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
    inner_data: RwSpinLock<Vec<UnsafeCell<u8>>>,
}

/// Type to express the page count
pub type PageCountTy = u16;

impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
    /// Size of a page in the linear memory, measured in bytes
    ///
    /// The WASM specification demands a page size of 64 KiB, that is `65536` bytes:
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
    const PAGE_SIZE: usize = PAGE_SIZE;

    /// Create a new, empty [`LinearMemory`]
    pub fn new() -> Self {
        Self {
            inner_data: RwSpinLock::new(Vec::new()),
        }
    }

    /// Create a new [`LinearMemory`] with the given number of initial pages, all zeroed
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
        let size_bytes = Self::PAGE_SIZE * pages as usize;
        let mut data = Vec::with_capacity(size_bytes);
        data.resize_with(size_bytes, || UnsafeCell::new(0));

        Self {
            inner_data: RwSpinLock::new(data),
        }
    }

    /// Grow the [`LinearMemory`] by a number of pages
    pub fn grow(&self, pages_to_add: PageCountTy) {
        let mut lock_guard = self.inner_data.write();
        let prior_length_bytes = lock_guard.len();
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * pages_to_add as usize;
        lock_guard.resize_with(new_length_bytes, || UnsafeCell::new(0));
    }

    /// Get the number of pages currently allocated to this [`LinearMemory`]
    pub fn pages(&self) -> PageCountTy {
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
    }

    /// Get the length in bytes currently allocated to this [`LinearMemory`]
    // TODO remove this op
    pub fn len(&self) -> usize {
        self.inner_data.read().len()
    }

    /// At a given index, store a datum in the [`LinearMemory`]
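    ///
    /// A minimal round-trip sketch (illustrative, not run as a doctest; assumes a memory with at
    /// least one page):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// mem.store(0, 0xDEAD_BEEF_u32).unwrap();
    /// let value: u32 = mem.load::<4, u32>(0).unwrap();
    /// assert_eq!(value, 0xDEAD_BEEF);
    /// ```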
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
        value: T,
    ) -> Result<(), RuntimeError> {
        self.store_bytes::<N>(index, value.to_le_bytes())
    }

    /// At a given index, store a number of bytes `N` in the [`LinearMemory`]
    pub fn store_bytes<const N: usize>(
        &self,
        index: MemIdx,
        bytes: [u8; N],
    ) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true:
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - N {
            error!("value write would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* gather pointers */
        let src_ptr = bytes.as_ptr();
        let dst_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());

        /* write `value` to this `LinearMemory` */

        // SAFETY:
        // - nonoverlapping is guaranteed, because `src_ptr` is a pointer to a stack allocated
        //   array, while `dst_ptr` points into a heap allocated `Vec`
        // - the first if statement in this function guarantees that a `T` can fit into the
        //   `LinearMemory` behind the `dst_ptr`
        // - the second if statement in this function guarantees that even with the offset
        //   `index`, writing all of `src_ptr`'s bytes does not extend beyond the `dst_ptr`'s last
        //   `UnsafeCell<u8>`
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references
        unsafe { ptr::copy_nonoverlapping(src_ptr, dst_ptr.add(index), bytes.len()) };

        Ok(())
    }

    /// From a given index, load a datum from the [`LinearMemory`]
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
        &self,
        index: MemIdx,
    ) -> Result<T, RuntimeError> {
        self.load_bytes::<N>(index).map(T::from_le_bytes)
    }

    /// From a given index, load a number of bytes `N` from the [`LinearMemory`]
    pub fn load_bytes<const N: usize>(&self, index: MemIdx) -> Result<[u8; N], RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check source for out of bounds access */
        // A value must fit into the linear memory
        if N > lock_guard.len() {
            error!("value does not fit into linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // The following statement must be true:
        // `index + N <= lock_guard.len()`
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
        // underflow because of the previous check.
        if index > lock_guard.len() - N {
            error!("value read would extend beyond the end of the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        let mut bytes = [0; N];

        /* gather pointers */
        let src_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());
        let dst_ptr = bytes.as_mut_ptr();

        /* read `value` from this `LinearMemory` */
        // SAFETY:
        // - nonoverlapping is guaranteed, because `dst_ptr` is a pointer to a stack allocated
        //   array, while the source is the heap allocated `Vec`
        // - the first if statement in this function guarantees that a `T` can fit into the linear
        //   memory behind the `src_ptr`
        // - the second if statement in this function guarantees that even with the offset `index`,
        //   reading all of `T`'s bytes does not extend beyond the `src_ptr`'s last `UnsafeCell<u8>`
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references
        unsafe { ptr::copy_nonoverlapping(src_ptr.add(index), dst_ptr, bytes.len()) };

        Ok(bytes)
    }

    /// Implementation of the behavior described in
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
    ///
    /// Note that the WASM spec defines the behavior by recursion, while our implementation uses
    /// the memset-like [`core::ptr::write_bytes`].
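    ///
    /// A minimal usage sketch (illustrative, not run as a doctest; assumes `u8` implements
    /// `LittleEndianBytes<1>`):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// mem.fill(0, 0xAA, 16).unwrap(); // bytes 0..16 are now 0xAA
    /// assert_eq!(mem.load::<1, u8>(15).unwrap(), 0xAA);
    /// ```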
    pub fn fill(&self, index: MemIdx, data_byte: u8, count: MemIdx) -> Result<(), RuntimeError> {
        let lock_guard = self.inner_data.read();

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard.len() {
            error!("fill count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if index > lock_guard.len() - count {
            error!("fill extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* gather pointer */
        let dst_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());

        /* write the `data_byte` to this `LinearMemory` */

        // SAFETY:
        // - the first if statement of this function guarantees that `count` fits into this
        //   `LinearMemory`
        // - the second if statement of this function guarantees that even with the offset `index`,
        //   `count` many bytes can be written to this `LinearMemory` without extending beyond its
        //   last `UnsafeCell<u8>`
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references

        // Specification steps 14-21.
        unsafe { dst_ptr.add(index).write_bytes(data_byte, count) };

        Ok(())
    }

    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
    /// different linear memory
    ///
    /// - Both regions may overlap
    /// - Copies the `count` bytes starting from `source_index`, overwriting the `count` bytes
    ///   starting from `destination_index`
    ///
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-copy>
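    ///
    /// A minimal usage sketch (illustrative, not run as a doctest; copying within one memory):
    ///
    /// ```ignore
    /// let mem = LinearMemory::<65536>::new_with_initial_pages(1);
    /// mem.store(0, 0x1122_3344_u32).unwrap();
    /// // Copy 4 bytes from offset 0 to offset 2; the regions may overlap.
    /// mem.copy(2, &mem, 0, 4).unwrap();
    /// ```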
    pub fn copy(
        &self,
        destination_index: MemIdx,
        source_mem: &Self,
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();

        // other is the source
        let lock_guard_other = source_mem.inner_data.read();

        /* check source for out of bounds access */
        // Specification step 12.
        if count > lock_guard_other.len() {
            error!("copy count is bigger than the source linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if source_index > lock_guard_other.len() - count {
            error!("copy source extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 12.
        if count > lock_guard_self.len() {
            error!("copy count is bigger than the destination linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 12.
        if destination_index > lock_guard_self.len() - count {
            error!("copy destination extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 13.
        if count == 0 {
            return Ok(());
        }

        /* gather pointers */
        let src_ptr = UnsafeCell::raw_get(lock_guard_other.as_ptr());
        let dst_ptr = UnsafeCell::raw_get(lock_guard_self.as_ptr());

        /* write from `source_mem` to `self` */

        // SAFETY:
        // - the first two if statements above guarantee that starting from `source_index`,
        //   there are at least `count` further `UnsafeCell<u8>`s in the other `LinearMemory`
        // - the third and fourth if statement above guarantee that starting from
        //   `destination_index`, there are at least `count` further `UnsafeCell<u8>`s in this
        //   `LinearMemory`
        // - as per the statements above, both `*_ptr` are valid, and have at least `count`
        //   further values after them in their respective `LinearMemory`s
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
        //   references

        // Specification steps 14-15.
        // TODO investigate if it is worth it to use a conditional `copy_from_nonoverlapping`
        // if the non-overlapping can be confirmed (and the count is bigger than a certain
        // threshold).
        unsafe {
            ptr::copy(
                src_ptr.add(source_index),
                dst_ptr.add(destination_index),
                count,
            )
        }

        Ok(())
    }

    // Rationale behind having `source_index` and `count` when the call site could also just create
    // a subslice for `source_data`: this keeps all the index error checks in one place.
    //
    // <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-init-x>
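    //
    // A minimal usage sketch (illustrative, hypothetical values):
    //
    //     let data = [1u8, 2, 3, 4];
    //     // Copy `data[1..3]` into the linear memory at offsets 0..2.
    //     mem.init(0, &data, 1, 2).unwrap();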
    pub fn init(
        &self,
        destination_index: MemIdx,
        source_data: &[u8],
        source_index: MemIdx,
        count: MemIdx,
    ) -> Result<(), RuntimeError> {
        // self is the destination
        let lock_guard_self = self.inner_data.read();
        let data_len = source_data.len();

        /* check source for out of bounds access */
        // Specification step 16.
        if count > data_len {
            error!("init count is bigger than the data instance");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if source_index > data_len - count {
            error!("init source extends beyond the data instance's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check destination for out of bounds access */
        // Specification step 16.
        if count > lock_guard_self.len() {
            error!("init count is bigger than the linear memory");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        // Specification step 16.
        if destination_index > lock_guard_self.len() - count {
            error!("init extends beyond the linear memory's end");
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
        }

        /* check if there is anything to be done */
        // Specification step 17.
        if count == 0 {
            return Ok(());
        }

        /* copy the data to this `LinearMemory` */

        // Specification steps 18-27.
        for i in 0..count {
            // SAFETY: this is sound, as the two if statements above guarantee that starting from
            // `source_index`, there are at least `count` further `u8`s in `source_data`
            let src_ptr = unsafe { source_data.get_unchecked(source_index + i) };

            // SAFETY: this is sound, as the two if statements above guarantee that starting from
            // `destination_index`, there are at least `count` further `UnsafeCell<u8>`s in this
            // `LinearMemory`
            let dst_ptr = unsafe { lock_guard_self.get_unchecked(destination_index + i) }.get();

            // SAFETY:
            // - as per the other SAFETY statements in this function, both `*_ptr` are valid, and
            //   have at least `count` further values after them in their respective buffers
            // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the
            //   `u8`s contained in the `UnsafeCell`s, so no UB is created through the existence of
            //   unsound references
            unsafe {
                ptr::copy(src_ptr, dst_ptr, 1);
            }
        }

        Ok(())
    }
}

impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        /// A helper struct for formatting a [`Vec<UnsafeCell<u8>>`] which is guarded by a
        /// [`ReadLockGuard`]. This formatter is able to detect and format byte repetitions in a
        /// compact way.
        struct RepetitionDetectingMemoryWriter<'a>(ReadLockGuard<'a, Vec<UnsafeCell<u8>>>);
        impl core::fmt::Debug for RepetitionDetectingMemoryWriter<'_> {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                /// The number of repetitions required for successive elements to be grouped
                /// together.
                const MIN_REPETITIONS_FOR_GROUP: usize = 8;

                // First we create an iterator over all bytes
                let mut bytes = self.0.iter().map(|x| {
                    // SAFETY: The [`ReadLockGuard`] stored in `self` prevents a resize/realloc of
                    // its data, so access to the value inside each [`UnsafeCell`] is safe.
                    unsafe { *x.get() }
                });

                // Then we iterate over all bytes and deduplicate repetitions. This produces an
                // iterator of pairs, consisting of the number of repetitions and the repeated byte
                // itself. `current_group` is captured by the iterator and used as state to track
                // the current group.
                let mut current_group: Option<(usize, u8)> = None;
                let deduplicated_with_count = iter::from_fn(|| {
                    for byte in bytes.by_ref() {
                        // If the next byte is different from the one currently being tracked...
                        if current_group.is_some() && current_group.unwrap().1 != byte {
                            // ...then end and emit the current group, but also start a new group
                            // for the next byte with an initial count of 1.
                            return current_group.replace((1, byte));
                        }
                        // Otherwise increment the current group's counter or start a new group if
                        // this was the first byte.
                        current_group.get_or_insert((0, byte)).0 += 1;
                    }
                    // In the end, when there are no more bytes to read, directly emit the last
                    // group.
                    current_group.take()
                });

                // Finally we use `DebugList` to print a list of all groups, while writing out all
                // elements from groups with less than `MIN_REPETITIONS_FOR_GROUP` elements.
                let mut list = f.debug_list();
                deduplicated_with_count.for_each(|(count, value)| {
                    if count < MIN_REPETITIONS_FOR_GROUP {
                        list.entries(iter::repeat(value).take(count));
                    } else {
                        list.entry(&format_args!("#{count} × {value}"));
                    }
                });
                list.finish()
            }
        }

        // Format the linear memory by using Rust's formatter helpers and the previously defined
        // `RepetitionDetectingMemoryWriter`
        f.debug_struct("LinearMemory")
            .field(
                "inner_data",
                &RepetitionDetectingMemoryWriter(self.inner_data.read()),
            )
            .finish()
    }
}

impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod test {
    use alloc::format;
    use core::{f64, mem};

    use crate::value::{F32, F64};

    use super::*;

    const PAGE_SIZE: usize = 1 << 8;
    const PAGES: PageCountTy = 2;

    #[test]
    fn new_constructor() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        assert_eq!(lin_mem.pages(), 0);
    }

    #[test]
    fn new_grow() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
        lin_mem.grow(1);
        assert_eq!(lin_mem.pages(), 1);
    }

    #[test]
    fn debug_print_simple() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        assert_eq!(lin_mem.pages(), 1);

        let expected = format!("LinearMemory {{ inner_data: [#{PAGE_SIZE} × 0] }}");
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_complex() {
        let page_count = 2;
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(page_count);
        assert_eq!(lin_mem.pages(), page_count);

        lin_mem.store(1, 0xffu8).unwrap();
        lin_mem.store(10, 1u8).unwrap();
        lin_mem.store(200, 0xffu8).unwrap();

        let expected = "LinearMemory { inner_data: [0, 255, #8 × 0, 1, #189 × 0, 255, #311 × 0] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn debug_print_empty() {
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(0);
        assert_eq!(lin_mem.pages(), 0);

        let expected = "LinearMemory { inner_data: [] }";
        let debug_repr = format!("{lin_mem:?}");

        assert_eq!(debug_repr, expected);
    }

    #[test]
    fn roundtrip_normal_range_i8_neg127() {
        let x: i8 = -127;
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f32_13() {
        let x = F32(13.0);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F32>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F32>() }, F32>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_min() {
        let x = F64(f64::MIN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert_eq!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap(),
                x,
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    fn roundtrip_normal_range_f64_nan() {
        let x = F64(f64::NAN);
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
        for offset in 0..MemIdx::try_from(highest_legal_offset).unwrap() {
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);

            lin_mem.store(offset, x).unwrap();

            assert!(
                lin_mem
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
                    .unwrap()
                    .is_nan(),
                "load store roundtrip for {x:?} failed!"
            );
        }
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_out_of_range_u128_max() {
        let x: u128 = u128::MAX;
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn store_empty_linear_memory_u8() {
        let x: u8 = u8::MAX;
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        lin_mem.store(lowest_illegal_offset, x).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_out_of_range_u128_max() {
        let pages = 1;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic(
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
    )]
    fn load_empty_linear_memory_u8() {
        let pages = 0;
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);

        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
    }

    #[test]
    #[should_panic]
    fn copy_out_of_bounds() {
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
    }
}