Coverage Report

Created: 2025-10-23 14:20

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/build/source/src/execution/linear_memory.rs
Line
Count
Source
1
use core::{cell::UnsafeCell, iter, ptr};
2
3
use alloc::vec::Vec;
4
5
use crate::{
6
    core::indices::MemIdx,
7
    execution::little_endian::LittleEndianBytes,
8
    rw_spinlock::{ReadLockGuard, RwSpinLock},
9
    RuntimeError, TrapError,
10
};
11
12
/// Implementation of the linear memory suitable for concurrent access
13
///
14
/// Implements the base for the instructions described in
15
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
16
///
17
/// This linear memory implementation internally relies on a `Vec<UnsafeCell<u8>>`. Thus, the atomic
18
/// unit of information for it is a byte (`u8`). All access to the linear memory internally occurs
19
/// through pointers, avoiding the creation of shared and mut refs to the internal data completely.
20
/// This avoids undefined behavior, except for the race-condition inherent to concurrent writes.
21
/// Because of this, the [`LinearMemory::store`] function does not require `&mut self` -- `&self`
22
/// suffices.
23
///
24
/// # Notes on overflowing
25
///
26
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
27
/// perform bounds checking. Thus they always have to ensure that `n + index <= linear_memory.len()`
28
/// holds true (e.g. `n + index - 1` must be a valid index into `linear_memory`). However,
29
/// writing that check as is bears the danger of an overflow, assuming that `n`, `index` and
30
/// `linear_memory.len()` are the same given integer type, `n + index` can overflow, resulting in
31
/// the check passing despite the access being out of bounds!
32
///
33
/// To avoid this, the bounds checks are carefully ordered to avoid any overflows:
34
///
35
/// - First we check, that `n <= linear_memory.len()` holds true, ensuring that the amount of bytes
36
///   to be accessed is indeed smaller than or equal to the linear memory's size. If this does not
37
///   hold true, continuation of the operation will yield out of bounds access in any case.
38
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
39
///   avoid the overflow, as there is no addition. The subtraction in the left hand can not
40
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
41
///   `linear_memory.len()`).
42
///
43
/// Combined in the given order, these two checks enable bounds checking without risking any
44
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
45
/// integer type.
46
///
47
/// # Notes on locking
48
///
49
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
50
/// name, writes to the linear memory do not require an acquisition of a write lock. Writes are
51
/// implemented through a shared ref to the internal vector, with an `UnsafeCell` to achieve
52
/// interior mutability.
53
///
54
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a grow can
55
/// result in the vector's internal data buffer to be copied over to a bigger, fresh allocation.
56
/// The old buffer is then freed. Combined with concurrent mutable access, this can cause
57
/// use-after-free. To avoid this, a grow operation of the linear memory acquires a write lock,
58
/// blocking all read/write to the linear memory in between.
59
///
60
/// # Unsafe Note
61
///
62
/// Raw pointer access is required, because concurrent mutation of the linear memory might happen
63
/// (consider the threading proposal for WASM, where multiple WASM threads access the same linear
64
/// memory at the same time). The inherent race condition results in UB w/r/t the state of the `u8`s
65
/// in the inner data. However, this is tolerable, e.g. avoiding race conditions on the state of the
66
/// linear memory can not be the task of the interpreter, but has to be fulfilled by the interpreted
67
/// bytecode itself.
68
///
69
/// To gain some confidence in the correctness of the unsafe code in this module, run `miri`:
70
///
71
/// ```bash
72
/// cargo miri test --test memory # quick
73
/// cargo miri test # thorough
74
/// ```
75
// TODO if a memmap like operation is available, the linear memory implementation can be optimized brutally. Out-of-bound access can be mapped to userspace handled page-faults, e.g. the MMU takes over that responsibility of catching out of bounds. Grow can happen without copying of data, by mapping new pages consecutively after the current final page of the linear memory.
76
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
77
    inner_data: RwSpinLock<Vec<UnsafeCell<u8>>>,
78
}
79
80
/// Type to express the page count
81
pub type PageCountTy = u16;
82
83
impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
84
    /// Size of a page in the linear memory, measured in bytes
85
    ///
86
    /// The WASM specification demands a page size of 64 KiB, that is `65536` bytes:
87
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
88
    const PAGE_SIZE: usize = PAGE_SIZE;
89
90
    /// Create a new, empty [`LinearMemory`]
91
2
    pub fn new() -> Self {
92
2
        Self {
93
2
            inner_data: RwSpinLock::new(Vec::new()),
94
2
        }
95
2
    }
96
97
    /// Create a new [`LinearMemory`] with the given number of initial pages, all bytes zeroed
98
1.51k
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
99
1.51k
        let size_bytes = Self::PAGE_SIZE * pages as usize;
100
1.51k
        let mut data = Vec::with_capacity(size_bytes);
101
29.3M
        data.resize_with(size_bytes, || UnsafeCell::new(0));
102
1.51k
103
1.51k
        Self {
104
1.51k
            inner_data: RwSpinLock::new(data),
105
1.51k
        }
106
1.51k
    }
107
108
    /// Grow the [`LinearMemory`] by a number of pages
109
125
    pub fn grow(&self, pages_to_add: PageCountTy) {
110
125
        let mut lock_guard = self.inner_data.write();
111
125
        let prior_length_bytes = lock_guard.len();
112
125
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * pages_to_add as usize;
113
154M
        lock_guard.resize_with(new_length_bytes, || UnsafeCell::new(0));
114
125
    }
115
116
    /// Get the number of pages currently allocated to this [`LinearMemory`]
117
147
    pub fn pages(&self) -> PageCountTy {
118
147
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
119
147
    }
120
121
    /// Get the length in bytes currently allocated to this [`LinearMemory`]
122
    // TODO remove this op
123
216
    pub fn len(&self) -> usize {
124
216
        self.inner_data.read().len()
125
216
    }
126
127
    /// At a given index, store a datum in the [`LinearMemory`]
128
2.92k
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
129
2.92k
        &self,
130
2.92k
        index: MemIdx,
131
2.92k
        value: T,
132
2.92k
    ) -> Result<(), RuntimeError> {
133
2.92k
        self.store_bytes::<N>(index, value.to_le_bytes())
134
2.92k
    }
135
136
    /// At a given index, store a number of bytes `N` in the [`LinearMemory`]
137
2.92k
    pub fn store_bytes<const N: usize>(
138
2.92k
        &self,
139
2.92k
        index: MemIdx,
140
2.92k
        bytes: [u8; N],
141
2.92k
    ) -> Result<(), RuntimeError> {
142
2.92k
        let lock_guard = self.inner_data.read();
143
2.92k
144
2.92k
        /* check destination for out of bounds access */
145
2.92k
        // A value must fit into the linear memory
146
2.92k
        if N > lock_guard.len() {
147
5
            error!("value does not fit into linear memory");
148
5
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
149
2.92k
        }
150
2.92k
151
2.92k
        // The following statement must be true
152
2.92k
        // `index + N <= lock_guard.len()`
153
2.92k
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
154
2.92k
        // underflow because of the previous check.
155
2.92k
156
2.92k
        if index > lock_guard.len() - N {
157
88
            error!("value write would extend beyond the end of the linear memory");
158
88
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
159
2.83k
        }
160
2.83k
161
2.83k
        /* gather pointers */
162
2.83k
        let src_ptr = bytes.as_ptr();
163
2.83k
        let dst_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());
164
2.83k
165
2.83k
        /* write `value` to this `LinearMemory` */
166
2.83k
167
2.83k
        // SAFETY:
168
2.83k
        // - nonoverlapping is guaranteed, because `src_ptr` is a pointer to a stack allocated
169
2.83k
        //   array, while `dst_ptr` points to a heap allocated `Vec`
170
2.83k
        // - the first if statement in this function guarantees that a `T` can fit into
171
2.83k
        //   `LinearMemory` behind the `dst_ptr`
172
2.83k
        // - the second if statement in this function guarantees that even with the offset
173
2.83k
        //   `index`, writing all of `src_ptr`'s bytes does not extend beyond the `dst_ptr`'s last
174
2.83k
        //   `UnsafeCell<u8>`
175
2.83k
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
176
2.83k
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
177
2.83k
        //   references
178
2.83k
        unsafe { ptr::copy_nonoverlapping(src_ptr, dst_ptr.add(index), bytes.len()) };
179
2.83k
180
2.83k
        Ok(())
181
2.92k
    }
182
183
    /// From a given index, load a datum from the [`LinearMemory`]
184
930k
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
185
930k
        &self,
186
930k
        index: MemIdx,
187
930k
    ) -> Result<T, RuntimeError> {
188
930k
        self.load_bytes::<N>(index).map(T::from_le_bytes)
189
930k
    }
190
191
    /// From a given index, load a number of bytes `N` from the [`LinearMemory`]
192
930k
    pub fn load_bytes<const N: usize>(&self, index: MemIdx) -> Result<[u8; N], RuntimeError> {
193
930k
        let lock_guard = self.inner_data.read();
194
930k
195
930k
        /* check source for out of bounds access */
196
930k
        // A value must fit into the linear memory
197
930k
        if N > lock_guard.len() {
198
5
            error!("value does not fit into linear memory");
199
5
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
200
930k
        }
201
930k
202
930k
        // The following statement must be true
203
930k
        // `index + N <= lock_guard.len()`
204
930k
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
205
930k
        // underflow because of the previous assert.
206
930k
207
930k
        if index > lock_guard.len() - N {
208
175
            error!("value read would extend beyond the end of the linear_memory");
209
175
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
210
930k
        }
211
930k
212
930k
        let mut bytes = [0; N];
213
930k
214
930k
        /* gather pointers */
215
930k
        let src_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());
216
930k
        let dst_ptr = bytes.as_mut_ptr();
217
930k
218
930k
        /* read `value` from this `LinearMemory` */
219
930k
        // SAFETY:
220
930k
        // - nonoverlapping is guaranteed, because `dst_ptr` is a pointer to a stack allocated
221
930k
        //   array, while the source is heap allocated Vec
222
930k
        // - the first if statement in this function guarantees that a `T` can fit into the linear
223
930k
        //   memory behind the `src_ptr`
224
930k
        // - the second if statement in this function guarantees that even with the offset `index`,
225
930k
        //   reading all of `T`s bytes does not extend beyond the `src_ptrs`'s last `UnsafeCell<u8>`
226
930k
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
227
930k
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
228
930k
        //   references
229
930k
        unsafe { ptr::copy_nonoverlapping(src_ptr.add(index), dst_ptr, bytes.len()) };
230
930k
231
930k
        Ok(bytes)
232
930k
    }
233
234
    /// Implementation of the behavior described in
235
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
236
    /// Note, that the WASM spec defines the behavior by recursion, while our implementation uses
237
    /// the memset like [`core::ptr::write_bytes`].
238
    ///
239
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>
240
123
    pub fn fill(&self, index: MemIdx, data_byte: u8, count: MemIdx) -> Result<(), RuntimeError> {
241
123
        let lock_guard = self.inner_data.read();
242
123
243
123
        /* check destination for out of bounds access */
244
123
        // Specification step 12.
245
123
        if count > lock_guard.len() {
246
1
            error!("fill count is bigger than the linear memory");
247
1
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
248
122
        }
249
122
250
122
        // Specification step 12.
251
122
        if index > lock_guard.len() - count {
252
7
            error!("fill extends beyond the linear memory's end");
253
7
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
254
115
        }
255
115
256
115
        /* check if there is anything to be done */
257
115
        // Specification step 13.
258
115
        if count == 0 {
259
4
            return Ok(());
260
111
        }
261
111
262
111
        /* gather pointer */
263
111
        let dst_ptr = UnsafeCell::raw_get(lock_guard.as_ptr());
264
111
265
111
        /* write the `data_byte` to this `LinearMemory` */
266
111
267
111
        // SAFETY:
268
111
        // - the first if statement of this function guarantees that count fits into this
269
111
        //   `LinearMemory`
270
111
        // - the second if statement of this function guarantees that even with the offset `index`,
271
111
        //   `count` many bytes can be written to this `LinearMemory` without extending beyond its
272
111
        //   last `UnsafeCell<u8>`
273
111
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
274
111
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
275
111
        //   references
276
111
277
111
        // Specification step 14-21.
278
111
        unsafe { dst_ptr.add(index).write_bytes(data_byte, count) };
279
111
280
111
        Ok(())
281
123
    }
282
283
    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
284
    /// different linear memory
285
    ///
286
    /// - Both regions may overlap
287
    /// - Copies the `count` bytes starting from `source_index`, overwriting the `count` bytes
288
    ///   starting from `destination_index`
289
    ///
290
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-copy>
291
167
    pub fn copy(
292
167
        &self,
293
167
        destination_index: MemIdx,
294
167
        source_mem: &Self,
295
167
        source_index: MemIdx,
296
167
        count: MemIdx,
297
167
    ) -> Result<(), RuntimeError> {
298
167
        // self is the destination
299
167
        let lock_guard_self = self.inner_data.read();
300
167
301
167
        // other is the source
302
167
        let lock_guard_other = source_mem.inner_data.read();
303
167
304
167
        /* check source for out of bounds access */
305
167
        // Specification step 12.
306
167
        if count > lock_guard_other.len() {
307
5
            error!("copy count is bigger than the source linear memory");
308
5
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
309
162
        }
310
162
311
162
        // Specification step 12.
312
162
        if source_index > lock_guard_other.len() - count {
313
17
            error!("copy source extends beyond the linear memory's end");
314
17
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
315
145
        }
316
145
317
145
        /* check destination for out of bounds access */
318
145
        // Specification step 12.
319
145
        if count > lock_guard_self.len() {
320
0
            error!("copy count is bigger than the destination linear memory");
321
0
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
322
145
        }
323
145
324
145
        // Specification step 12.
325
145
        if destination_index > lock_guard_self.len() - count {
326
11
            error!("copy destination extends beyond the linear memory's end");
327
11
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
328
134
        }
329
134
330
134
        /* check if there is anything to be done */
331
134
        // Specification step 13.
332
134
        if count == 0 {
333
6
            return Ok(());
334
128
        }
335
128
336
128
        /* gather pointers */
337
128
        let src_ptr = UnsafeCell::raw_get(lock_guard_other.as_ptr());
338
128
        let dst_ptr = UnsafeCell::raw_get(lock_guard_self.as_ptr());
339
128
340
128
        /* write from `source_mem` to `self` */
341
128
342
128
        // SAFETY:
343
128
        // - the first two if statements above guarantee that starting from `source_index`,
344
128
        //   there are at least `count` further `UnsafeCell<u8>`s in the other `LinearMemory`
345
128
        // - the third and fourth if statement above guarantee that starting from
346
128
        //   `destination_index`, there are at least `count` further `UnsafeCell<u8>`s in this
347
128
        //   `LinearMemory`
348
128
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
349
128
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
350
128
        //   references
351
128
        // - as per the other statements above, both `*_ptr` are valid, and have at least `count`
352
128
        //   further values after them in their respective `LinearMemory`s
353
128
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
354
128
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
355
128
        //   references
356
128
357
128
        // Specification step 14-15.
358
128
        // TODO investigate if it is worth to use a conditional `copy_from_nonoverlapping`
359
128
        // if the non-overlapping can be confirmed (and the count is bigger than a certain
360
128
        // threshold).
361
128
        unsafe {
362
128
            ptr::copy(
363
128
                src_ptr.add(source_index),
364
128
                dst_ptr.add(destination_index),
365
128
                count,
366
128
            )
367
128
        }
368
128
369
128
        Ok(())
370
167
    }
371
372
    // Rationale behind having `source_index` and `count` when the callsite could also just create a
373
    // subslice for `source_data`? Have all the index error checks in one place.
374
    //
375
    // <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-init-x>
376
264
    pub fn init(
377
264
        &self,
378
264
        destination_index: MemIdx,
379
264
        source_data: &[u8],
380
264
        source_index: MemIdx,
381
264
        count: MemIdx,
382
264
    ) -> Result<(), RuntimeError> {
383
264
        // self is the destination
384
264
        let lock_guard_self = self.inner_data.read();
385
264
        let data_len = source_data.len();
386
264
387
264
        /* check source for out of bounds access */
388
264
        // Specification step 16.
389
264
        if count > data_len {
390
14
            error!("init count is bigger than the data instance");
391
14
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
392
250
        }
393
250
394
250
        // Specification step 16.
395
250
        if source_index > data_len - count {
396
3
            error!("init source extends beyond the data instance's end");
397
3
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
398
247
        }
399
247
400
247
        /* check destination for out of bounds access */
401
247
        // Specification step 16.
402
247
        if count > lock_guard_self.len() {
403
4
            error!("init count is bigger than the linear memory");
404
4
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
405
243
        }
406
243
407
243
        // Specification step 16.
408
243
        if destination_index > lock_guard_self.len() - count {
409
18
            error!("init extends beyond the linear memory's end");
410
18
            return Err(TrapError::MemoryOrDataAccessOutOfBounds.into());
411
225
        }
412
225
413
225
        /* check if there is anything to be done */
414
225
        // Specification step 17.
415
225
        if count == 0 {
416
34
            return Ok(());
417
191
        }
418
419
        /* copy the data to this `LinearMemory` */
420
421
        // Specification step 18-27.
422
4.80k
        for i in 0..
count191
{
423
            // SAFETY: this is sound, as the two if statements above guarantee that starting from
424
            // `source_index`, there are at least `count` further `u8`s in `source_data`
425
4.80k
            let src_ptr = unsafe { source_data.get_unchecked(source_index + i) };
426
4.80k
427
4.80k
            // SAFETY: this is sound, as the two if statements above guarantee that starting from
428
4.80k
            // `destination_index`, there are at least `count` further `UnsafeCell<u8>`s in this
429
4.80k
            // `LinearMemory`
430
4.80k
            let dst_ptr = unsafe { lock_guard_self.get_unchecked(destination_index + i) }.get();
431
4.80k
432
4.80k
            // SAFETY:
433
4.80k
            // - as per the other SAFETY statements in this function, both `*_ptr` are valid, and
434
4.80k
            //   have at least `count` further values after them, respectively
435
4.80k
            // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the
436
4.80k
            //   `u8`s contained in the `UnsafeCell`s, so no UB is created through the existence of
437
4.80k
            //   unsound references
438
4.80k
            unsafe {
439
4.80k
                ptr::copy(src_ptr, dst_ptr, 1);
440
4.80k
            }
441
        }
442
443
191
        Ok(())
444
264
    }
445
}
446
447
impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
448
3
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
449
        /// A helper struct for formatting a [`Vec<UnsafeCell<u8>>`] which is guarded by a [`ReadLockGuard`].
450
        /// This formatter is able to detect and format byte repetitions in a compact way.
451
        struct RepetitionDetectingMemoryWriter<'a>(ReadLockGuard<'a, Vec<UnsafeCell<u8>>>);
452
        impl core::fmt::Debug for RepetitionDetectingMemoryWriter<'_> {
453
3
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
454
                /// The number of repetitions required for successive elements to be grouped
455
                /// together.
456
                const MIN_REPETITIONS_FOR_GROUP: usize = 8;
457
458
                // First we create an iterator over all bytes
459
768
                let 
mut bytes = self.0.iter().map(3
|x| {
460
768
                    // SAFETY: The [`ReadLockGuard`] stored in `self` prevents a resize/realloc of
461
768
                    // its data, so access to the value inside each [`UnsafeCell`] is safe.
462
768
                    unsafe { *x.get() }
463
768
                });
464
3
465
3
                // Then we iterate over all bytes and deduplicate repetitions. This produces an
466
3
                // iterator of pairs, consisting of the number of repetitions and the repeated byte
467
3
                // itself. `current_group` is captured by the iterator and used as state to track
468
3
                // the current group.
469
3
                let mut current_group: Option<(usize, u8)> = None;
470
11
                let deduplicated_with_count = iter::from_fn(|| {
471
768
                    for byte in 
bytes.by_ref()11
{
472
                        // If the next byte is different than the one being tracked currently...
473
768
                        if current_group.is_some() && 
current_group.unwrap().1 != byte766
{
474
                            // ...then end and emit the current group but also start a new group for
475
                            // the next byte with an initial count of 1.
476
6
                            return current_group.replace((1, byte));
477
762
                        }
478
762
                        // Otherwise increment the current group's counter or start a new group if
479
762
                        // this was the first byte.
480
762
                        current_group.get_or_insert((0, byte)).0 += 1;
481
                    }
482
                    // In the end when there are no more bytes to read, directly emit the last
483
5
                    current_group.take()
484
11
                });
485
3
486
3
                // Finally we use `DebugList` to print a list of all groups, while writing out all
487
3
                // elements from groups with less than `MIN_REPETITIONS_FOR_GROUP` elements.
488
3
                let mut list = f.debug_list();
489
8
                deduplicated_with_count.for_each(|(count, value)| {
490
8
                    if count < MIN_REPETITIONS_FOR_GROUP {
491
4
                        list.entries(iter::repeat(value).take(count));
492
4
                    } else {
493
4
                        list.entry(&format_args!("#{count} × {value}"));
494
4
                    }
495
8
                });
496
3
                list.finish()
497
3
            }
498
        }
499
500
        // Format the linear memory by using Rust's formatter helpers and the previously defined
501
        // `RepetitionDetectingMemoryWriter`
502
3
        f.debug_struct("LinearMemory")
503
3
            .field(
504
3
                "inner_data",
505
3
                &RepetitionDetectingMemoryWriter(self.inner_data.read()),
506
3
            )
507
3
            .finish()
508
3
    }
509
}
510
511
impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
512
0
    fn default() -> Self {
513
0
        Self::new()
514
0
    }
515
}
516
517
#[cfg(test)]
518
mod test {
519
    use core::f64;
520
521
    use alloc::format;
522
    use core::mem;
523
524
    use crate::value::{F32, F64};
525
526
    use super::*;
527
528
    const PAGE_SIZE: usize = 1 << 8;
529
    const PAGES: PageCountTy = 2;
530
531
    #[test]
532
1
    fn new_constructor() {
533
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
534
1
        assert_eq!(lin_mem.pages(), 0);
535
1
    }
536
537
    #[test]
538
1
    fn new_grow() {
539
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
540
1
        lin_mem.grow(1);
541
1
        assert_eq!(lin_mem.pages(), 1);
542
1
    }
543
544
    #[test]
545
1
    fn debug_print_simple() {
546
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
547
1
        assert_eq!(lin_mem.pages(), 1);
548
549
1
        let expected = format!("LinearMemory {{ inner_data: [#{PAGE_SIZE} × 0] }}");
550
1
        let debug_repr = format!("{lin_mem:?}");
551
1
552
1
        assert_eq!(debug_repr, expected);
553
1
    }
554
555
    #[test]
556
1
    fn debug_print_complex() {
557
1
        let page_count = 2;
558
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(page_count);
559
1
        assert_eq!(lin_mem.pages(), page_count);
560
561
1
        lin_mem.store(1, 0xffu8).unwrap();
562
1
        lin_mem.store(10, 1u8).unwrap();
563
1
        lin_mem.store(200, 0xffu8).unwrap();
564
1
565
1
        let expected = "LinearMemory { inner_data: [0, 255, #8 × 0, 1, #189 × 0, 255, #311 × 0] }";
566
1
        let debug_repr = format!("{lin_mem:?}");
567
1
568
1
        assert_eq!(debug_repr, expected);
569
1
    }
570
571
    #[test]
572
1
    fn debug_print_empty() {
573
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(0);
574
1
        assert_eq!(lin_mem.pages(), 0);
575
576
1
        let expected = "LinearMemory { inner_data: [] }";
577
1
        let debug_repr = format!("{lin_mem:?}");
578
1
579
1
        assert_eq!(debug_repr, expected);
580
1
    }
581
582
    #[test]
583
1
    fn roundtrip_normal_range_i8_neg127() {
584
1
        let x: i8 = -127;
585
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
586
255
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
587
255
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
588
255
589
255
            lin_mem.store(offset, x).unwrap();
590
255
591
255
            assert_eq!(
592
255
                lin_mem
593
255
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
594
255
                    .unwrap(),
595
                x,
596
0
                "load store roundtrip for {x:?} failed!"
597
            );
598
        }
599
1
    }
600
601
    #[test]
602
1
    fn roundtrip_normal_range_f32_13() {
603
1
        let x = F32(13.0);
604
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F32>();
605
252
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
606
252
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
607
252
608
252
            lin_mem.store(offset, x).unwrap();
609
252
610
252
            assert_eq!(
611
252
                lin_mem
612
252
                    .load::<{ core::mem::size_of::<F32>() }, F32>(offset)
613
252
                    .unwrap(),
614
                x,
615
0
                "load store roundtrip for {x:?} failed!"
616
            );
617
        }
618
1
    }
619
620
    #[test]
621
1
    fn roundtrip_normal_range_f64_min() {
622
1
        let x = F64(f64::MIN);
623
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<F64>();
624
248
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
625
248
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
626
248
627
248
            lin_mem.store(offset, x).unwrap();
628
248
629
248
            assert_eq!(
630
248
                lin_mem
631
248
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
632
248
                    .unwrap(),
633
                x,
634
0
                "load store roundtrip for {x:?} failed!"
635
            );
636
        }
637
1
    }
638
639
    #[test]
640
1
    fn roundtrip_normal_range_f64_nan() {
641
1
        let x = F64(f64::NAN);
642
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
643
248
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
644
248
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
645
248
646
248
            lin_mem.store(offset, x).unwrap();
647
248
648
248
            assert!(
649
248
                lin_mem
650
248
                    .load::<{ core::mem::size_of::<F64>() }, F64>(offset)
651
248
                    .unwrap()
652
248
                    .is_nan(),
653
0
                "load store roundtrip for {x:?} failed!"
654
            );
655
        }
656
1
    }
657
658
    #[test]
659
    #[should_panic(
660
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
661
    )]
662
1
    fn store_out_of_range_u128_max() {
663
1
        let x: u128 = u128::MAX;
664
1
        let pages = 1;
665
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
666
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
667
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
668
1
669
1
        lin_mem.store(lowest_illegal_offset, x).unwrap();
670
1
    }
671
672
    #[test]
673
    #[should_panic(
674
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
675
    )]
676
1
    fn store_empty_lineaer_memory_u8() {
677
1
        let x: u8 = u8::MAX;
678
1
        let pages = 0;
679
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
680
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
681
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
682
1
683
1
        lin_mem.store(lowest_illegal_offset, x).unwrap();
684
1
    }
685
686
    #[test]
687
    #[should_panic(
688
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
689
    )]
690
1
    fn load_out_of_range_u128_max() {
691
1
        let pages = 1;
692
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
693
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
694
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
695
1
696
1
        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
697
1
    }
698
699
    #[test]
700
    #[should_panic(
701
        expected = "called `Result::unwrap()` on an `Err` value: Trap(MemoryOrDataAccessOutOfBounds)"
702
    )]
703
1
    fn load_empty_lineaer_memory_u8() {
704
1
        let pages = 0;
705
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
706
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
707
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
708
1
709
1
        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
710
1
    }
711
712
    #[test]
713
    #[should_panic]
714
1
    fn copy_out_of_bounds() {
715
1
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
716
1
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
717
1
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
718
1
    }
719
}