Coverage Report

Created: 2025-06-23 13:53

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/build/source/src/execution/linear_memory.rs
Line
Count
Source
1
use core::{cell::UnsafeCell, mem};
2
3
use alloc::vec::Vec;
4
5
use crate::{
6
    core::{indices::MemIdx, little_endian::LittleEndianBytes},
7
    rw_spinlock::RwSpinLock,
8
    RuntimeError,
9
};
10
11
/// Implementation of the linear memory suitable for concurrent access
12
///
13
/// Implements the base for the instructions described in
14
/// <https://webassembly.github.io/spec/core/exec/instructions.html#memory-instructions>.
15
///
16
/// This linear memory implementation internally relies on a `Vec<UnsafeCell<u8>>`. Thus, the atomic
17
/// unit of information for it is a byte (`u8`). All access to the linear memory internally occurs
18
/// through pointers, avoiding the creation of shared and mut refs to the internal data completely.
19
/// This avoids undefined behavior, except for the race-condition inherent to concurrent writes.
20
/// Because of this, the [`LinearMemory::store`] function does not require `&mut self` -- `&self`
21
/// suffices.
22
///
23
/// # Notes on overflowing
24
///
25
/// All operations that rely on accessing `n` bytes starting at `index` in the linear memory have to
26
/// perform bounds checking. Thus they always have to ensure that `n + index < linear_memory.len()`
27
/// holds true (e.g. `n + index - 1` must be a valid index into `linear_memory`). However,
28
/// writing that check as is bears the danger of an overflow, assuming that `n`, `index` and
29
/// `linear_memory.len()` are the same given integer type, `n + index` can overflow, resulting in
30
/// the check passing despite the access being out of bounds!
31
///
32
/// To avoid this, the bounds checks are carefully ordered to avoid any overflows:
33
///
34
/// - First we check that `n <= linear_memory.len()` holds true, ensuring that the amount of bytes
35
///   to be accessed is indeed smaller than or equal to the linear memory's size. If this does not
36
///   hold true, continuation of the operation will yield out of bounds access in any case.
37
/// - Then, as a second check, we verify that `index <= linear_memory.len() - n`. This way we
38
///   avoid the overflow, as there is no addition. The subtraction in the left hand can not
39
///   underflow, due to the previous check (which asserts that `n` is smaller than or equal to
40
///   `linear_memory.len()`).
41
///
42
/// Combined in the given order, these two checks enable bounds checking without risking any
43
/// overflow or underflow, provided that `n`, `index` and `linear_memory.len()` are of the same
44
/// integer type.
45
///
46
/// # Notes on locking
47
///
48
/// The internal data vector of the [`LinearMemory`] is wrapped in a [`RwSpinLock`]. Despite the
49
/// name, writes to the linear memory do not require an acquisition of a write lock. Writes are
50
/// implemented through a shared ref to the internal vector, with an `UnsafeCell` to achieve
51
/// interior mutability.
52
///
53
/// However, linear memory can grow. As the linear memory is implemented via a [`Vec`], a grow can
54
/// result in the vector's internal data buffer to be copied over to a bigger, fresh allocation.
55
/// The old buffer is then freed. Combined with concurrent mutable access, this can cause
56
/// use-after-free. To avoid this, a grow operation of the linear memory acquires a write lock,
57
/// blocking all reads/writes to the linear memory in between.
58
///
59
/// # Unsafe Note
60
///
61
/// Raw pointer access is required, because concurrent mutation of the linear memory might happen
62
/// (consider the threading proposal for WASM, where multiple WASM threads access the same linear
63
/// memory at the same time). The inherent race condition results in UB w/r/t the state of the `u8`s
64
/// in the inner data. However, this is tolerable, i.e. avoiding race conditions on the state of the
65
/// linear memory can not be the task of the interpreter, but has to be fulfilled by the interpreted
66
/// bytecode itself.
67
// TODO if a memmap like operation is available, the linear memory implementation can be optimized brutally. Out-of-bound access can be mapped to userspace handled page-faults, e.g. the MMU takes over that responsibility of catching out of bounds. Grow can happen without copying of data, by mapping new pages consecutively after the current final page of the linear memory.
68
pub struct LinearMemory<const PAGE_SIZE: usize = { crate::Limits::MEM_PAGE_SIZE as usize }> {
69
    inner_data: RwSpinLock<Vec<UnsafeCell<u8>>>,
70
}
71
72
/// Type to express the page count
73
pub type PageCountTy = u16;
74
75
impl<const PAGE_SIZE: usize> LinearMemory<PAGE_SIZE> {
76
    /// Size of a page in the linear memory, measured in bytes
77
    ///
78
    /// The WASM specification demands a page size of 64 KiB, that is `65536` bytes:
79
    /// <https://webassembly.github.io/spec/core/exec/runtime.html?highlight=page#memory-instances>
80
    const PAGE_SIZE: usize = PAGE_SIZE;
81
82
    /// Create a new, empty [`LinearMemory`]
83
2
    pub fn new() -> Self {
84
2
        Self {
85
2
            inner_data: RwSpinLock::new(Vec::new()),
86
2
        }
87
2
    }
88
89
    /// Create a new, empty [`LinearMemory`]
90
1.36k
    pub fn new_with_initial_pages(pages: PageCountTy) -> Self {
91
1.36k
        let size_bytes = Self::PAGE_SIZE * pages as usize;
92
1.36k
        let mut data = Vec::with_capacity(size_bytes);
93
19.5M
        data.resize_with(size_bytes, || UnsafeCell::new(0));
94
1.36k
95
1.36k
        Self {
96
1.36k
            inner_data: RwSpinLock::new(data),
97
1.36k
        }
98
1.36k
    }
99
100
    /// Grow the [`LinearMemory`] by a number of pages
101
125
    pub fn grow(&self, pages_to_add: PageCountTy) {
102
125
        let mut lock_guard = self.inner_data.write();
103
125
        let prior_length_bytes = lock_guard.len();
104
125
        let new_length_bytes = prior_length_bytes + Self::PAGE_SIZE * pages_to_add as usize;
105
154M
        lock_guard.resize_with(new_length_bytes, || UnsafeCell::new(0));
106
125
    }
107
108
    /// Get the number of pages currently allocated to this [`LinearMemory`]
109
145
    pub fn pages(&self) -> PageCountTy {
110
145
        PageCountTy::try_from(self.inner_data.read().len() / PAGE_SIZE).unwrap()
111
145
    }
112
113
    /// Get the length in bytes currently allocated to this [`LinearMemory`]
114
    // TODO remove this op
115
216
    pub fn len(&self) -> usize {
116
216
        self.inner_data.read().len()
117
216
    }
118
119
    /// At a given index, store a datum in the [`LinearMemory`]
120
2.65k
    pub fn store<const N: usize, T: LittleEndianBytes<N>>(
121
2.65k
        &self,
122
2.65k
        index: MemIdx,
123
2.65k
        value: T,
124
2.65k
    ) -> Result<(), RuntimeError> {
125
2.65k
        let value_size = mem::size_of::<T>();
126
2.65k
127
2.65k
        // Unless someone implements something wrong like `impl LittleEndianBytes<3> for f64`, this
128
2.65k
        // check is already guaranteed at the type level. Therefore only a debug_assert.
129
2.65k
        debug_assert_eq!(value_size, N, 
"value size must match const generic N"0
);
130
131
2.65k
        let lock_guard = self.inner_data.read();
132
2.65k
133
2.65k
        // A value must fit into the linear memory
134
2.65k
        if value_size > lock_guard.len() {
135
5
            error!(
"value does not fit into linear memory"0
);
136
5
            return Err(RuntimeError::MemoryAccessOutOfBounds);
137
2.65k
        }
138
2.65k
139
2.65k
        // The following statement must be true
140
2.65k
        // `index + value_size <= lock_guard.len()`
141
2.65k
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
142
2.65k
        // underflow because of the previous check.
143
2.65k
144
2.65k
        if (index) > lock_guard.len() - value_size {
145
86
            error!(
"value write would extend beyond the end of the linear memory"0
);
146
86
            return Err(RuntimeError::MemoryAccessOutOfBounds);
147
2.56k
        }
148
2.56k
149
2.56k
        // TODO this unwrap can not fail, maybe use unwrap_unchecked?
150
2.56k
        let ptr = lock_guard.get(index).unwrap().get();
151
2.56k
        let bytes = value.to_le_bytes(); //
152
2.56k
153
2.56k
        // Safety argument:
154
2.56k
        //
155
2.56k
        // - nonoverlapping is guaranteed, because `src` is a pointer to a stack allocated array,
156
2.56k
        //   while the destination is heap allocated Vec
157
2.56k
        // - the first check above guarantee that `src` fits into the destination
158
2.56k
        // - the second check above guarantees that even with the offset in `index`, `src` does not
159
2.56k
        //   extend beyond the destinations last `UnsafeCell<u8>`
160
2.56k
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
161
2.56k
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
162
2.56k
        //   references
163
2.56k
        unsafe { ptr.copy_from_nonoverlapping(bytes.as_ref().as_ptr(), value_size) }
164
2.56k
165
2.56k
        Ok(())
166
2.65k
    }
167
168
    /// From a given index, load a datum in the [`LinearMemory`]
169
929k
    pub fn load<const N: usize, T: LittleEndianBytes<N>>(
170
929k
        &self,
171
929k
        index: MemIdx,
172
929k
    ) -> Result<T, RuntimeError> {
173
929k
        let value_size = mem::size_of::<T>();
174
929k
175
929k
        // Unless someone implements something wrong like `LittleEndianBytes<3> for i8`, this
176
929k
        // check is already guaranteed at the type level. Therefore only a debug_assert.
177
929k
        debug_assert_eq!(value_size, N, 
"value size must match const generic N"0
);
178
179
929k
        let lock_guard = self.inner_data.read();
180
929k
181
929k
        // A value must fit into the linear memory
182
929k
        if value_size > lock_guard.len() {
183
5
            error!(
"value does not fit into linear memory"0
);
184
5
            return Err(RuntimeError::MemoryAccessOutOfBounds);
185
929k
        }
186
929k
187
929k
        // The following statement must be true
188
929k
        // `index + value_size <= lock_guard.len()`
189
929k
        // This check verifies it, while avoiding the possible overflow. The subtraction can not
190
929k
        // underflow because of the previous assert.
191
929k
192
929k
        if (index) > lock_guard.len() - value_size {
193
137
            error!(
"value read would extend beyond the end of the linear_memory"0
);
194
137
            return Err(RuntimeError::MemoryAccessOutOfBounds);
195
929k
        }
196
929k
197
929k
        let ptr = lock_guard.get(index).unwrap().get();
198
929k
        let mut bytes = [0; N];
199
929k
200
929k
        // Safety argument:
201
929k
        //
202
929k
        // - nonoverlapping is guaranteed, because `dest` is a pointer to a stack allocated array,
203
929k
        //   while the source is heap allocated Vec
204
929k
        // - the first assert above guarantee that source is bigger than `dest`
205
929k
        // - the second assert above guarantees that even with the offset in `index`, `dest` does
206
929k
        //   not extend beyond the destinations last `UnsafeCell<u8>` in source
207
929k
        // - the use of `UnsafeCell` avoids any `&` or `&mut` to ever be created on any of the `u8`s
208
929k
        //   contained in the `UnsafeCell`s, so no UB is created through the existence of unsound
209
929k
        //   references
210
929k
        unsafe { ptr.copy_to_nonoverlapping(bytes.as_mut_ptr(), bytes.len()) };
211
929k
        Ok(T::from_le_bytes(bytes))
212
929k
    }
213
214
    /// Implementation of the behavior described in
215
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>.
216
    /// Note, that the WASM spec defines the behavior by recursion, while our implementation uses
217
    /// the memset like [`core::ptr::write_bytes`].
218
    ///
219
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-fill>
220
123
    pub fn fill(&self, index: MemIdx, data_byte: u8, count: MemIdx) -> Result<(), RuntimeError> {
221
123
        let lock_guard = self.inner_data.read();
222
123
223
123
        /* check destination for out of bounds access */
224
123
        // Specification step 12.
225
123
        if count > lock_guard.len() {
226
1
            error!(
"fill count is bigger than the linear memory"0
);
227
1
            return Err(RuntimeError::MemoryAccessOutOfBounds);
228
122
        }
229
122
230
122
        // Specification step 12.
231
122
        if index > lock_guard.len() - count {
232
7
            error!(
"fill extends beyond the linear memory's end"0
);
233
7
            return Err(RuntimeError::MemoryAccessOutOfBounds);
234
115
        }
235
115
236
115
        /* check if there is anything to be done */
237
115
        // Specification step 13.
238
115
        if count == 0 {
239
4
            return Ok(());
240
111
        }
241
111
242
111
        let ptr = lock_guard[index].get();
243
111
        unsafe {
244
111
            // Specification step 14-21.
245
111
            ptr.write_bytes(data_byte, count);
246
111
        }
247
111
248
111
        Ok(())
249
123
    }
250
251
    /// Copy `count` bytes from one region in the linear memory to another region in the same or a
252
    /// different linear memory
253
    ///
254
    /// - Both regions may overlap
255
    /// - Copies the `count` bytes starting from `source_index`, overwriting the `count` bytes
256
    ///   starting from `destination_index`
257
    ///
258
    /// <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-copy>
259
167
    pub fn copy(
260
167
        &self,
261
167
        destination_index: MemIdx,
262
167
        source_mem: &Self,
263
167
        source_index: MemIdx,
264
167
        count: MemIdx,
265
167
    ) -> Result<(), RuntimeError> {
266
167
        // self is the destination
267
167
        let lock_guard_self = self.inner_data.read();
268
167
269
167
        // other is the source
270
167
        let lock_guard_other = source_mem.inner_data.read();
271
167
272
167
        /* check destination for out of bounds access */
273
167
        // Specification step 12.
274
167
        if count > lock_guard_self.len() {
275
4
            error!(
"copy count is bigger than the destination linear memory"0
);
276
4
            return Err(RuntimeError::MemoryAccessOutOfBounds);
277
163
        }
278
163
279
163
        // Specification step 12.
280
163
        if destination_index > lock_guard_self.len() - count {
281
18
            error!(
"copy destination extends beyond the linear memory's end"0
);
282
18
            return Err(RuntimeError::MemoryAccessOutOfBounds);
283
145
        }
284
145
285
145
        /* check source for out of bounds access */
286
145
        // Specification step 12.
287
145
        if count > lock_guard_other.len() {
288
1
            error!(
"copy count is bigger than the source linear memory"0
);
289
1
            return Err(RuntimeError::MemoryAccessOutOfBounds);
290
144
        }
291
144
292
144
        // Specification step 12.
293
144
        if source_index > lock_guard_other.len() - count {
294
10
            error!(
"copy source extends beyond the linear memory's end"0
);
295
10
            return Err(RuntimeError::MemoryAccessOutOfBounds);
296
134
        }
297
134
298
134
        /* check if there is anything to be done */
299
134
        // Specification step 13.
300
134
        if count == 0 {
301
6
            return Ok(());
302
128
        }
303
128
304
128
        // acquire pointers
305
128
        let destination_ptr = lock_guard_self[destination_index].get();
306
128
        let source_ptr = lock_guard_other[source_index].get();
307
128
308
128
        // copy the data
309
128
        unsafe {
310
128
            // TODO investigate if it is worth to use a conditional `copy_from_nonoverlapping`
311
128
            // if the non-overlapping can be confirmed (and the count is bigger than a certain
312
128
            // threshold).
313
128
314
128
            // Specification step 14-15.
315
128
            destination_ptr.copy_from(source_ptr, count);
316
128
        }
317
128
318
128
        Ok(())
319
167
    }
320
321
    // Rationale behind having `source_index` and `count` when the callsite could also just create a subslice for `source_data`? Have all the index error checks in one place.
322
    //
323
    // <https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-memory-mathsf-memory-init-x>
324
229
    pub fn init(
325
229
        &self,
326
229
        destination_index: MemIdx,
327
229
        source_data: &[u8],
328
229
        source_index: MemIdx,
329
229
        count: MemIdx,
330
229
    ) -> Result<(), RuntimeError> {
331
229
        // self is the destination
332
229
        let lock_guard_self = self.inner_data.read();
333
229
        let data_len = source_data.len();
334
229
335
229
        /* check source for out of bounds access */
336
229
        // Specification step 16.
337
229
        if count > data_len {
338
14
            error!(
"init count is bigger than the data instance"0
);
339
14
            return Err(RuntimeError::MemoryAccessOutOfBounds);
340
215
        }
341
215
342
215
        // Specification step 16.
343
215
        if source_index > data_len - count {
344
3
            error!(
"init source extends beyond the data instance's end"0
);
345
3
            return Err(RuntimeError::MemoryAccessOutOfBounds);
346
212
        }
347
212
348
212
        /* check destination for out of bounds access */
349
212
        // Specification step 16.
350
212
        if count > lock_guard_self.len() {
351
4
            error!(
"init count is bigger than the linear memory"0
);
352
4
            return Err(RuntimeError::MemoryAccessOutOfBounds);
353
208
        }
354
208
355
208
        // Specification step 16.
356
208
        if destination_index > lock_guard_self.len() - count {
357
18
            error!(
"init extends beyond the linear memory's end"0
);
358
18
            return Err(RuntimeError::MemoryAccessOutOfBounds);
359
190
        }
360
190
361
190
        /* check if there is anything to be done */
362
190
        // Specification step 17.
363
190
        if count == 0 {
364
34
            return Ok(());
365
156
        }
366
156
367
156
        // acquire pointers
368
156
        let destination_ptr = lock_guard_self[destination_index].get();
369
156
        let source_ptr = &source_data[source_index];
370
156
371
156
        // copy the data
372
156
        unsafe {
373
156
            // Specification step 18-27.
374
156
            destination_ptr.copy_from_nonoverlapping(source_ptr, count);
375
156
        }
376
156
377
156
        Ok(())
378
229
    }
379
}
380
381
impl<const PAGE_SIZE: usize> core::fmt::Debug for LinearMemory<PAGE_SIZE> {
382
1
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
383
1
        write!(f, "LinearMemory {{ inner_data: [ ")
?0
;
384
1
        let lock_guard = self.inner_data.read();
385
1
        let mut iter = lock_guard.iter();
386
387
1
        if let Some(first_byte_uc) = iter.next() {
388
1
            write!(f, "{}", unsafe { *first_byte_uc.get() })
?0
;
389
0
        }
390
391
256
        for 
uc255
in iter {
392
            // Safety argument:
393
            //
394
            // TODO
395
255
            let byte = unsafe { *uc.get() };
396
255
397
255
            write!(f, ", {byte}")
?0
;
398
        }
399
1
        write!(f, " ] }}")
400
1
    }
401
}
402
403
impl<const PAGE_SIZE: usize> Default for LinearMemory<PAGE_SIZE> {
404
0
    fn default() -> Self {
405
0
        Self::new()
406
0
    }
407
}
408
409
#[cfg(test)]
410
mod test {
411
    use alloc::format;
412
413
    use super::*;
414
415
    const PAGE_SIZE: usize = 1 << 8;
416
    const PAGES: PageCountTy = 2;
417
418
    #[test]
419
1
    fn new_constructor() {
420
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
421
1
        assert_eq!(lin_mem.pages(), 0);
422
1
    }
423
424
    #[test]
425
1
    fn new_grow() {
426
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new();
427
1
        lin_mem.grow(1);
428
1
        assert_eq!(lin_mem.pages(), 1);
429
1
    }
430
431
    #[test]
432
1
    fn debug_print() {
433
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
434
1
        assert_eq!(lin_mem.pages(), 1);
435
436
1
        let expected_length = "LinearMemory { inner_data: [  ] }".len() + PAGE_SIZE * "0, ".len();
437
1
        let tol = 2;
438
1
439
1
        let debug_repr = format!("{lin_mem:?}");
440
1
        let lower_bound = expected_length - tol;
441
1
        let upper_bound = expected_length + tol;
442
1
        assert!((lower_bound..upper_bound).contains(&debug_repr.len()));
443
1
    }
444
445
    #[test]
446
1
    fn roundtrip_normal_range_i8_neg127() {
447
1
        let x: i8 = -127;
448
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<i8>();
449
255
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
450
255
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
451
255
452
255
            lin_mem.store(offset, x).unwrap();
453
255
454
255
            assert_eq!(
455
255
                lin_mem
456
255
                    .load::<{ core::mem::size_of::<i8>() }, i8>(offset)
457
255
                    .unwrap(),
458
                x,
459
0
                "load store roundtrip for {x:?} failed!"
460
            );
461
        }
462
1
    }
463
464
    #[test]
465
1
    fn roundtrip_normal_range_f32_13() {
466
1
        let x: f32 = 13.0;
467
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f32>();
468
252
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
469
252
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
470
252
471
252
            lin_mem.store(offset, x).unwrap();
472
252
473
252
            assert_eq!(
474
252
                lin_mem
475
252
                    .load::<{ core::mem::size_of::<f32>() }, f32>(offset)
476
252
                    .unwrap(),
477
                x,
478
0
                "load store roundtrip for {x:?} failed!"
479
            );
480
        }
481
1
    }
482
483
    #[test]
484
1
    fn roundtrip_normal_range_f64_min() {
485
1
        let x: f64 = f64::MIN;
486
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
487
248
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
488
248
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
489
248
490
248
            lin_mem.store(offset, x).unwrap();
491
248
492
248
            assert_eq!(
493
248
                lin_mem
494
248
                    .load::<{ core::mem::size_of::<f64>() }, f64>(offset)
495
248
                    .unwrap(),
496
                x,
497
0
                "load store roundtrip for {x:?} failed!"
498
            );
499
        }
500
1
    }
501
502
    #[test]
503
1
    fn roundtrip_normal_range_f64_nan() {
504
1
        let x: f64 = f64::NAN;
505
1
        let highest_legal_offset = PAGE_SIZE - mem::size_of::<f64>();
506
248
        for offset in 0..
MemIdx::try_from(highest_legal_offset).unwrap()1
{
507
248
            let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(PAGES);
508
248
509
248
            lin_mem.store(offset, x).unwrap();
510
248
511
248
            assert!(
512
248
                lin_mem
513
248
                    .load::<{ core::mem::size_of::<f64>() }, f64>(offset)
514
248
                    .unwrap()
515
248
                    .is_nan(),
516
0
                "load store roundtrip for {x:?} failed!"
517
            );
518
        }
519
1
    }
520
521
    #[test]
522
    #[should_panic(
523
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
524
    )]
525
1
    fn store_out_of_range_u128_max() {
526
1
        let x: u128 = u128::MAX;
527
1
        let pages = 1;
528
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
529
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
530
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
531
1
532
1
        lin_mem.store(lowest_illegal_offset, x).unwrap();
533
1
    }
534
535
    #[test]
536
    #[should_panic(
537
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
538
    )]
539
1
    fn store_empty_lineaer_memory_u8() {
540
1
        let x: u8 = u8::MAX;
541
1
        let pages = 0;
542
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
543
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
544
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
545
1
546
1
        lin_mem.store(lowest_illegal_offset, x).unwrap();
547
1
    }
548
549
    #[test]
550
    #[should_panic(
551
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
552
    )]
553
1
    fn load_out_of_range_u128_max() {
554
1
        let pages = 1;
555
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u128>() + 1;
556
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
557
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
558
1
559
1
        let _x: u128 = lin_mem.load(lowest_illegal_offset).unwrap();
560
1
    }
561
562
    #[test]
563
    #[should_panic(
564
        expected = "called `Result::unwrap()` on an `Err` value: MemoryAccessOutOfBounds"
565
    )]
566
1
    fn load_empty_lineaer_memory_u8() {
567
1
        let pages = 0;
568
1
        let lowest_illegal_offset = PAGE_SIZE - mem::size_of::<u8>() + 1;
569
1
        let lowest_illegal_offset = MemIdx::try_from(lowest_illegal_offset).unwrap();
570
1
        let lin_mem = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(pages);
571
1
572
1
        let _x: u8 = lin_mem.load(lowest_illegal_offset).unwrap();
573
1
    }
574
575
    #[test]
576
    #[should_panic]
577
1
    fn copy_out_of_bounds() {
578
1
        let lin_mem_0 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(2);
579
1
        let lin_mem_1 = LinearMemory::<PAGE_SIZE>::new_with_initial_pages(1);
580
1
        lin_mem_0.copy(0, &lin_mem_1, 0, PAGE_SIZE + 1).unwrap();
581
1
    }
582
}