/build/cargo-vendor-dir/memchr-2.7.1/src/arch/all/memchr.rs
Line | Count | Source (jump to first uncovered line) |
1 | | /*! |
2 | | Provides architecture independent implementations of `memchr` and friends. |
3 | | |
4 | | The main types in this module are [`One`], [`Two`] and [`Three`]. They are for |
5 | | searching for one, two or three distinct bytes, respectively, in a haystack. |
6 | | Each type also has corresponding double ended iterators. These searchers |
7 | | are typically slower than hand-coded vector routines accomplishing the same |
8 | | task, but are also typically faster than naive scalar code. These routines |
9 | | effectively work by treating a `usize` as a vector of 8-bit lanes, and thus |
10 | | achieve some level of data parallelism even without explicit vector support. |
11 | | |
12 | | The `One` searcher also provides a [`One::count`] routine for efficiently |
13 | | counting the number of times a single byte occurs in a haystack. This is |
14 | | useful, for example, for counting the number of lines in a haystack. This |
15 | | routine exists because it is usually faster, especially with a high match |
16 | | count, than using [`One::find`] repeatedly. ([`OneIter`] specializes its |
17 | | `Iterator::count` implementation to use this routine.) |
18 | | |
19 | | Only one, two and three bytes are supported because three bytes is about |
20 | | the point where one sees diminishing returns. Beyond this point it's |
21 | | probably (but not necessarily) better to just use a simple `[bool; 256]` array |
22 | | or similar. However, it depends mightily on the specific work-load and the |
23 | | expected match frequency. |
24 | | */ |
25 | | |
26 | | use crate::{arch::generic::memchr as generic, ext::Pointer}; |
27 | | |
28 | | /// The number of bytes in a single `usize` value. |
29 | | const USIZE_BYTES: usize = (usize::BITS / 8) as usize; |
30 | | /// The bits that must be zero for a `*const usize` to be properly aligned. |
31 | | const USIZE_ALIGN: usize = USIZE_BYTES - 1; |
32 | | |
33 | | /// Finds all occurrences of a single byte in a haystack. |
34 | | #[derive(Clone, Copy, Debug)] |
35 | | pub struct One { |
36 | | s1: u8, |
37 | | v1: usize, |
38 | | } |
39 | | |
40 | | impl One { |
41 | | /// The number of bytes we examine per each iteration of our search loop. |
42 | | const LOOP_BYTES: usize = 2 * USIZE_BYTES; |
43 | | |
44 | | /// Create a new searcher that finds occurrences of the byte given. |
45 | | #[inline] |
46 | 0 | pub fn new(needle: u8) -> One { |
47 | 0 | One { s1: needle, v1: splat(needle) } |
48 | 0 | } |
49 | | |
50 | | /// A test-only routine so that we can bundle a bunch of quickcheck |
51 | | /// properties into a single macro. Basically, this provides a constructor |
52 | | /// that makes it identical to most other memchr implementations, which |
53 | | /// have fallible constructors. |
54 | | #[cfg(test)] |
55 | | pub(crate) fn try_new(needle: u8) -> Option<One> { |
56 | | Some(One::new(needle)) |
57 | | } |
58 | | |
59 | | /// Return the first occurrence of the needle in the given haystack. If no |
60 | | /// such occurrence exists, then `None` is returned. |
61 | | /// |
62 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
63 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
64 | | #[inline] |
65 | 0 | pub fn find(&self, haystack: &[u8]) -> Option<usize> { |
66 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
67 | 0 | // falls within the bounds of the start and end pointers. |
68 | 0 | unsafe { |
69 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
70 | 0 | self.find_raw(s, e) |
71 | 0 | }) |
72 | 0 | } |
73 | 0 | } |
74 | | |
75 | | /// Return the last occurrence of the needle in the given haystack. If no |
76 | | /// such occurrence exists, then `None` is returned. |
77 | | /// |
78 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
79 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
80 | | #[inline] |
81 | 0 | pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { |
82 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
83 | 0 | // falls within the bounds of the start and end pointers. |
84 | 0 | unsafe { |
85 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
86 | 0 | self.rfind_raw(s, e) |
87 | 0 | }) |
88 | 0 | } |
89 | 0 | } |
90 | | |
91 | | /// Counts all occurrences of this byte in the given haystack. |
92 | | #[inline] |
93 | 0 | pub fn count(&self, haystack: &[u8]) -> usize { |
94 | 0 | // SAFETY: All of our pointers are derived directly from a borrowed |
95 | 0 | // slice, which is guaranteed to be valid. |
96 | 0 | unsafe { |
97 | 0 | let start = haystack.as_ptr(); |
98 | 0 | let end = start.add(haystack.len()); |
99 | 0 | self.count_raw(start, end) |
100 | 0 | } |
101 | 0 | } |
102 | | |
103 | | /// Like `find`, but accepts and returns raw pointers. |
104 | | /// |
105 | | /// When a match is found, the pointer returned is guaranteed to be |
106 | | /// `>= start` and `< end`. |
107 | | /// |
108 | | /// This routine is useful if you're already using raw pointers and would |
109 | | /// like to avoid converting back to a slice before executing a search. |
110 | | /// |
111 | | /// # Safety |
112 | | /// |
113 | | /// * Both `start` and `end` must be valid for reads. |
114 | | /// * Both `start` and `end` must point to an initialized value. |
115 | | /// * Both `start` and `end` must point to the same allocated object and |
116 | | /// must either be in bounds or at most one byte past the end of the |
117 | | /// allocated object. |
118 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
119 | | /// object. |
120 | | /// * The distance between `start` and `end` must not overflow `isize`. |
121 | | /// * The distance being in bounds must not rely on "wrapping around" the |
122 | | /// address space. |
123 | | /// |
124 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
125 | | /// In that case, `None` will always be returned. |
126 | | #[inline] |
127 | 0 | pub unsafe fn find_raw( |
128 | 0 | &self, |
129 | 0 | start: *const u8, |
130 | 0 | end: *const u8, |
131 | 0 | ) -> Option<*const u8> { |
132 | 0 | if start >= end { |
133 | 0 | return None; |
134 | 0 | } |
135 | 0 | let confirm = |b| self.confirm(b); |
136 | 0 | let len = end.distance(start); |
137 | 0 | if len < USIZE_BYTES { |
138 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
139 | 0 | } |
140 | 0 |
|
141 | 0 | // The start of the search may not be aligned to `*const usize`, |
142 | 0 | // so we do an unaligned load here. |
143 | 0 | let chunk = start.cast::<usize>().read_unaligned(); |
144 | 0 | if self.has_needle(chunk) { |
145 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
146 | 0 | } |
147 | 0 |
|
148 | 0 | // And now we start our search at a guaranteed aligned position. |
149 | 0 | // The first iteration of the loop below will overlap with the |
150 | 0 | // unaligned chunk above in cases where the search starts at an |
151 | 0 | // unaligned offset, but that's okay as we're only here if that |
152 | 0 | // above didn't find a match. |
153 | 0 | let mut cur = |
154 | 0 | start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); |
155 | 0 | debug_assert!(cur > start); |
156 | 0 | if len <= One::LOOP_BYTES { |
157 | 0 | return generic::fwd_byte_by_byte(cur, end, confirm); |
158 | 0 | } |
159 | 0 | debug_assert!(end.sub(One::LOOP_BYTES) >= start); |
160 | 0 | while cur <= end.sub(One::LOOP_BYTES) { |
161 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
162 | | |
163 | 0 | let a = cur.cast::<usize>().read(); |
164 | 0 | let b = cur.add(USIZE_BYTES).cast::<usize>().read(); |
165 | 0 | if self.has_needle(a) || self.has_needle(b) { |
166 | 0 | break; |
167 | 0 | } |
168 | 0 | cur = cur.add(One::LOOP_BYTES); |
169 | | } |
170 | 0 | generic::fwd_byte_by_byte(cur, end, confirm) |
171 | 0 | } |
172 | | |
173 | | /// Like `rfind`, but accepts and returns raw pointers. |
174 | | /// |
175 | | /// When a match is found, the pointer returned is guaranteed to be |
176 | | /// `>= start` and `< end`. |
177 | | /// |
178 | | /// This routine is useful if you're already using raw pointers and would |
179 | | /// like to avoid converting back to a slice before executing a search. |
180 | | /// |
181 | | /// # Safety |
182 | | /// |
183 | | /// * Both `start` and `end` must be valid for reads. |
184 | | /// * Both `start` and `end` must point to an initialized value. |
185 | | /// * Both `start` and `end` must point to the same allocated object and |
186 | | /// must either be in bounds or at most one byte past the end of the |
187 | | /// allocated object. |
188 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
189 | | /// object. |
190 | | /// * The distance between `start` and `end` must not overflow `isize`. |
191 | | /// * The distance being in bounds must not rely on "wrapping around" the |
192 | | /// address space. |
193 | | /// |
194 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
195 | | /// In that case, `None` will always be returned. |
196 | | #[inline] |
197 | 0 | pub unsafe fn rfind_raw( |
198 | 0 | &self, |
199 | 0 | start: *const u8, |
200 | 0 | end: *const u8, |
201 | 0 | ) -> Option<*const u8> { |
202 | 0 | if start >= end { |
203 | 0 | return None; |
204 | 0 | } |
205 | 0 | let confirm = |b| self.confirm(b); |
206 | 0 | let len = end.distance(start); |
207 | 0 | if len < USIZE_BYTES { |
208 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
209 | 0 | } |
210 | 0 |
|
211 | 0 | let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); |
212 | 0 | if self.has_needle(chunk) { |
213 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
214 | 0 | } |
215 | 0 |
|
216 | 0 | let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); |
217 | 0 | debug_assert!(start <= cur && cur <= end); |
218 | 0 | if len <= One::LOOP_BYTES { |
219 | 0 | return generic::rev_byte_by_byte(start, cur, confirm); |
220 | 0 | } |
221 | 0 | while cur >= start.add(One::LOOP_BYTES) { |
222 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
223 | | |
224 | 0 | let a = cur.sub(2 * USIZE_BYTES).cast::<usize>().read(); |
225 | 0 | let b = cur.sub(1 * USIZE_BYTES).cast::<usize>().read(); |
226 | 0 | if self.has_needle(a) || self.has_needle(b) { |
227 | 0 | break; |
228 | 0 | } |
229 | 0 | cur = cur.sub(One::LOOP_BYTES); |
230 | | } |
231 | 0 | generic::rev_byte_by_byte(start, cur, confirm) |
232 | 0 | } |
233 | | |
234 | | /// Counts all occurrences of this byte in the given haystack represented |
235 | | /// by raw pointers. |
236 | | /// |
237 | | /// This routine is useful if you're already using raw pointers and would |
238 | | /// like to avoid converting back to a slice before executing a search. |
239 | | /// |
240 | | /// # Safety |
241 | | /// |
242 | | /// * Both `start` and `end` must be valid for reads. |
243 | | /// * Both `start` and `end` must point to an initialized value. |
244 | | /// * Both `start` and `end` must point to the same allocated object and |
245 | | /// must either be in bounds or at most one byte past the end of the |
246 | | /// allocated object. |
247 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
248 | | /// object. |
249 | | /// * The distance between `start` and `end` must not overflow `isize`. |
250 | | /// * The distance being in bounds must not rely on "wrapping around" the |
251 | | /// address space. |
252 | | /// |
253 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
254 | | /// In that case, `0` will always be returned. |
255 | | #[inline] |
256 | 0 | pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { |
257 | 0 | if start >= end { |
258 | 0 | return 0; |
259 | 0 | } |
260 | 0 | // Sadly I couldn't get the SWAR approach to work here, so we just do |
261 | 0 | // one byte at a time for now. PRs to improve this are welcome. |
262 | 0 | let mut ptr = start; |
263 | 0 | let mut count = 0; |
264 | 0 | while ptr < end { |
265 | 0 | count += (ptr.read() == self.s1) as usize; |
266 | 0 | ptr = ptr.offset(1); |
267 | 0 | } |
268 | 0 | count |
269 | 0 | } |
270 | | |
271 | | /// Returns an iterator over all occurrences of the needle byte in the |
272 | | /// given haystack. |
273 | | /// |
274 | | /// The iterator returned implements `DoubleEndedIterator`. This means it |
275 | | /// can also be used to find occurrences in reverse order. |
276 | 0 | pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { |
277 | 0 | OneIter { searcher: self, it: generic::Iter::new(haystack) } |
278 | 0 | } |
279 | | |
280 | | #[inline(always)] |
281 | 0 | fn has_needle(&self, chunk: usize) -> bool { |
282 | 0 | has_zero_byte(self.v1 ^ chunk) |
283 | 0 | } |
284 | | |
285 | | #[inline(always)] |
286 | 0 | fn confirm(&self, haystack_byte: u8) -> bool { |
287 | 0 | self.s1 == haystack_byte |
288 | 0 | } |
289 | | } |
290 | | |
291 | | /// An iterator over all occurrences of a single byte in a haystack. |
292 | | /// |
293 | | /// This iterator implements `DoubleEndedIterator`, which means it can also be |
294 | | /// used to find occurrences in reverse order. |
295 | | /// |
296 | | /// This iterator is created by the [`One::iter`] method. |
297 | | /// |
298 | | /// The lifetime parameters are as follows: |
299 | | /// |
300 | | /// * `'a` refers to the lifetime of the underlying [`One`] searcher. |
301 | | /// * `'h` refers to the lifetime of the haystack being searched. |
302 | | #[derive(Clone, Debug)] |
303 | | pub struct OneIter<'a, 'h> { |
304 | | /// The underlying memchr searcher. |
305 | | searcher: &'a One, |
306 | | /// Generic iterator implementation. |
307 | | it: generic::Iter<'h>, |
308 | | } |
309 | | |
310 | | impl<'a, 'h> Iterator for OneIter<'a, 'h> { |
311 | | type Item = usize; |
312 | | |
313 | | #[inline] |
314 | 0 | fn next(&mut self) -> Option<usize> { |
315 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
316 | 0 | // and end pointers, but we guarantee that any pointer returned by |
317 | 0 | // 'find_raw' falls within the bounds of the start and end pointer. |
318 | 0 | unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } |
319 | 0 | } |
320 | | |
321 | | #[inline] |
322 | 0 | fn count(self) -> usize { |
323 | 0 | self.it.count(|s, e| { |
324 | 0 | // SAFETY: We rely on our generic iterator to return valid start |
325 | 0 | // and end pointers. |
326 | 0 | unsafe { self.searcher.count_raw(s, e) } |
327 | 0 | }) |
328 | 0 | } |
329 | | |
330 | | #[inline] |
331 | 0 | fn size_hint(&self) -> (usize, Option<usize>) { |
332 | 0 | self.it.size_hint() |
333 | 0 | } |
334 | | } |
335 | | |
336 | | impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { |
337 | | #[inline] |
338 | 0 | fn next_back(&mut self) -> Option<usize> { |
339 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
340 | 0 | // and end pointers, but we guarantee that any pointer returned by |
341 | 0 | // 'rfind_raw' falls within the bounds of the start and end pointer. |
342 | 0 | unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } |
343 | 0 | } |
344 | | } |
345 | | |
346 | | /// Finds all occurrences of two bytes in a haystack. |
347 | | /// |
348 | | /// That is, this reports matches of one of two possible bytes. For example, |
349 | | /// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, |
350 | | /// `4` and `5`. |
351 | | #[derive(Clone, Copy, Debug)] |
352 | | pub struct Two { |
353 | | s1: u8, |
354 | | s2: u8, |
355 | | v1: usize, |
356 | | v2: usize, |
357 | | } |
358 | | |
359 | | impl Two { |
360 | | /// Create a new searcher that finds occurrences of the two needle bytes |
361 | | /// given. |
362 | | #[inline] |
363 | 0 | pub fn new(needle1: u8, needle2: u8) -> Two { |
364 | 0 | Two { |
365 | 0 | s1: needle1, |
366 | 0 | s2: needle2, |
367 | 0 | v1: splat(needle1), |
368 | 0 | v2: splat(needle2), |
369 | 0 | } |
370 | 0 | } |
371 | | |
372 | | /// A test-only routine so that we can bundle a bunch of quickcheck |
373 | | /// properties into a single macro. Basically, this provides a constructor |
374 | | /// that makes it identical to most other memchr implementations, which |
375 | | /// have fallible constructors. |
376 | | #[cfg(test)] |
377 | | pub(crate) fn try_new(needle1: u8, needle2: u8) -> Option<Two> { |
378 | | Some(Two::new(needle1, needle2)) |
379 | | } |
380 | | |
381 | | /// Return the first occurrence of one of the needle bytes in the given |
382 | | /// haystack. If no such occurrence exists, then `None` is returned. |
383 | | /// |
384 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
385 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
386 | | #[inline] |
387 | 0 | pub fn find(&self, haystack: &[u8]) -> Option<usize> { |
388 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
389 | 0 | // falls within the bounds of the start and end pointers. |
390 | 0 | unsafe { |
391 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
392 | 0 | self.find_raw(s, e) |
393 | 0 | }) |
394 | 0 | } |
395 | 0 | } |
396 | | |
397 | | /// Return the last occurrence of one of the needle bytes in the given |
398 | | /// haystack. If no such occurrence exists, then `None` is returned. |
399 | | /// |
400 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
401 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
402 | | #[inline] |
403 | 0 | pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { |
404 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
405 | 0 | // falls within the bounds of the start and end pointers. |
406 | 0 | unsafe { |
407 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
408 | 0 | self.rfind_raw(s, e) |
409 | 0 | }) |
410 | 0 | } |
411 | 0 | } |
412 | | |
413 | | /// Like `find`, but accepts and returns raw pointers. |
414 | | /// |
415 | | /// When a match is found, the pointer returned is guaranteed to be |
416 | | /// `>= start` and `< end`. |
417 | | /// |
418 | | /// This routine is useful if you're already using raw pointers and would |
419 | | /// like to avoid converting back to a slice before executing a search. |
420 | | /// |
421 | | /// # Safety |
422 | | /// |
423 | | /// * Both `start` and `end` must be valid for reads. |
424 | | /// * Both `start` and `end` must point to an initialized value. |
425 | | /// * Both `start` and `end` must point to the same allocated object and |
426 | | /// must either be in bounds or at most one byte past the end of the |
427 | | /// allocated object. |
428 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
429 | | /// object. |
430 | | /// * The distance between `start` and `end` must not overflow `isize`. |
431 | | /// * The distance being in bounds must not rely on "wrapping around" the |
432 | | /// address space. |
433 | | /// |
434 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
435 | | /// In that case, `None` will always be returned. |
436 | | #[inline] |
437 | 0 | pub unsafe fn find_raw( |
438 | 0 | &self, |
439 | 0 | start: *const u8, |
440 | 0 | end: *const u8, |
441 | 0 | ) -> Option<*const u8> { |
442 | 0 | if start >= end { |
443 | 0 | return None; |
444 | 0 | } |
445 | 0 | let confirm = |b| self.confirm(b); |
446 | 0 | let len = end.distance(start); |
447 | 0 | if len < USIZE_BYTES { |
448 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
449 | 0 | } |
450 | 0 |
|
451 | 0 | // The start of the search may not be aligned to `*const usize`, |
452 | 0 | // so we do an unaligned load here. |
453 | 0 | let chunk = start.cast::<usize>().read_unaligned(); |
454 | 0 | if self.has_needle(chunk) { |
455 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
456 | 0 | } |
457 | 0 |
|
458 | 0 | // And now we start our search at a guaranteed aligned position. |
459 | 0 | // The first iteration of the loop below will overlap with the |
460 | 0 | // unaligned chunk above in cases where the search starts at an |
461 | 0 | // unaligned offset, but that's okay as we're only here if that |
462 | 0 | // above didn't find a match. |
463 | 0 | let mut cur = |
464 | 0 | start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); |
465 | 0 | debug_assert!(cur > start); |
466 | 0 | debug_assert!(end.sub(USIZE_BYTES) >= start); |
467 | 0 | while cur <= end.sub(USIZE_BYTES) { |
468 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
469 | | |
470 | 0 | let chunk = cur.cast::<usize>().read(); |
471 | 0 | if self.has_needle(chunk) { |
472 | 0 | break; |
473 | 0 | } |
474 | 0 | cur = cur.add(USIZE_BYTES); |
475 | | } |
476 | 0 | generic::fwd_byte_by_byte(cur, end, confirm) |
477 | 0 | } |
478 | | |
479 | | /// Like `rfind`, but accepts and returns raw pointers. |
480 | | /// |
481 | | /// When a match is found, the pointer returned is guaranteed to be |
482 | | /// `>= start` and `< end`. |
483 | | /// |
484 | | /// This routine is useful if you're already using raw pointers and would |
485 | | /// like to avoid converting back to a slice before executing a search. |
486 | | /// |
487 | | /// # Safety |
488 | | /// |
489 | | /// * Both `start` and `end` must be valid for reads. |
490 | | /// * Both `start` and `end` must point to an initialized value. |
491 | | /// * Both `start` and `end` must point to the same allocated object and |
492 | | /// must either be in bounds or at most one byte past the end of the |
493 | | /// allocated object. |
494 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
495 | | /// object. |
496 | | /// * The distance between `start` and `end` must not overflow `isize`. |
497 | | /// * The distance being in bounds must not rely on "wrapping around" the |
498 | | /// address space. |
499 | | /// |
500 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
501 | | /// In that case, `None` will always be returned. |
502 | | #[inline] |
503 | 0 | pub unsafe fn rfind_raw( |
504 | 0 | &self, |
505 | 0 | start: *const u8, |
506 | 0 | end: *const u8, |
507 | 0 | ) -> Option<*const u8> { |
508 | 0 | if start >= end { |
509 | 0 | return None; |
510 | 0 | } |
511 | 0 | let confirm = |b| self.confirm(b); |
512 | 0 | let len = end.distance(start); |
513 | 0 | if len < USIZE_BYTES { |
514 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
515 | 0 | } |
516 | 0 |
|
517 | 0 | let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); |
518 | 0 | if self.has_needle(chunk) { |
519 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
520 | 0 | } |
521 | 0 |
|
522 | 0 | let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); |
523 | 0 | debug_assert!(start <= cur && cur <= end); |
524 | 0 | while cur >= start.add(USIZE_BYTES) { |
525 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
526 | | |
527 | 0 | let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read(); |
528 | 0 | if self.has_needle(chunk) { |
529 | 0 | break; |
530 | 0 | } |
531 | 0 | cur = cur.sub(USIZE_BYTES); |
532 | | } |
533 | 0 | generic::rev_byte_by_byte(start, cur, confirm) |
534 | 0 | } |
535 | | |
536 | | /// Returns an iterator over all occurrences of one of the needle bytes in |
537 | | /// the given haystack. |
538 | | /// |
539 | | /// The iterator returned implements `DoubleEndedIterator`. This means it |
540 | | /// can also be used to find occurrences in reverse order. |
541 | 0 | pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { |
542 | 0 | TwoIter { searcher: self, it: generic::Iter::new(haystack) } |
543 | 0 | } |
544 | | |
545 | | #[inline(always)] |
546 | 0 | fn has_needle(&self, chunk: usize) -> bool { |
547 | 0 | has_zero_byte(self.v1 ^ chunk) || has_zero_byte(self.v2 ^ chunk) |
548 | 0 | } |
549 | | |
550 | | #[inline(always)] |
551 | 0 | fn confirm(&self, haystack_byte: u8) -> bool { |
552 | 0 | self.s1 == haystack_byte || self.s2 == haystack_byte |
553 | 0 | } |
554 | | } |
555 | | |
556 | | /// An iterator over all occurrences of two possible bytes in a haystack. |
557 | | /// |
558 | | /// This iterator implements `DoubleEndedIterator`, which means it can also be |
559 | | /// used to find occurrences in reverse order. |
560 | | /// |
561 | | /// This iterator is created by the [`Two::iter`] method. |
562 | | /// |
563 | | /// The lifetime parameters are as follows: |
564 | | /// |
565 | | /// * `'a` refers to the lifetime of the underlying [`Two`] searcher. |
566 | | /// * `'h` refers to the lifetime of the haystack being searched. |
567 | | #[derive(Clone, Debug)] |
568 | | pub struct TwoIter<'a, 'h> { |
569 | | /// The underlying memchr searcher. |
570 | | searcher: &'a Two, |
571 | | /// Generic iterator implementation. |
572 | | it: generic::Iter<'h>, |
573 | | } |
574 | | |
575 | | impl<'a, 'h> Iterator for TwoIter<'a, 'h> { |
576 | | type Item = usize; |
577 | | |
578 | | #[inline] |
579 | 0 | fn next(&mut self) -> Option<usize> { |
580 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
581 | 0 | // and end pointers, but we guarantee that any pointer returned by |
582 | 0 | // 'find_raw' falls within the bounds of the start and end pointer. |
583 | 0 | unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } |
584 | 0 | } |
585 | | |
586 | | #[inline] |
587 | 0 | fn size_hint(&self) -> (usize, Option<usize>) { |
588 | 0 | self.it.size_hint() |
589 | 0 | } |
590 | | } |
591 | | |
592 | | impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { |
593 | | #[inline] |
594 | 0 | fn next_back(&mut self) -> Option<usize> { |
595 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
596 | 0 | // and end pointers, but we guarantee that any pointer returned by |
597 | 0 | // 'rfind_raw' falls within the bounds of the start and end pointer. |
598 | 0 | unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } |
599 | 0 | } |
600 | | } |
601 | | |
602 | | /// Finds all occurrences of three bytes in a haystack. |
603 | | /// |
604 | | /// That is, this reports matches of one of three possible bytes. For example, |
605 | | /// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets |
606 | | /// `0`, `2`, `3`, `4` and `5`. |
607 | | #[derive(Clone, Copy, Debug)] |
608 | | pub struct Three { |
609 | | s1: u8, |
610 | | s2: u8, |
611 | | s3: u8, |
612 | | v1: usize, |
613 | | v2: usize, |
614 | | v3: usize, |
615 | | } |
616 | | |
617 | | impl Three { |
618 | | /// Create a new searcher that finds occurrences of the three needle bytes |
619 | | /// given. |
620 | | #[inline] |
621 | 0 | pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Three { |
622 | 0 | Three { |
623 | 0 | s1: needle1, |
624 | 0 | s2: needle2, |
625 | 0 | s3: needle3, |
626 | 0 | v1: splat(needle1), |
627 | 0 | v2: splat(needle2), |
628 | 0 | v3: splat(needle3), |
629 | 0 | } |
630 | 0 | } |
631 | | |
632 | | /// A test-only routine so that we can bundle a bunch of quickcheck |
633 | | /// properties into a single macro. Basically, this provides a constructor |
634 | | /// that makes it identical to most other memchr implementations, which |
635 | | /// have fallible constructors. |
636 | | #[cfg(test)] |
637 | | pub(crate) fn try_new( |
638 | | needle1: u8, |
639 | | needle2: u8, |
640 | | needle3: u8, |
641 | | ) -> Option<Three> { |
642 | | Some(Three::new(needle1, needle2, needle3)) |
643 | | } |
644 | | |
645 | | /// Return the first occurrence of one of the needle bytes in the given |
646 | | /// haystack. If no such occurrence exists, then `None` is returned. |
647 | | /// |
648 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
649 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
650 | | #[inline] |
651 | 0 | pub fn find(&self, haystack: &[u8]) -> Option<usize> { |
652 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
653 | 0 | // falls within the bounds of the start and end pointers. |
654 | 0 | unsafe { |
655 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
656 | 0 | self.find_raw(s, e) |
657 | 0 | }) |
658 | 0 | } |
659 | 0 | } |
660 | | |
661 | | /// Return the last occurrence of one of the needle bytes in the given |
662 | | /// haystack. If no such occurrence exists, then `None` is returned. |
663 | | /// |
664 | | /// The occurrence is reported as an offset into `haystack`. Its maximum |
665 | | /// value for a non-empty haystack is `haystack.len() - 1`. |
666 | | #[inline] |
667 | 0 | pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { |
668 | 0 | // SAFETY: `find_raw` guarantees that if a pointer is returned, it |
669 | 0 | // falls within the bounds of the start and end pointers. |
670 | 0 | unsafe { |
671 | 0 | generic::search_slice_with_raw(haystack, |s, e| { |
672 | 0 | self.rfind_raw(s, e) |
673 | 0 | }) |
674 | 0 | } |
675 | 0 | } |
676 | | |
677 | | /// Like `find`, but accepts and returns raw pointers. |
678 | | /// |
679 | | /// When a match is found, the pointer returned is guaranteed to be |
680 | | /// `>= start` and `< end`. |
681 | | /// |
682 | | /// This routine is useful if you're already using raw pointers and would |
683 | | /// like to avoid converting back to a slice before executing a search. |
684 | | /// |
685 | | /// # Safety |
686 | | /// |
687 | | /// * Both `start` and `end` must be valid for reads. |
688 | | /// * Both `start` and `end` must point to an initialized value. |
689 | | /// * Both `start` and `end` must point to the same allocated object and |
690 | | /// must either be in bounds or at most one byte past the end of the |
691 | | /// allocated object. |
692 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
693 | | /// object. |
694 | | /// * The distance between `start` and `end` must not overflow `isize`. |
695 | | /// * The distance being in bounds must not rely on "wrapping around" the |
696 | | /// address space. |
697 | | /// |
698 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
699 | | /// In that case, `None` will always be returned. |
700 | | #[inline] |
701 | 0 | pub unsafe fn find_raw( |
702 | 0 | &self, |
703 | 0 | start: *const u8, |
704 | 0 | end: *const u8, |
705 | 0 | ) -> Option<*const u8> { |
706 | 0 | if start >= end { |
707 | 0 | return None; |
708 | 0 | } |
709 | 0 | let confirm = |b| self.confirm(b); |
710 | 0 | let len = end.distance(start); |
711 | 0 | if len < USIZE_BYTES { |
712 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
713 | 0 | } |
714 | 0 |
|
715 | 0 | // The start of the search may not be aligned to `*const usize`, |
716 | 0 | // so we do an unaligned load here. |
717 | 0 | let chunk = start.cast::<usize>().read_unaligned(); |
718 | 0 | if self.has_needle(chunk) { |
719 | 0 | return generic::fwd_byte_by_byte(start, end, confirm); |
720 | 0 | } |
721 | 0 |
|
722 | 0 | // And now we start our search at a guaranteed aligned position. |
723 | 0 | // The first iteration of the loop below will overlap with the |
724 | 0 | // unaligned chunk above in cases where the search starts at an |
725 | 0 | // unaligned offset, but that's okay as we're only here if that |
726 | 0 | // above didn't find a match. |
727 | 0 | let mut cur = |
728 | 0 | start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); |
729 | 0 | debug_assert!(cur > start); |
730 | 0 | debug_assert!(end.sub(USIZE_BYTES) >= start); |
731 | 0 | while cur <= end.sub(USIZE_BYTES) { |
732 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
733 | | |
734 | 0 | let chunk = cur.cast::<usize>().read(); |
735 | 0 | if self.has_needle(chunk) { |
736 | 0 | break; |
737 | 0 | } |
738 | 0 | cur = cur.add(USIZE_BYTES); |
739 | | } |
740 | 0 | generic::fwd_byte_by_byte(cur, end, confirm) |
741 | 0 | } |
742 | | |
743 | | /// Like `rfind`, but accepts and returns raw pointers. |
744 | | /// |
745 | | /// When a match is found, the pointer returned is guaranteed to be |
746 | | /// `>= start` and `< end`. |
747 | | /// |
748 | | /// This routine is useful if you're already using raw pointers and would |
749 | | /// like to avoid converting back to a slice before executing a search. |
750 | | /// |
751 | | /// # Safety |
752 | | /// |
753 | | /// * Both `start` and `end` must be valid for reads. |
754 | | /// * Both `start` and `end` must point to an initialized value. |
755 | | /// * Both `start` and `end` must point to the same allocated object and |
756 | | /// must either be in bounds or at most one byte past the end of the |
757 | | /// allocated object. |
758 | | /// * Both `start` and `end` must be _derived from_ a pointer to the same |
759 | | /// object. |
760 | | /// * The distance between `start` and `end` must not overflow `isize`. |
761 | | /// * The distance being in bounds must not rely on "wrapping around" the |
762 | | /// address space. |
763 | | /// |
764 | | /// Note that callers may pass a pair of pointers such that `start >= end`. |
765 | | /// In that case, `None` will always be returned. |
766 | | #[inline] |
767 | 0 | pub unsafe fn rfind_raw( |
768 | 0 | &self, |
769 | 0 | start: *const u8, |
770 | 0 | end: *const u8, |
771 | 0 | ) -> Option<*const u8> { |
772 | 0 | if start >= end { |
773 | 0 | return None; |
774 | 0 | } |
775 | 0 | let confirm = |b| self.confirm(b); |
776 | 0 | let len = end.distance(start); |
777 | 0 | if len < USIZE_BYTES { |
778 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
779 | 0 | } |
780 | 0 |
|
781 | 0 | let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); |
782 | 0 | if self.has_needle(chunk) { |
783 | 0 | return generic::rev_byte_by_byte(start, end, confirm); |
784 | 0 | } |
785 | 0 |
|
786 | 0 | let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); |
787 | 0 | debug_assert!(start <= cur && cur <= end); |
788 | 0 | while cur >= start.add(USIZE_BYTES) { |
789 | 0 | debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); |
790 | | |
791 | 0 | let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read(); |
792 | 0 | if self.has_needle(chunk) { |
793 | 0 | break; |
794 | 0 | } |
795 | 0 | cur = cur.sub(USIZE_BYTES); |
796 | | } |
797 | 0 | generic::rev_byte_by_byte(start, cur, confirm) |
798 | 0 | } |
799 | | |
800 | | /// Returns an iterator over all occurrences of one of the needle bytes in |
801 | | /// the given haystack. |
802 | | /// |
803 | | /// The iterator returned implements `DoubleEndedIterator`. This means it |
804 | | /// can also be used to find occurrences in reverse order. |
805 | 0 | pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { |
806 | 0 | ThreeIter { searcher: self, it: generic::Iter::new(haystack) } |
807 | 0 | } |
808 | | |
809 | | #[inline(always)] |
810 | 0 | fn has_needle(&self, chunk: usize) -> bool { |
811 | 0 | has_zero_byte(self.v1 ^ chunk) |
812 | 0 | || has_zero_byte(self.v2 ^ chunk) |
813 | 0 | || has_zero_byte(self.v3 ^ chunk) |
814 | 0 | } |
815 | | |
816 | | #[inline(always)] |
817 | 0 | fn confirm(&self, haystack_byte: u8) -> bool { |
818 | 0 | self.s1 == haystack_byte |
819 | 0 | || self.s2 == haystack_byte |
820 | 0 | || self.s3 == haystack_byte |
821 | 0 | } |
822 | | } |
823 | | |
824 | | /// An iterator over all occurrences of three possible bytes in a haystack. |
825 | | /// |
826 | | /// This iterator implements `DoubleEndedIterator`, which means it can also be |
827 | | /// used to find occurrences in reverse order. |
828 | | /// |
829 | | /// This iterator is created by the [`Three::iter`] method. |
830 | | /// |
831 | | /// The lifetime parameters are as follows: |
832 | | /// |
833 | | /// * `'a` refers to the lifetime of the underlying [`Three`] searcher. |
834 | | /// * `'h` refers to the lifetime of the haystack being searched. |
835 | | #[derive(Clone, Debug)] |
836 | | pub struct ThreeIter<'a, 'h> { |
837 | | /// The underlying memchr searcher. |
838 | | searcher: &'a Three, |
839 | | /// Generic iterator implementation. |
840 | | it: generic::Iter<'h>, |
841 | | } |
842 | | |
843 | | impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { |
844 | | type Item = usize; |
845 | | |
846 | | #[inline] |
847 | 0 | fn next(&mut self) -> Option<usize> { |
848 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
849 | 0 | // and end pointers, but we guarantee that any pointer returned by |
850 | 0 | // 'find_raw' falls within the bounds of the start and end pointer. |
851 | 0 | unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } |
852 | 0 | } |
853 | | |
854 | | #[inline] |
855 | 0 | fn size_hint(&self) -> (usize, Option<usize>) { |
856 | 0 | self.it.size_hint() |
857 | 0 | } |
858 | | } |
859 | | |
860 | | impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { |
861 | | #[inline] |
862 | 0 | fn next_back(&mut self) -> Option<usize> { |
863 | 0 | // SAFETY: We rely on the generic iterator to provide valid start |
864 | 0 | // and end pointers, but we guarantee that any pointer returned by |
865 | 0 | // 'rfind_raw' falls within the bounds of the start and end pointer. |
866 | 0 | unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } |
867 | 0 | } |
868 | | } |
869 | | |
870 | | /// Return `true` if `x` contains any zero byte. |
871 | | /// |
872 | | /// That is, this routine treats `x` as a register of 8-bit lanes and returns |
873 | | /// true when any of those lanes is `0`. |
874 | | /// |
875 | | /// From "Matters Computational" by J. Arndt. |
876 | | #[inline(always)] |
877 | 0 | fn has_zero_byte(x: usize) -> bool { |
878 | 0 | // "The idea is to subtract one from each of the bytes and then look for |
879 | 0 | // bytes where the borrow propagated all the way to the most significant |
880 | 0 | // bit." |
881 | 0 | const LO: usize = splat(0x01); |
882 | 0 | const HI: usize = splat(0x80); |
883 | 0 |
|
884 | 0 | (x.wrapping_sub(LO) & !x & HI) != 0 |
885 | 0 | } |
886 | | |
887 | | /// Repeat the given byte into a word size number. That is, every 8 bits |
888 | | /// is equivalent to the given byte. For example, if `b` is `\x4E` or |
889 | | /// `01001110` in binary, then the returned value on a 32-bit system would be: |
890 | | /// `01001110_01001110_01001110_01001110`. |
891 | | #[inline(always)] |
892 | 0 | const fn splat(b: u8) -> usize { |
893 | 0 | // TODO: use `usize::from` once it can be used in const context. |
894 | 0 | (b as usize) * (usize::MAX / 255) |
895 | 0 | } |
896 | | |
897 | | #[cfg(test)] |
898 | | mod tests { |
899 | | use super::*; |
900 | | |
901 | | define_memchr_quickcheck!(super, try_new); |
902 | | |
903 | | #[test] |
904 | | fn forward_one() { |
905 | | crate::tests::memchr::Runner::new(1).forward_iter( |
906 | | |haystack, needles| { |
907 | | Some(One::new(needles[0]).iter(haystack).collect()) |
908 | | }, |
909 | | ) |
910 | | } |
911 | | |
912 | | #[test] |
913 | | fn reverse_one() { |
914 | | crate::tests::memchr::Runner::new(1).reverse_iter( |
915 | | |haystack, needles| { |
916 | | Some(One::new(needles[0]).iter(haystack).rev().collect()) |
917 | | }, |
918 | | ) |
919 | | } |
920 | | |
921 | | #[test] |
922 | | fn count_one() { |
923 | | crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { |
924 | | Some(One::new(needles[0]).iter(haystack).count()) |
925 | | }) |
926 | | } |
927 | | |
928 | | #[test] |
929 | | fn forward_two() { |
930 | | crate::tests::memchr::Runner::new(2).forward_iter( |
931 | | |haystack, needles| { |
932 | | let n1 = needles.get(0).copied()?; |
933 | | let n2 = needles.get(1).copied()?; |
934 | | Some(Two::new(n1, n2).iter(haystack).collect()) |
935 | | }, |
936 | | ) |
937 | | } |
938 | | |
939 | | #[test] |
940 | | fn reverse_two() { |
941 | | crate::tests::memchr::Runner::new(2).reverse_iter( |
942 | | |haystack, needles| { |
943 | | let n1 = needles.get(0).copied()?; |
944 | | let n2 = needles.get(1).copied()?; |
945 | | Some(Two::new(n1, n2).iter(haystack).rev().collect()) |
946 | | }, |
947 | | ) |
948 | | } |
949 | | |
950 | | #[test] |
951 | | fn forward_three() { |
952 | | crate::tests::memchr::Runner::new(3).forward_iter( |
953 | | |haystack, needles| { |
954 | | let n1 = needles.get(0).copied()?; |
955 | | let n2 = needles.get(1).copied()?; |
956 | | let n3 = needles.get(2).copied()?; |
957 | | Some(Three::new(n1, n2, n3).iter(haystack).collect()) |
958 | | }, |
959 | | ) |
960 | | } |
961 | | |
962 | | #[test] |
963 | | fn reverse_three() { |
964 | | crate::tests::memchr::Runner::new(3).reverse_iter( |
965 | | |haystack, needles| { |
966 | | let n1 = needles.get(0).copied()?; |
967 | | let n2 = needles.get(1).copied()?; |
968 | | let n3 = needles.get(2).copied()?; |
969 | | Some(Three::new(n1, n2, n3).iter(haystack).rev().collect()) |
970 | | }, |
971 | | ) |
972 | | } |
973 | | |
974 | | // This was found by quickcheck in the course of refactoring this crate |
975 | | // after memchr 2.5.0. |
976 | | #[test] |
977 | | fn regression_double_ended_iterator() { |
978 | | let finder = One::new(b'a'); |
979 | | let haystack = "a"; |
980 | | let mut it = finder.iter(haystack.as_bytes()); |
981 | | assert_eq!(Some(0), it.next()); |
982 | | assert_eq!(None, it.next_back()); |
983 | | } |
984 | | |
985 | | // This regression test was caught by ripgrep's test suite on i686 when |
986 | | // upgrading to memchr 2.6. Namely, something about the \x0B bytes here |
987 | | // screws with the SWAR counting approach I was using. This regression test |
988 | | // prompted me to remove the SWAR counting approach and just replace it |
989 | | // with a byte-at-a-time loop. |
990 | | #[test] |
991 | | fn regression_count_new_lines() { |
992 | | let haystack = "01234567\x0b\n\x0b\n\x0b\n\x0b\nx"; |
993 | | let count = One::new(b'\n').count(haystack.as_bytes()); |
994 | | assert_eq!(4, count); |
995 | | } |
996 | | } |