#![no_std]
#![deny(missing_docs)]
#[cfg(feature = "simd-accel")]
extern crate simd;
use core::{cmp, mem, slice, usize};
#[cfg(feature = "simd-accel")]
use simd::u8x16;
#[cfg(feature = "avx-accel")]
use simd::x86::sse2::Sse2U8x16;
#[cfg(feature = "avx-accel")]
use simd::x86::avx::{LowHigh128, u8x32};
/// Abstraction over a machine word or SIMD register viewed as a vector of
/// independent byte lanes. Implementations count matching bytes many lanes
/// at a time; per-lane tallies must not exceed 255, so callers batch at most
/// 255 increments between `sum` calls.
trait ByteChunk: Copy {
/// The broadcast/constant form of this chunk (same type for all impls here).
type Splat: Copy;
/// Broadcast `byte` into every lane.
fn splat(byte: u8) -> Self::Splat;
/// Convert a splatted value back into a chunk (identity for these impls).
fn from_splat(splat: Self::Splat) -> Self;
/// Per-lane equality test; lanes hold a nonzero marker where bytes match.
fn bytewise_equal(self, other: Self::Splat) -> Self;
/// Per-lane test for bytes that are NOT UTF-8 continuation bytes
/// (i.e. not of the form 0b10xxxxxx).
fn is_leading_utf8_byte(self) -> Self;
/// Add `incr`'s per-lane markers into this chunk's per-lane tallies.
fn increment(self, incr: Self) -> Self;
/// Horizontal sum of all byte lanes.
fn sum(&self) -> usize;
}
// SWAR ("SIMD within a register") implementation: a usize is treated as
// mem::size_of::<usize>() independent byte lanes.
impl ByteChunk for usize {
type Splat = Self;
// Broadcast `byte` into every lane: usize::MAX / 0xFF == 0x..0101_0101,
// so multiplying by `byte` places it in each byte position.
fn splat(byte: u8) -> Self {
let lo = usize::MAX / 0xFF;
lo * byte as usize
}
fn from_splat(splat: Self) -> Self {
splat
}
// Yields 0x01 in each lane whose byte is not a continuation byte
// (0b10xxxxxx). After the shifts, bit 0 of each lane holds
// (!bit7 | bit6) of that lane's byte; the mask discards bits that
// crossed lane boundaries during the word-wide shift.
fn is_leading_utf8_byte(self) -> Self {
((!self >> 7) | (self >> 6)) & Self::splat(1)
}
// Yields 0x01 in each lane where the bytes are equal, 0x00 otherwise.
// Classic "haszero" trick: x is 0 only in equal lanes; the add sets the
// high bit of every lane that is nonzero (or borrows into it), so the
// negated high bits mark the equal lanes.
fn bytewise_equal(self, other: Self) -> Self {
let lo = usize::MAX / 0xFF;
let hi = lo << 7;
let x = self ^ other;
!((((x & !hi) + !hi) | x) >> 7) & lo
}
// Plain word add — safe because callers keep each lane's tally <= 255.
fn increment(self, incr: Self) -> Self {
self + incr
}
// Horizontal byte sum. Adjacent byte lanes are first folded into 16-bit
// lanes (each <= 510, so no overflow); the wrapping multiply by
// 0x..0001_0001 accumulates every 16-bit lane into the top 16 bits of
// the word, which the final shift extracts. Correct as long as the total
// fits in 16 bits — guaranteed by the 255-chunk batching in callers.
fn sum(&self) -> usize {
let every_other_byte_lo = usize::MAX / 0xFFFF;
let every_other_byte = every_other_byte_lo * 0xFF;
let pair_sum: usize = (self & every_other_byte) + ((self >> 8) & every_other_byte);
pair_sum.wrapping_mul(every_other_byte_lo) >> ((mem::size_of::<usize>() - 2) * 8)
}
}
#[cfg(feature = "simd-accel")]
// 16-lane SSE2 implementation via the external `simd` crate.
impl ByteChunk for u8x16 {
type Splat = Self;
fn splat(byte: u8) -> Self {
Self::splat(byte)
}
fn from_splat(splat: Self) -> Self {
splat
}
// True lanes (non-continuation bytes) come back as 0xFF masks;
// `to_repr().to_u8()` reinterprets the boolean vector as raw bytes
// (per the `simd` crate API — each lane is 0x00 or 0xFF).
fn is_leading_utf8_byte(self) -> Self {
(self & Self::splat(0b1100_0000)).ne(Self::splat(0b1000_0000)).to_repr().to_u8()
}
// Equal lanes become 0xFF, unequal lanes 0x00.
fn bytewise_equal(self, other: Self) -> Self {
self.eq(other).to_repr().to_u8()
}
// NOTE: subtraction is intentional, not a bug — match masks are 0xFF,
// which is -1 in two's complement, so subtracting adds 1 per match.
fn increment(self, incr: Self) -> Self {
self - incr
}
// Scalar horizontal sum of the 16 byte lanes.
fn sum(&self) -> usize {
let mut count = 0;
for i in 0..16 {
count += self.extract(i) as usize;
}
count
}
}
#[cfg(feature = "avx-accel")]
// 32-lane AVX implementation via the external `simd` crate.
impl ByteChunk for u8x32 {
type Splat = Self;
fn splat(byte: u8) -> Self {
Self::splat(byte)
}
fn from_splat(splat: Self) -> Self {
splat
}
// True lanes (non-continuation bytes) come back as 0xFF masks; see the
// u8x16 impl for the `to_repr().to_u8()` reinterpretation.
fn is_leading_utf8_byte(self) -> Self {
(self & Self::splat(0b1100_0000)).ne(Self::splat(0b1000_0000)).to_repr().to_u8()
}
fn bytewise_equal(self, other: Self) -> Self {
self.eq(other).to_repr().to_u8()
}
// Subtraction is intentional: 0xFF match masks are -1, so this adds 1
// per matching lane.
fn increment(self, incr: Self) -> Self {
self - incr
}
// Horizontal sum via PSADBW: `sad` against zero sums the absolute
// differences (here, the raw bytes) of each 8-byte half into a 64-bit
// lane, so each half-register contributes two partial sums.
fn sum(&self) -> usize {
let zero = u8x16::splat(0);
let sad_lo = self.low().sad(zero);
let sad_hi = self.high().sad(zero);
let mut count = 0;
count += (sad_lo.extract(0) + sad_lo.extract(1)) as usize;
count += (sad_hi.extract(0) + sad_hi.extract(1)) as usize;
count
}
}
/// Splits `x` into an unaligned head, a `Chunk`-aligned middle reinterpreted
/// as `&[Chunk]`, and a tail holding the bytes that do not fill a whole chunk.
/// Either outer slice may be empty; for inputs shorter than one aligned chunk
/// the middle is empty and everything lands in head/tail.
fn chunk_align<Chunk: ByteChunk>(x: &[u8]) -> (&[u8], &[Chunk], &[u8]) {
let align = mem::size_of::<Chunk>();
// Misalignment of the start and end addresses relative to `align`.
let offset_ptr = (x.as_ptr() as usize) % align;
let offset_end = (x.as_ptr() as usize + x.len()) % align;
// d2: bytes before the last aligned boundary; d1: bytes up to the first
// aligned boundary (clamped so d1 <= d2 for very short slices).
let d2 = x.len().saturating_sub(offset_end);
let d1 = cmp::min((align - offset_ptr) % align, d2);
let (init, tail) = x.split_at(d2);
let (init, mid) = init.split_at(d1);
assert_eq!(mid.len() % align, 0);
// SAFETY: `mid` starts at an `align`-boundary (head skipped `d1` bytes to
// reach it) and its length is an exact multiple of `align` (asserted
// above), so reinterpreting it as `Chunk`s is in-bounds and aligned.
// Assumes every bit pattern is a valid `Chunk` — true for usize and the
// SIMD vector types used here; confirm if new Chunk impls are added.
let mid = unsafe { slice::from_raw_parts(mid.as_ptr() as *const Chunk, mid.len() / align) };
(init, mid, tail)
}
fn chunk_count<Chunk: ByteChunk>(haystack: &[Chunk], needle: u8) -> usize {
let zero = Chunk::splat(0);
let needles = Chunk::splat(needle);
let mut count = 0;
let mut i = 0;
while i < haystack.len() {
let mut counts = Chunk::from_splat(zero);
let end = cmp::min(i + 255, haystack.len());
for &chunk in &haystack[i..end] {
counts = counts.increment(chunk.bytewise_equal(needles));
}
i = end;
count += counts.sum();
}
count
}
/// Counts occurrences of `needle` in `haystack`, dispatching between the
/// naive byte loop and the chunked wide counter.
///
/// Inputs shorter than `naive_below` skip the alignment/chunking setup,
/// which would cost more than it saves on tiny slices. Otherwise the slice
/// is split into an unaligned head, a `Chunk`-aligned body, and a tail;
/// only the body is counted with the wide routine.
fn count_generic<Chunk: ByteChunk<Splat = Chunk>>(naive_below: usize, haystack: &[u8], needle: u8) -> usize {
    if haystack.len() < naive_below {
        return naive_count(haystack, needle);
    }
    let (head, body, tail) = chunk_align::<Chunk>(haystack);
    naive_count(head, needle) + chunk_count(body, needle) + naive_count(tail, needle)
}
#[cfg(not(feature = "simd-accel"))]
/// Counts occurrences of `needle` in `haystack` using SWAR on `usize`.
/// Slices shorter than 32 bytes fall back to the naive loop, where the
/// chunking setup would not pay off.
pub fn count(haystack: &[u8], needle: u8) -> usize {
count_generic::<usize>(32, haystack, needle)
}
#[cfg(all(feature = "simd-accel", not(feature = "avx-accel")))]
/// Counts occurrences of `needle` in `haystack` using 16-lane SIMD.
/// Slices shorter than 32 bytes fall back to the naive loop.
pub fn count(haystack: &[u8], needle: u8) -> usize {
count_generic::<u8x16>(32, haystack, needle)
}
#[cfg(feature = "avx-accel")]
/// Counts occurrences of `needle` in `haystack` using 32-lane AVX SIMD.
/// Slices shorter than 64 bytes fall back to the naive loop.
pub fn count(haystack: &[u8], needle: u8) -> usize {
count_generic::<u8x32>(64, haystack, needle)
}
/// Counts occurrences of `needle` in `haystack` one byte at a time,
/// accumulating in a `u32` (the "_32" in the name) before widening to
/// `usize` — kept as a baseline/benchmark variant of [`naive_count`].
pub fn naive_count_32(haystack: &[u8], needle: u8) -> usize {
    let mut tally: u32 = 0;
    for &byte in haystack {
        tally += (byte == needle) as u32;
    }
    tally as usize
}
/// Counts occurrences of `needle` in `haystack` with a simple scalar scan;
/// used directly for short inputs and for the unaligned edges of long ones.
pub fn naive_count(haystack: &[u8], needle: u8) -> usize {
    haystack.iter().filter(|&&byte| byte == needle).count()
}
fn chunk_num_chars<Chunk: ByteChunk>(haystack: &[Chunk]) -> usize {
let zero = Chunk::splat(0);
let mut count = 0;
let mut i = 0;
while i < haystack.len() {
let mut counts = Chunk::from_splat(zero);
let end = cmp::min(i + 255, haystack.len());
for &chunk in &haystack[i..end] {
counts = counts.increment(chunk.is_leading_utf8_byte());
}
i = end;
count += counts.sum();
}
count
}
/// Counts UTF-8 characters in `haystack`, dispatching between the naive
/// byte loop and the chunked wide counter.
///
/// Inputs shorter than `naive_below` skip the alignment/chunking setup.
/// Otherwise the slice is split into an unaligned head, a `Chunk`-aligned
/// body, and a tail; only the body uses the wide routine.
fn num_chars_generic<Chunk: ByteChunk<Splat = Chunk>>(naive_below: usize, haystack: &[u8]) -> usize {
    if haystack.len() < naive_below {
        return naive_num_chars(haystack);
    }
    let (head, body, tail) = chunk_align::<Chunk>(haystack);
    naive_num_chars(head) + chunk_num_chars(body) + naive_num_chars(tail)
}
#[cfg(not(feature = "simd-accel"))]
/// Counts the UTF-8 characters in `haystack` (assuming valid UTF-8) using
/// SWAR on `usize`. Slices shorter than 32 bytes use the naive loop.
pub fn num_chars(haystack: &[u8]) -> usize {
num_chars_generic::<usize>(32, haystack)
}
#[cfg(all(feature = "simd-accel", not(feature = "avx-accel")))]
/// Counts the UTF-8 characters in `haystack` (assuming valid UTF-8) using
/// 16-lane SIMD. Slices shorter than 32 bytes use the naive loop.
pub fn num_chars(haystack: &[u8]) -> usize {
num_chars_generic::<u8x16>(32, haystack)
}
#[cfg(feature = "avx-accel")]
/// Counts the UTF-8 characters in `haystack` (assuming valid UTF-8) using
/// 32-lane AVX SIMD. Slices shorter than 64 bytes use the naive loop.
pub fn num_chars(haystack: &[u8]) -> usize {
num_chars_generic::<u8x32>(64, haystack)
}
/// Counts the bytes in `haystack` that are not UTF-8 continuation bytes
/// (`0b10xxxxxx`) with a simple scalar scan. For valid UTF-8 this equals
/// the number of characters; for arbitrary bytes it counts everything that
/// does not look like a continuation byte.
pub fn naive_num_chars(haystack: &[u8]) -> usize {
    haystack
        .iter()
        .filter(|&&byte| (byte & 0b1100_0000) != 0b1000_0000)
        .count()
}