Implement [T]::align_to

Simonas Kazlauskas 2018-04-29 17:09:56 +03:00
parent d45378216b
commit 680031b016
5 changed files with 285 additions and 112 deletions

View file

@@ -1697,27 +1697,169 @@ impl<T> [T] {
}
}
/// Function to calculate the lengths of the middle and trailing slices for `align_to{,_mut}`.
fn align_to_offsets<U>(&self) -> (usize, usize) {
// For `rest`, we want to figure out how many `U`s fit into the smallest possible run of
// `T`s, and how many `T`s each such run ("multiple") takes.
//
// Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
// for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
// place of every 3 Ts in the `rest` slice. A bit more complicated.
//
// Formula to calculate this is:
//
// Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
// Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
//
// Expanded and simplified:
//
// Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
// Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
//
// Luckily since all this is constant-evaluated... performance here matters not!
#[inline]
fn gcd(a: usize, b: usize) -> usize {
// Iterative Stein's algorithm (binary GCD).
// We should still make this `const fn` (and revert to the recursive algorithm if we do),
// because relying on LLVM to const-evaluate all of this is… fragile.
let (ctz_a, mut ctz_b) = unsafe {
if a == 0 { return b; }
if b == 0 { return a; }
(::intrinsics::cttz_nonzero(a), ::intrinsics::cttz_nonzero(b))
};
let k = ctz_a.min(ctz_b);
let mut a = a >> ctz_a;
let mut b = b;
loop {
// remove all factors of 2 from b
b >>= ctz_b;
if a > b {
::mem::swap(&mut a, &mut b);
}
b = b - a;
unsafe {
if b == 0 {
break;
}
ctz_b = ::intrinsics::cttz_nonzero(b);
}
}
return a << k;
}
let gcd: usize = gcd(::mem::size_of::<T>(), ::mem::size_of::<U>());
let ts: usize = ::mem::size_of::<U>() / gcd;
let us: usize = ::mem::size_of::<T>() / gcd;
// Armed with this knowledge, we can find how many `U`s we can fit!
let us_len = self.len() / ts * us;
// And how many `T`s will be in the trailing slice!
let ts_len = self.len() % ts;
return (us_len, ts_len);
}
}}
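As a sanity check of the `Us`/`Ts` arithmetic above, here is a small standalone sketch (not part of this commit; the recursive `gcd` is a stand-in for the commit's iterative Stein's algorithm) reproducing the size 16 / size 24 example from the comment:

```
// Standalone sketch of the `align_to_offsets` arithmetic.
fn gcd(a: usize, b: usize) -> usize {
    if b == 0 { a } else { gcd(b, a % b) }
}

fn main() {
    let (size_t, size_u) = (16usize, 24usize); // the example from the comment
    let g = gcd(size_t, size_u);               // 8
    let ts = size_u / g;                       // 3 `T`s per group
    let us = size_t / g;                       // 2 `U`s per group
    assert_eq!((ts, us), (3, 2));
    // For a slice of, say, 8 `T`s: 8 / 3 whole groups of 2 `U`s each,
    // and 8 % 3 trailing `T`s.
    assert_eq!((8 / ts * us, 8 % ts), (4, 2));
}
```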
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: a prefix, a correctly aligned
/// middle slice of the new type, and a suffix. The middle slice will have the greatest
/// length possible for the given types and input slice.
///
/// This method has no purpose when either the input element `T` or the output element `U`
/// is zero-sized, and will return the original slice without splitting anything.
///
/// # Unsafety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// # #![feature(slice_align_to)]
/// unsafe {
/// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[unstable(feature = "slice_align_to", issue = "44488")]
#[cfg(not(stage0))]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated.
if ::mem::size_of::<U>() == 0 || ::mem::size_of::<T>() == 0 {
// Handle ZSTs specially, which is to say, don't handle them at all.
return (self, &[], &[]);
}
let ptr = self.as_ptr();
let offset = ::intrinsics::align_offset(ptr, ::mem::align_of::<U>());
if offset > self.len() {
return (self, &[], &[]);
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
return (left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
from_raw_parts(rest.as_ptr().offset((rest.len() - ts_len) as isize), ts_len))
}
}
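For context, a typical caller of `align_to` looks something like the following sketch (written against today's stable API; `is_all_zero` is an illustrative helper, not anything in this commit):

```
// Check whether a byte slice is all zeros, comparing a machine word at a time
// through the aligned middle view. Any bit pattern is a valid `usize`, so the
// transmute-like reinterpretation is sound here.
fn is_all_zero(bytes: &[u8]) -> bool {
    let (prefix, words, suffix) = unsafe { bytes.align_to::<usize>() };
    prefix.iter().all(|&b| b == 0)
        && words.iter().all(|&w| w == 0)
        && suffix.iter().all(|&b| b == 0)
}

fn main() {
    assert!(is_all_zero(&[0u8; 37]));
    assert!(!is_all_zero(&[0u8, 0, 1, 0]));
}
```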
#[lang = "slice"]
#[cfg(not(test))]
#[cfg(not(stage0))]
impl<T> [T] {
slice_core_methods!();
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: a prefix, a correctly aligned
/// middle slice of the new type, and a suffix. The middle slice will have the greatest
/// length possible for the given types and input slice.
///
/// This method has no purpose when either the input element `T` or the output element `U`
/// is zero-sized, and will return the original slice without splitting anything.
///
/// # Unsafety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// # #![feature(slice_align_to)]
/// unsafe {
/// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[unstable(feature = "slice_align_to", issue = "44488")]
#[cfg(not(stage0))]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated.
if ::mem::size_of::<U>() == 0 || ::mem::size_of::<T>() == 0 {
// Handle ZSTs specially, which is to say, don't handle them at all.
return (self, &mut [], &mut []);
}
// First, find the point at which to split between the first and second slices. Easy with
// `ptr.align_offset`.
let ptr = self.as_ptr();
let offset = ::intrinsics::align_offset(ptr, ::mem::align_of::<U>());
if offset > self.len() {
return (self, &mut [], &mut []);
} else {
let (left, rest) = self.split_at_mut(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let mut_ptr = rest.as_mut_ptr();
return (left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
from_raw_parts_mut(mut_ptr.offset((rest.len() - ts_len) as isize), ts_len))
}
}
}
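And a matching sketch for the mutable variant (same assumptions; `zero_bytes` is a hypothetical helper, not part of this commit):

```
// Zero a byte buffer a word at a time through the aligned middle view.
// Writing any value through the `&mut [usize]` view is sound for integers.
fn zero_bytes(bytes: &mut [u8]) {
    let (prefix, words, suffix) = unsafe { bytes.align_to_mut::<usize>() };
    for b in prefix { *b = 0; }
    for w in words { *w = 0; }
    for b in suffix { *b = 0; }
}

fn main() {
    let mut buf = [0xffu8; 29];
    zero_bytes(&mut buf);
    assert!(buf.iter().all(|&b| b == 0));
}
```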
#[lang = "slice_u8"]

View file

@@ -41,6 +41,8 @@
#![feature(try_from)]
#![feature(try_trait)]
#![feature(exact_chunks)]
#![feature(slice_align_to)]
#![feature(align_offset)]
#![feature(reverse_bits)]
#![feature(inclusive_range_methods)]
#![feature(iterator_find_map)]

View file

@@ -296,3 +296,92 @@ fn write_unaligned_drop() {
}
DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
}
#[test]
fn align_offset_zst() {
// For pointers of stride = 0, the pointer is either already aligned or it cannot be
// aligned at all, because no number of elements will ever align it.
let mut p = 1;
while p < 1024 {
assert_eq!((p as *const ()).align_offset(p), 0);
if p != 1 {
assert_eq!(((p + 1) as *const ()).align_offset(p), !0);
}
p = (p + 1).next_power_of_two();
}
}
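Concretely, the two stride-0 outcomes look like this on today's stable `align_offset` (a standalone sketch, not part of the test suite):

```
fn main() {
    // Stride 0 (ZST element): either the pointer is already aligned…
    assert_eq!((8 as *const ()).align_offset(8), 0);
    // …or no number of zero-sized steps will ever align it, signalled by !0.
    assert_eq!((9 as *const ()).align_offset(8), usize::MAX);
}
```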
#[test]
fn align_offset_stride1() {
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// the number of bytes needed to align it.
let mut align = 1;
while align < 1024 {
    for ptr in 1..2*align {
        let expected = ptr % align;
        let offset = if expected == 0 { 0 } else { align - expected };
        assert_eq!((ptr as *const u8).align_offset(align), offset,
                   "ptr = {}, align = {}, size = 1", ptr, align);
    }
    align = (align + 1).next_power_of_two();
}
}
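The invariant this test checks can also be stated as a closed formula; a minimal sketch under the same assumptions:

```
fn main() {
    // With one-byte elements, the offset in elements equals the offset in
    // bytes: (align - addr % align) % align.
    let align = 16;
    for addr in 1usize..64 {
        let expected = (align - addr % align) % align;
        assert_eq!((addr as *const u8).align_offset(align), expected);
    }
}
```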
#[test]
fn align_offset_weird_strides() {
#[repr(packed)]
struct A3(u16, u8);
struct A4(u32);
#[repr(packed)]
struct A5(u32, u8);
#[repr(packed)]
struct A6(u32, u16);
#[repr(packed)]
struct A7(u32, u16, u8);
#[repr(packed)]
struct A8(u32, u32);
#[repr(packed)]
struct A9(u32, u32, u8);
#[repr(packed)]
struct A10(u32, u32, u16);
unsafe fn test_weird_stride<T>(ptr: *const T, align: usize) -> bool {
let numptr = ptr as usize;
let mut expected = usize::max_value();
// Naive but definitely correct way to find the *first* aligned element of stride::<T>.
for el in 0..align {
if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
expected = el;
break;
}
}
let got = ptr.align_offset(align);
if got != expected {
eprintln!("aligning {:p} (with stride of {}) to {}, expected {}, got {}", ptr,
::std::mem::size_of::<T>(), align, expected, got);
return true;
}
return false;
}
// For pointers of stride != 1, we verify the algorithm against the naivest possible
// implementation
let mut align = 1;
let mut x = false;
while align < 1024 {
for ptr in 1usize..4*align {
unsafe {
x |= test_weird_stride::<A3>(ptr as *const A3, align);
x |= test_weird_stride::<A4>(ptr as *const A4, align);
x |= test_weird_stride::<A5>(ptr as *const A5, align);
x |= test_weird_stride::<A6>(ptr as *const A6, align);
x |= test_weird_stride::<A7>(ptr as *const A7, align);
x |= test_weird_stride::<A8>(ptr as *const A8, align);
x |= test_weird_stride::<A9>(ptr as *const A9, align);
x |= test_weird_stride::<A10>(ptr as *const A10, align);
}
}
align = (align + 1).next_power_of_two();
}
assert!(!x);
}

View file

@@ -812,3 +812,37 @@ pub mod memchr {
}
}
}
#[test]
fn test_align_to_simple() {
let bytes = [1u8, 2, 3, 4, 5, 6, 7];
let (prefix, aligned, suffix) = unsafe { bytes.align_to::<u16>() };
assert_eq!(aligned.len(), 3);
assert!(prefix == [1] || suffix == [7]);
let expect1 = [1 << 8 | 2, 3 << 8 | 4, 5 << 8 | 6];
let expect2 = [1 | 2 << 8, 3 | 4 << 8, 5 | 6 << 8];
let expect3 = [2 << 8 | 3, 4 << 8 | 5, 6 << 8 | 7];
let expect4 = [2 | 3 << 8, 4 | 5 << 8, 6 | 7 << 8];
assert!(aligned == expect1 || aligned == expect2 || aligned == expect3 || aligned == expect4,
"aligned={:?} expected={:?} || {:?} || {:?} || {:?}",
aligned, expect1, expect2, expect3, expect4);
}
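The four expected arrays cover the two possible split points (`prefix == [1]` or `suffix == [7]`) crossed with the two byte orders. An endianness-agnostic restatement using `u16::from_ne_bytes` (a sketch, not how the commit's test is written):

```
fn main() {
    let bytes = [1u8, 2, 3, 4, 5, 6, 7];
    let (prefix, aligned, _suffix) = unsafe { bytes.align_to::<u16>() };
    // Pick the byte pairs according to where the split landed, then let
    // from_ne_bytes fold away the big-/little-endian distinction.
    let pairs: &[[u8; 2]] = if prefix == [1] {
        &[[2, 3], [4, 5], [6, 7]] // split after byte 1, empty suffix
    } else {
        &[[1, 2], [3, 4], [5, 6]] // empty prefix, suffix is [7]
    };
    let expect: Vec<u16> = pairs.iter().map(|p| u16::from_ne_bytes(*p)).collect();
    assert_eq!(aligned, &expect[..]);
}
```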
#[test]
fn test_align_to_zst() {
let bytes = [1, 2, 3, 4, 5, 6, 7];
let (prefix, aligned, suffix) = unsafe { bytes.align_to::<()>() };
assert_eq!(aligned.len(), 0);
assert!(prefix == [1, 2, 3, 4, 5, 6, 7] || suffix == [1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn test_align_to_non_trivial() {
#[repr(align(8))] struct U64(u64, u64);
#[repr(align(8))] struct U64U64U32(u64, u64, u32);
let data = [U64(1, 2), U64(3, 4), U64(5, 6), U64(7, 8), U64(9, 10), U64(11, 12), U64(13, 14),
U64(15, 16)];
let (prefix, aligned, suffix) = unsafe { data.align_to::<U64U64U32>() };
assert_eq!(aligned.len(), 4);
assert_eq!(prefix.len() + suffix.len(), 2);
}
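These counts follow directly from `align_to_offsets`: `size_of::<U64>()` is 16 and `size_of::<U64U64U32>()` is 24 (20 bytes of fields padded up to the 8-byte alignment), so, as in the worked example earlier, three `T`s hold two `U`s; the 8-element input then yields 8 / 3 * 2 = 4 whole `U`s in the middle and 8 % 3 = 2 leftover `T`s split between the prefix and the suffix.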

View file

@@ -1,94 +0,0 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(align_offset)]
#[derive(Clone, Copy)]
#[repr(packed)]
struct A3(u16, u8);
struct A4(u32);
#[repr(packed)]
struct A5(u32, u8);
#[repr(packed)]
struct A6(u32, u16);
#[repr(packed)]
struct A7(u32, u16, u8);
#[repr(packed)]
struct A8(u32, u32);
#[repr(packed)]
struct A9(u32, u32, u8);
#[repr(packed)]
struct A10(u32, u32, u16);
unsafe fn test_weird_stride<T>(ptr: *const T, align: usize) -> bool {
let numptr = ptr as usize;
let mut expected = usize::max_value();
// Naive but definitely correct way to find the *first* aligned element of stride::<T>.
for el in 0..align {
if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
expected = el;
break;
}
}
let got = ptr.align_offset(align);
if got != expected {
eprintln!("aligning {:p} (with stride of {}) to {}, expected {}, got {}", ptr, ::std::mem::size_of::<T>(), align, expected, got);
return true;
}
return false;
}
fn main() {
unsafe {
// For pointers of stride = 0, the pointer is either already aligned or it cannot be
// aligned at all, because no number of elements will ever align it.
let mut p = 1;
while p < 1024 {
assert_eq!((p as *const ()).align_offset(p), 0);
if p != 1 {
assert_eq!(((p + 1) as *const ()).align_offset(p), !0);
}
p = (p + 1).next_power_of_two();
}
// For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
// the number of bytes needed to align it.
let mut align = 1;
while align < 1024 {
    for ptr in 1..2*align {
        let expected = ptr % align;
        let offset = if expected == 0 { 0 } else { align - expected };
        assert_eq!((ptr as *const u8).align_offset(align), offset,
                   "ptr = {}, align = {}, size = 1", ptr, align);
    }
    align = (align + 1).next_power_of_two();
}
// For pointers of stride != 1, we verify the algorithm against the naivest possible
// implementation
let mut align = 1;
let mut x = false;
while align < 1024 {
for ptr in 1usize..4*align {
x |= test_weird_stride::<A3>(ptr as *const A3, align);
x |= test_weird_stride::<A4>(ptr as *const A4, align);
x |= test_weird_stride::<A5>(ptr as *const A5, align);
x |= test_weird_stride::<A6>(ptr as *const A6, align);
x |= test_weird_stride::<A7>(ptr as *const A7, align);
x |= test_weird_stride::<A8>(ptr as *const A8, align);
x |= test_weird_stride::<A9>(ptr as *const A9, align);
x |= test_weird_stride::<A10>(ptr as *const A10, align);
}
align = (align + 1).next_power_of_two();
}
assert!(!x);
}
}