--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks type-checking of `extern "platform-intrinsic"` declarations:
+// the declared signature is compared against the intrinsic's known
+// shape (lane count, and integer vs. float elements), while the
+// signedness and the particular nominal struct used are not
+// constrained. (`//~ ERROR` lines are compiletest annotations for the
+// expected diagnostics on the line(s) above them.)
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16);
+#[repr(simd)]
+struct u16x8(u16, u16, u16, u16, u16, u16, u16, u16);
+
+#[repr(simd)]
+struct i8x16(i8, i8, i8, i8, i8, i8, i8, i8,
+             i8, i8, i8, i8, i8, i8, i8, i8);
+#[repr(simd)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+struct f32x4(f32, f32, f32, f32);
+#[repr(simd)]
+struct i64x2(i64, i64);
+
+// signed vs. unsigned doesn't matter
+mod i {
+    use i16x8;
+    extern "platform-intrinsic" {
+        fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8;
+    }
+}
+mod u {
+    use u16x8;
+    extern "platform-intrinsic" {
+        fn x86_mm_adds_epi16(x: u16x8, y: u16x8) -> u16x8;
+    }
+}
+// but lengths do
+extern "platform-intrinsic" {
+    fn x86_mm_adds_epi16(x: i8x16, y: i32x4) -> i64x2;
+    //~^ ERROR intrinsic argument 1 has wrong type
+    //~^^ ERROR intrinsic argument 2 has wrong type
+    //~^^^ ERROR intrinsic return value has wrong type
+}
+// and so does int vs. float
+extern "platform-intrinsic" {
+    fn x86_mm_max_ps(x: i32x4, y: i32x4) -> i32x4;
+    //~^ ERROR intrinsic argument 1 has wrong type
+    //~^^ ERROR intrinsic argument 2 has wrong type
+    //~^^^ ERROR intrinsic return value has wrong type
+}
+
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks the monomorphization-time errors for the generic SIMD
+// arithmetic/bitwise intrinsics: instantiating with a scalar
+// (non-SIMD) type is rejected, as is an element type the operation
+// does not support (`simd_div` on integer lanes; shifts and bitwise
+// ops on float lanes). The valid calls at the top must pass.
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct i32x4(pub i32, pub i32, pub i32, pub i32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
+
+extern "platform-intrinsic" {
+    fn simd_add<T>(x: T, y: T) -> T;
+    fn simd_sub<T>(x: T, y: T) -> T;
+    fn simd_mul<T>(x: T, y: T) -> T;
+    fn simd_div<T>(x: T, y: T) -> T;
+    fn simd_shl<T>(x: T, y: T) -> T;
+    fn simd_shr<T>(x: T, y: T) -> T;
+    fn simd_and<T>(x: T, y: T) -> T;
+    fn simd_or<T>(x: T, y: T) -> T;
+    fn simd_xor<T>(x: T, y: T) -> T;
+}
+
+fn main() {
+    let x = i32x4(0, 0, 0, 0);
+    let y = u32x4(0, 0, 0, 0);
+    let z = f32x4(0.0, 0.0, 0.0, 0.0);
+
+    unsafe {
+        // All of these instantiations are valid and must not error.
+        simd_add(x, x);
+        simd_add(y, y);
+        simd_add(z, z);
+        simd_sub(x, x);
+        simd_sub(y, y);
+        simd_sub(z, z);
+        simd_mul(x, x);
+        simd_mul(y, y);
+        simd_mul(z, z);
+
+        simd_div(z, z);
+
+        simd_shl(x, x);
+        simd_shl(y, y);
+        simd_shr(x, x);
+        simd_shr(y, y);
+        simd_and(x, x);
+        simd_and(y, y);
+        simd_or(x, x);
+        simd_or(y, y);
+        simd_xor(x, x);
+        simd_xor(y, y);
+
+
+        // Scalar instantiations (T = integer literal type) are rejected.
+        simd_add(0, 0);
+        //~^ ERROR `simd_add` intrinsic monomorphized with non-SIMD type
+        simd_sub(0, 0);
+        //~^ ERROR `simd_sub` intrinsic monomorphized with non-SIMD type
+        simd_mul(0, 0);
+        //~^ ERROR `simd_mul` intrinsic monomorphized with non-SIMD type
+        simd_div(0, 0);
+        //~^ ERROR `simd_div` intrinsic monomorphized with non-SIMD type
+        simd_shl(0, 0);
+        //~^ ERROR `simd_shl` intrinsic monomorphized with non-SIMD type
+        simd_shr(0, 0);
+        //~^ ERROR `simd_shr` intrinsic monomorphized with non-SIMD type
+        simd_and(0, 0);
+        //~^ ERROR `simd_and` intrinsic monomorphized with non-SIMD type
+        simd_or(0, 0);
+        //~^ ERROR `simd_or` intrinsic monomorphized with non-SIMD type
+        simd_xor(0, 0);
+        //~^ ERROR `simd_xor` intrinsic monomorphized with non-SIMD type
+
+
+        // SIMD vectors with an element type the operation does not support.
+        simd_div(x, x);
+//~^ ERROR `simd_div` intrinsic monomorphized with SIMD vector `i32x4` with unsupported element type
+        simd_div(y, y);
+//~^ ERROR `simd_div` intrinsic monomorphized with SIMD vector `u32x4` with unsupported element type
+        simd_shl(z, z);
+//~^ ERROR `simd_shl` intrinsic monomorphized with SIMD vector `f32x4` with unsupported element type
+        simd_shr(z, z);
+//~^ ERROR `simd_shr` intrinsic monomorphized with SIMD vector `f32x4` with unsupported element type
+        simd_and(z, z);
+//~^ ERROR `simd_and` intrinsic monomorphized with SIMD vector `f32x4` with unsupported element type
+        simd_or(z, z);
+//~^ ERROR `simd_or` intrinsic monomorphized with SIMD vector `f32x4` with unsupported element type
+        simd_xor(z, z);
+//~^ ERROR `simd_xor` intrinsic monomorphized with SIMD vector `f32x4` with unsupported element type
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks the monomorphization-time errors for `simd_cast`: both the
+// input and the return type must be SIMD vectors, and they must have
+// the same number of lanes.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x8(i32, i32, i32, i32,
+             i32, i32, i32, i32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x4(f32, f32, f32, f32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x8(f32, f32, f32, f32,
+             f32, f32, f32, f32);
+
+
+extern "platform-intrinsic" {
+    fn simd_cast<T, U>(x: T) -> U;
+}
+
+fn main() {
+    let x = i32x4(0, 0, 0, 0);
+
+    unsafe {
+        simd_cast::<i32, i32>(0);
+        //~^ ERROR SIMD cast intrinsic monomorphized with non-SIMD input type `i32`
+        simd_cast::<i32, i32x4>(0);
+        //~^ ERROR SIMD cast intrinsic monomorphized with non-SIMD input type `i32`
+        simd_cast::<i32x4, i32>(x);
+        //~^ ERROR SIMD cast intrinsic monomorphized with non-SIMD return type `i32`
+        // 4 lanes in, 8 lanes out: lane counts must match.
+        simd_cast::<_, i32x8>(x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i32x8` with different lengths
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks the monomorphization-time errors for the generic SIMD
+// comparison intrinsics (`simd_eq`/`ne`/`lt`/`le`/`gt`/`ge`): both the
+// argument type and the (boolean-vector) return type must be SIMD
+// vectors, and they must have the same number of lanes.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i16x8(i16, i16, i16, i16,
+             i16, i16, i16, i16);
+
+extern "platform-intrinsic" {
+    fn simd_eq<T, U>(x: T, y: T) -> U;
+    fn simd_ne<T, U>(x: T, y: T) -> U;
+    fn simd_lt<T, U>(x: T, y: T) -> U;
+    fn simd_le<T, U>(x: T, y: T) -> U;
+    fn simd_gt<T, U>(x: T, y: T) -> U;
+    fn simd_ge<T, U>(x: T, y: T) -> U;
+}
+
+fn main() {
+    let x = i32x4(0, 0, 0, 0);
+
+    unsafe {
+        // Scalar argument type.
+        simd_eq::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+        simd_ne::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+        simd_lt::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+        simd_le::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+        simd_gt::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+        simd_ge::<i32, i32>(0, 0);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD argument type
+
+        // Scalar return type.
+        simd_eq::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+        simd_ne::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+        simd_lt::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+        simd_le::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+        simd_gt::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+        simd_ge::<_, i32>(x, x);
+        //~^ ERROR SIMD comparison intrinsic monomorphized for non-SIMD return type
+
+        // 4 lanes in, 8 lanes out: lane counts must match.
+        simd_eq::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+        simd_ne::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+        simd_lt::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+        simd_le::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+        simd_gt::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+        simd_ge::<_, i16x8>(x, x);
+//~^ ERROR monomorphized with input type `i32x4` and return type `i16x8` with different lengths
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks the monomorphization-time errors for `simd_insert`,
+// `simd_extract` and the `simd_shuffleN` intrinsics: the vector
+// argument must actually be a SIMD type, the inserted/extracted scalar
+// must be the vector's element type, and a shuffle's input and return
+// vectors must share an element type.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x2(i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x3(i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct i32x8(i32, i32, i32, i32,
+             i32, i32, i32, i32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x2(f32, f32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x3(f32, f32, f32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x4(f32, f32, f32, f32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+#[allow(non_camel_case_types)]
+struct f32x8(f32, f32, f32, f32,
+             f32, f32, f32, f32);
+
+extern "platform-intrinsic" {
+    fn simd_insert<T, E>(x: T, idx: u32, y: E) -> T;
+    fn simd_extract<T, E>(x: T, idx: u32) -> E;
+
+    fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
+    fn simd_shuffle3<T, U>(x: T, y: T, idx: [u32; 3]) -> U;
+    fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
+    fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
+}
+
+fn main() {
+    let x = i32x4(0, 0, 0, 0);
+
+    unsafe {
+        simd_insert(0, 0, 0);
+        //~^ ERROR SIMD insert intrinsic monomorphized for non-SIMD input type
+        // inserting an f32 into an i32 vector
+        simd_insert(x, 0, 1.0);
+        //~^ ERROR SIMD insert intrinsic monomorphized with inserted type not SIMD element type
+        // extracting an f32 from an i32 vector
+        simd_extract::<_, f32>(x, 0);
+        //~^ ERROR SIMD insert intrinsic monomorphized with returned type not SIMD element type
+
+        simd_shuffle2::<i32, i32>(0, 0, [0; 2]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with non-SIMD input type
+        simd_shuffle3::<i32, i32>(0, 0, [0; 3]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with non-SIMD input type
+        simd_shuffle4::<i32, i32>(0, 0, [0; 4]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with non-SIMD input type
+        simd_shuffle8::<i32, i32>(0, 0, [0; 8]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with non-SIMD input type
+
+        // i32 lanes in, f32 lanes out: element types must agree.
+        simd_shuffle2::<_, f32x2>(x, x, [0; 2]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with different input and return element
+        simd_shuffle3::<_, f32x3>(x, x, [0; 3]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with different input and return element
+        simd_shuffle4::<_, f32x4>(x, x, [0; 4]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with different input and return element
+        simd_shuffle8::<_, f32x8>(x, x, [0; 8]);
+        //~^ ERROR SIMD shuffle intrinsic monomorphized with different input and return element
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks that mixing two structurally-identical but nominally distinct
+// SIMD structs within one intrinsic declaration is an error.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+// A and B have identical layout but are distinct nominal types.
+#[repr(simd)]
+struct A(i16, i16, i16, i16, i16, i16, i16, i16);
+#[repr(simd)]
+struct B(i16, i16, i16, i16, i16, i16, i16, i16);
+
+// each intrinsic definition has to use the same nominal type for any
+// vector structure throughout that declaration (i.e. every instance
+// of i16x8 in each `fn ...;` needs to be either A or B)
+
+extern "platform-intrinsic" {
+    fn x86_mm_adds_epi16(x: A, y: A) -> B;
+    //~^ ERROR intrinsic return value has wrong type: found `B`, expected `A`
+    fn x86_mm_subs_epi16(x: A, y: B) -> A;
+    //~^ ERROR intrinsic argument 2 has wrong type: found `B`, expected `A`
+
+    // ok:
+    fn x86_mm_max_epi16(x: B, y: B) -> B;
+    fn x86_mm_min_epi16(x: A, y: A) -> A;
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks that instantiating a generic `#[repr(simd)]` type with a
+// non-machine element type (here a struct wrapping a `Vec`) is
+// rejected at monomorphization time; the expected diagnostic is given
+// by the directive below.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+// error-pattern:monomorphising SIMD type `Simd2<X>` with a non-machine element type `X`
+
+struct X(Vec<i32>);
+#[repr(simd)]
+struct Simd2<T>(T, T);
+
+fn main() {
+    // The struct definition alone is fine; only this use of
+    // `Simd2<X>` forces the bad monomorphization.
+    let _ = Simd2(X(vec![]), X(vec![]));
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Run-pass test exercising the generic SIMD arithmetic/bitwise
+// intrinsics lane-by-lane against the corresponding scalar operators.
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct i32x4(pub i32, pub i32, pub i32, pub i32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct u32x4(pub u32, pub u32, pub u32, pub u32);
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct f32x4(pub f32, pub f32, pub f32, pub f32);
+
+// Asserts that all four lanes of two 4-lane vectors are equal.
+macro_rules! all_eq {
+    ($a: expr, $b: expr) => {{
+        let a = $a;
+        let b = $b;
+        assert!(a.0 == b.0 && a.1 == b.1 && a.2 == b.2 && a.3 == b.3);
+    }}
+}
+
+extern "platform-intrinsic" {
+    fn simd_add<T>(x: T, y: T) -> T;
+    fn simd_sub<T>(x: T, y: T) -> T;
+    fn simd_mul<T>(x: T, y: T) -> T;
+    fn simd_div<T>(x: T, y: T) -> T;
+    fn simd_shl<T>(x: T, y: T) -> T;
+    fn simd_shr<T>(x: T, y: T) -> T;
+    fn simd_and<T>(x: T, y: T) -> T;
+    fn simd_or<T>(x: T, y: T) -> T;
+    fn simd_xor<T>(x: T, y: T) -> T;
+}
+
+fn main() {
+    let x1 = i32x4(1, 2, 3, 4);
+    let y1 = u32x4(1, 2, 3, 4);
+    let z1 = f32x4(1.0, 2.0, 3.0, 4.0);
+    let x2 = i32x4(2, 3, 4, 5);
+    let y2 = u32x4(2, 3, 4, 5);
+    let z2 = f32x4(2.0, 3.0, 4.0, 5.0);
+
+    unsafe {
+        all_eq!(simd_add(x1, x2), i32x4(3, 5, 7, 9));
+        all_eq!(simd_add(x2, x1), i32x4(3, 5, 7, 9));
+        all_eq!(simd_add(y1, y2), u32x4(3, 5, 7, 9));
+        all_eq!(simd_add(y2, y1), u32x4(3, 5, 7, 9));
+        all_eq!(simd_add(z1, z2), f32x4(3.0, 5.0, 7.0, 9.0));
+        all_eq!(simd_add(z2, z1), f32x4(3.0, 5.0, 7.0, 9.0));
+
+        all_eq!(simd_mul(x1, x2), i32x4(2, 6, 12, 20));
+        all_eq!(simd_mul(x2, x1), i32x4(2, 6, 12, 20));
+        all_eq!(simd_mul(y1, y2), u32x4(2, 6, 12, 20));
+        all_eq!(simd_mul(y2, y1), u32x4(2, 6, 12, 20));
+        all_eq!(simd_mul(z1, z2), f32x4(2.0, 6.0, 12.0, 20.0));
+        all_eq!(simd_mul(z2, z1), f32x4(2.0, 6.0, 12.0, 20.0));
+
+        all_eq!(simd_sub(x2, x1), i32x4(1, 1, 1, 1));
+        all_eq!(simd_sub(x1, x2), i32x4(-1, -1, -1, -1));
+        all_eq!(simd_sub(y2, y1), u32x4(1, 1, 1, 1));
+        // unsigned subtraction wraps modulo 2^32: 1 - 2 == !0 (all bits set)
+        all_eq!(simd_sub(y1, y2), u32x4(!0, !0, !0, !0));
+        all_eq!(simd_sub(z2, z1), f32x4(1.0, 1.0, 1.0, 1.0));
+        all_eq!(simd_sub(z1, z2), f32x4(-1.0, -1.0, -1.0, -1.0));
+
+        all_eq!(simd_div(z1, z2), f32x4(1.0/2.0, 2.0/3.0, 3.0/4.0, 4.0/5.0));
+        all_eq!(simd_div(z2, z1), f32x4(2.0/1.0, 3.0/2.0, 4.0/3.0, 5.0/4.0));
+
+        all_eq!(simd_shl(x1, x2), i32x4(1 << 2, 2 << 3, 3 << 4, 4 << 5));
+        all_eq!(simd_shl(x2, x1), i32x4(2 << 1, 3 << 2, 4 << 3, 5 << 4));
+        all_eq!(simd_shl(y1, y2), u32x4(1 << 2, 2 << 3, 3 << 4, 4 << 5));
+        all_eq!(simd_shl(y2, y1), u32x4(2 << 1, 3 << 2, 4 << 3, 5 << 4));
+
+        // test right-shift by assuming left-shift is correct
+        all_eq!(simd_shr(simd_shl(x1, x2), x2), x1);
+        all_eq!(simd_shr(simd_shl(x2, x1), x1), x2);
+        all_eq!(simd_shr(simd_shl(y1, y2), y2), y1);
+        all_eq!(simd_shr(simd_shl(y2, y1), y1), y2);
+
+        // ensure we get logical vs. arithmetic shifts correct
+        let (a, b, c, d) = (-12, -123, -1234, -12345);
+        all_eq!(simd_shr(i32x4(a, b, c, d), x1), i32x4(a >> 1, b >> 2, c >> 3, d >> 4));
+        all_eq!(simd_shr(u32x4(a as u32, b as u32, c as u32, d as u32), y1),
+                u32x4((a as u32) >> 1, (b as u32) >> 2, (c as u32) >> 3, (d as u32) >> 4));
+
+        all_eq!(simd_and(x1, x2), i32x4(0, 2, 0, 4));
+        all_eq!(simd_and(x2, x1), i32x4(0, 2, 0, 4));
+        all_eq!(simd_and(y1, y2), u32x4(0, 2, 0, 4));
+        all_eq!(simd_and(y2, y1), u32x4(0, 2, 0, 4));
+
+        all_eq!(simd_or(x1, x2), i32x4(3, 3, 7, 5));
+        all_eq!(simd_or(x2, x1), i32x4(3, 3, 7, 5));
+        all_eq!(simd_or(y1, y2), u32x4(3, 3, 7, 5));
+        all_eq!(simd_or(y2, y1), u32x4(3, 3, 7, 5));
+
+        all_eq!(simd_xor(x1, x2), i32x4(3, 1, 7, 1));
+        all_eq!(simd_xor(x2, x1), i32x4(3, 1, 7, 1));
+        all_eq!(simd_xor(y1, y2), u32x4(3, 1, 7, 1));
+        all_eq!(simd_xor(y2, y1), u32x4(3, 1, 7, 1));
+
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Run-pass test for `simd_cast`: casts four fixed values lane-wise and
+// compares against the scalar `as` cast for many integer/float lane
+// type combinations, normalizing out those float->int casts whose
+// out-of-range behaviour is platform-dependent.
+
+#![feature(repr_simd, platform_intrinsics, concat_idents,
+           type_macros, test)]
+#![allow(non_camel_case_types)]
+
+extern crate test;
+
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct i8x4(i8, i8, i8, i8);
+
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct u32x4(u32, u32, u32, u32);
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct u8x4(u8, u8, u8, u8);
+
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct f32x4(f32, f32, f32, f32);
+
+#[repr(simd)]
+#[derive(PartialEq, Debug)]
+struct f64x4(f64, f64, f64, f64);
+
+
+extern "platform-intrinsic" {
+    fn simd_cast<T, U>(x: T) -> U;
+}
+
+// The four test values, chosen to exercise truncation and sign issues.
+const A: i32 = -1234567;
+const B: i32 = 12345678;
+const C: i32 = -123456789;
+const D: i32 = 1234567890;
+
+// Per-lane-type predicates used by `test!` to decide which lanes have
+// well-defined (in-range) cast results.
+trait Foo {
+    fn is_float() -> bool { false }
+    fn in_range(x: i32) -> bool;
+}
+impl Foo for i32 {
+    fn in_range(_: i32) -> bool { true }
+}
+impl Foo for i8 {
+    fn in_range(x: i32) -> bool { -128 <= x && x < 128 }
+}
+impl Foo for u32 {
+    fn in_range(x: i32) -> bool { 0 <= x }
+}
+impl Foo for u8 {
+    fn in_range(x: i32) -> bool { 0 <= x && x < 128 }
+}
+impl Foo for f32 {
+    fn is_float() -> bool { true }
+    fn in_range(_: i32) -> bool { true }
+}
+impl Foo for f64 {
+    fn is_float() -> bool { true }
+    fn in_range(_: i32) -> bool { true }
+}
+
+fn main() {
+    macro_rules! test {
+        ($from: ident, $to: ident) => {{
+            // force the casts to actually happen, or else LLVM/rustc
+            // may fold them and get slightly different results.
+            let (a, b, c, d) = test::black_box((A as $from, B as $from, C as $from, D as $from));
+            // the SIMD vectors are all FOOx4, so we can concat_idents
+            // so we don't have to pass in the extra args to the macro
+            let mut from = simd_cast(concat_idents!($from, x4)(a, b, c, d));
+            let mut to = concat_idents!($to, x4)(a as $to,
+                                                 b as $to,
+                                                 c as $to,
+                                                 d as $to);
+            // assist type inference, it needs to know what `from` is
+            // for the `if` statements.
+            to == from;
+
+            // there are platform differences for some out of range
+            // casts, so we just normalize such things: it's OK for
+            // "invalid" calculations to result in nonsense answers.
+            // (E.g. negative float to unsigned integer goes through a
+            // library routine on the default i686 platforms, and the
+            // implementation of that routine differs on e.g. Linux
+            // vs. OSX, resulting in different answers.)
+            if $from::is_float() {
+                if !$to::in_range(A) { from.0 = 0 as $to; to.0 = 0 as $to; }
+                if !$to::in_range(B) { from.1 = 0 as $to; to.1 = 0 as $to; }
+                if !$to::in_range(C) { from.2 = 0 as $to; to.2 = 0 as $to; }
+                if !$to::in_range(D) { from.3 = 0 as $to; to.3 = 0 as $to; }
+            }
+
+            assert!(to == from,
+                    "{} -> {} ({:?} != {:?})", stringify!($from), stringify!($to),
+                    from, to);
+        }}
+    }
+    macro_rules! tests {
+        (: $($to: ident),*) => { () };
+        // repeating the list twice is easier than writing a cartesian
+        // product macro
+        ($from: ident $(, $from_: ident)*: $($to: ident),*) => {
+            fn $from() { unsafe { $( test!($from, $to); )* } }
+            tests!($($from_),*: $($to),*)
+        };
+        ($($types: ident),*) => {{
+            tests!($($types),* : $($types),*);
+            $($types();)*
+        }}
+    }
+
+    // test various combinations, including truncation,
+    // signed/unsigned extension, and floating point casts.
+    tests!(i32, i8, u32, u8, f32);
+    tests!(i32, u32, f32, f64)
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Run-pass test for the generic SIMD comparison intrinsics: each
+// lane's result is checked against the scalar `PartialEq`/`PartialOrd`
+// method of the same name, including NAN lanes for floats.
+
+#![feature(repr_simd, platform_intrinsics, concat_idents)]
+#![allow(non_camel_case_types)]
+
+use std::f32::NAN;
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct u32x4(pub u32, pub u32, pub u32, pub u32);
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct f32x4(pub f32, pub f32, pub f32, pub f32);
+
+extern "platform-intrinsic" {
+    fn simd_eq<T, U>(x: T, y: T) -> U;
+    fn simd_ne<T, U>(x: T, y: T) -> U;
+    fn simd_lt<T, U>(x: T, y: T) -> U;
+    fn simd_le<T, U>(x: T, y: T) -> U;
+    fn simd_gt<T, U>(x: T, y: T) -> U;
+    fn simd_ge<T, U>(x: T, y: T) -> U;
+}
+
+// Checks one comparison: each lane of the SIMD result (non-zero ==
+// true) must match the scalar method applied to that lane.
+macro_rules! cmp {
+    ($method: ident($lhs: expr, $rhs: expr)) => {{
+        let lhs = $lhs;
+        let rhs = $rhs;
+        let e: u32x4 = concat_idents!(simd_, $method)($lhs, $rhs);
+        // assume the scalar version is correct/the behaviour we want.
+        assert!((e.0 != 0) == lhs.0 .$method(&rhs.0));
+        assert!((e.1 != 0) == lhs.1 .$method(&rhs.1));
+        assert!((e.2 != 0) == lhs.2 .$method(&rhs.2));
+        assert!((e.3 != 0) == lhs.3 .$method(&rhs.3));
+    }}
+}
+// Runs all six comparisons (ordering ones in both directions) for each
+// lhs/rhs pair.
+macro_rules! tests {
+    ($($lhs: ident, $rhs: ident;)*) => {{
+        $(
+            (|| {
+                cmp!(eq($lhs, $rhs));
+                cmp!(ne($lhs, $rhs));
+
+                // test both directions
+                cmp!(lt($lhs, $rhs));
+                cmp!(lt($rhs, $lhs));
+
+                cmp!(le($lhs, $rhs));
+                cmp!(le($rhs, $lhs));
+
+                cmp!(gt($lhs, $rhs));
+                cmp!(gt($rhs, $lhs));
+
+                cmp!(ge($lhs, $rhs));
+                cmp!(ge($rhs, $lhs));
+            })();
+        )*
+    }}
+}
+fn main() {
+    // 13 vs. -100 tests that we get signed vs. unsigned comparisons
+    // correct (i32: 13 > -100, u32: 13 < -100).
+    let i1 = i32x4(10, -11, 12, 13);
+    let i2 = i32x4(5, -5, 20, -100);
+    let i3 = i32x4(10, -11, 20, -100);
+
+    // `!n+1` is two's-complement negation, i.e. `-n` reinterpreted as u32.
+    let u1 = u32x4(10, !11+1, 12, 13);
+    let u2 = u32x4(5, !5+1, 20, !100+1);
+    let u3 = u32x4(10, !11+1, 20, !100+1);
+
+    let f1 = f32x4(10.0, -11.0, 12.0, 13.0);
+    let f2 = f32x4(5.0, -5.0, 20.0, -100.0);
+    let f3 = f32x4(10.0, -11.0, 20.0, -100.0);
+
+    unsafe {
+        tests! {
+            i1, i1;
+            u1, u1;
+            f1, f1;
+
+            i1, i2;
+            u1, u2;
+            f1, f2;
+
+            i1, i3;
+            u1, u3;
+            f1, f3;
+        }
+    }
+
+    // NAN comparisons are special:
+    // -11 (*) 13
+    // -5 -100 (*)
+    let f4 = f32x4(NAN, f1.1, NAN, f2.3);
+
+    unsafe {
+        tests! {
+            f1, f4;
+            f2, f4;
+            f4, f4;
+        }
+    }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Run-pass test for `simd_insert`, `simd_extract` and the
+// `simd_shuffleN` intrinsics. A shuffle index selects from the
+// concatenation of the two argument vectors: indices 0..lanes pick
+// from `x`, indices lanes..2*lanes pick from `y` (e.g. below,
+// `simd_shuffle2(x2, y2, [3, 0])` yields `(y2.1, x2.0)`).
+
+#![feature(repr_simd, platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x2(i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x3(i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x4(i32, i32, i32, i32);
+#[repr(simd)]
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[allow(non_camel_case_types)]
+struct i32x8(i32, i32, i32, i32,
+             i32, i32, i32, i32);
+
+extern "platform-intrinsic" {
+    fn simd_insert<T, E>(x: T, idx: u32, y: E) -> T;
+    fn simd_extract<T, E>(x: T, idx: u32) -> E;
+
+    fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U;
+    fn simd_shuffle3<T, U>(x: T, y: T, idx: [u32; 3]) -> U;
+    fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
+    fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U;
+}
+
+macro_rules! all_eq {
+    ($a: expr, $b: expr) => {{
+        let a = $a;
+        let b = $b;
+        // type inference works better with the concrete type on the
+        // left, but humans work better with the expected on the
+        // right.
+        assert!(b == a,
+                "{:?} != {:?}", a, b);
+    }}
+}
+
+fn main() {
+    // Lane values encode vector-length * 10 + lane index, so any
+    // misplaced lane is immediately visible in a failure message.
+    let x2 = i32x2(20, 21);
+    let x3 = i32x3(30, 31, 32);
+    let x4 = i32x4(40, 41, 42, 43);
+    let x8 = i32x8(80, 81, 82, 83, 84, 85, 86, 87);
+    unsafe {
+        all_eq!(simd_insert(x2, 0, 100), i32x2(100, 21));
+        all_eq!(simd_insert(x2, 1, 100), i32x2(20, 100));
+
+        all_eq!(simd_insert(x3, 0, 100), i32x3(100, 31, 32));
+        all_eq!(simd_insert(x3, 1, 100), i32x3(30, 100, 32));
+        all_eq!(simd_insert(x3, 2, 100), i32x3(30, 31, 100));
+
+        all_eq!(simd_insert(x4, 0, 100), i32x4(100, 41, 42, 43));
+        all_eq!(simd_insert(x4, 1, 100), i32x4(40, 100, 42, 43));
+        all_eq!(simd_insert(x4, 2, 100), i32x4(40, 41, 100, 43));
+        all_eq!(simd_insert(x4, 3, 100), i32x4(40, 41, 42, 100));
+
+        all_eq!(simd_insert(x8, 0, 100), i32x8(100, 81, 82, 83, 84, 85, 86, 87));
+        all_eq!(simd_insert(x8, 1, 100), i32x8(80, 100, 82, 83, 84, 85, 86, 87));
+        all_eq!(simd_insert(x8, 2, 100), i32x8(80, 81, 100, 83, 84, 85, 86, 87));
+        all_eq!(simd_insert(x8, 3, 100), i32x8(80, 81, 82, 100, 84, 85, 86, 87));
+        all_eq!(simd_insert(x8, 4, 100), i32x8(80, 81, 82, 83, 100, 85, 86, 87));
+        all_eq!(simd_insert(x8, 5, 100), i32x8(80, 81, 82, 83, 84, 100, 86, 87));
+        all_eq!(simd_insert(x8, 6, 100), i32x8(80, 81, 82, 83, 84, 85, 100, 87));
+        all_eq!(simd_insert(x8, 7, 100), i32x8(80, 81, 82, 83, 84, 85, 86, 100));
+
+        all_eq!(simd_extract(x2, 0), 20);
+        all_eq!(simd_extract(x2, 1), 21);
+
+        all_eq!(simd_extract(x3, 0), 30);
+        all_eq!(simd_extract(x3, 1), 31);
+        all_eq!(simd_extract(x3, 2), 32);
+
+        all_eq!(simd_extract(x4, 0), 40);
+        all_eq!(simd_extract(x4, 1), 41);
+        all_eq!(simd_extract(x4, 2), 42);
+        all_eq!(simd_extract(x4, 3), 43);
+
+        all_eq!(simd_extract(x8, 0), 80);
+        all_eq!(simd_extract(x8, 1), 81);
+        all_eq!(simd_extract(x8, 2), 82);
+        all_eq!(simd_extract(x8, 3), 83);
+        all_eq!(simd_extract(x8, 4), 84);
+        all_eq!(simd_extract(x8, 5), 85);
+        all_eq!(simd_extract(x8, 6), 86);
+        all_eq!(simd_extract(x8, 7), 87);
+    }
+
+    let y2 = i32x2(120, 121);
+    let y3 = i32x3(130, 131, 132);
+    let y4 = i32x4(140, 141, 142, 143);
+    let y8 = i32x8(180, 181, 182, 183, 184, 185, 186, 187);
+    unsafe {
+        all_eq!(simd_shuffle2(x2, y2, [3, 0]), i32x2(121, 20));
+        all_eq!(simd_shuffle3(x2, y2, [3, 0, 1]), i32x3(121, 20, 21));
+        all_eq!(simd_shuffle4(x2, y2, [3, 0, 1, 2]), i32x4(121, 20, 21, 120));
+        all_eq!(simd_shuffle8(x2, y2, [3, 0, 1, 2, 1, 2, 3, 0]),
+                i32x8(121, 20, 21, 120, 21, 120, 121, 20));
+
+        all_eq!(simd_shuffle2(x3, y3, [4, 2]), i32x2(131, 32));
+        all_eq!(simd_shuffle3(x3, y3, [4, 2, 3]), i32x3(131, 32, 130));
+        all_eq!(simd_shuffle4(x3, y3, [4, 2, 3, 0]), i32x4(131, 32, 130, 30));
+        all_eq!(simd_shuffle8(x3, y3, [4, 2, 3, 0, 1, 5, 5, 1]),
+                i32x8(131, 32, 130, 30, 31, 132, 132, 31));
+
+        all_eq!(simd_shuffle2(x4, y4, [7, 2]), i32x2(143, 42));
+        all_eq!(simd_shuffle3(x4, y4, [7, 2, 5]), i32x3(143, 42, 141));
+        all_eq!(simd_shuffle4(x4, y4, [7, 2, 5, 0]), i32x4(143, 42, 141, 40));
+        all_eq!(simd_shuffle8(x4, y4, [7, 2, 5, 0, 3, 6, 4, 1]),
+                i32x8(143, 42, 141, 40, 43, 142, 140, 41));
+
+        all_eq!(simd_shuffle2(x8, y8, [11, 5]), i32x2(183, 85));
+        all_eq!(simd_shuffle3(x8, y8, [11, 5, 15]), i32x3(183, 85, 187));
+        all_eq!(simd_shuffle4(x8, y8, [11, 5, 15, 0]), i32x4(183, 85, 187, 80));
+        all_eq!(simd_shuffle8(x8, y8, [11, 5, 15, 0, 3, 8, 12, 1]),
+                i32x8(183, 85, 187, 80, 83, 180, 184, 81));
+    }
+
+}