#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;
use std::io::Write;
use std::ops::Generator;
// NOTE(review): diff fragment — the enclosing `fn main() {` header and several
// context lines are elided; names like `houndred_i128`, `houndred_f32`,
// `houndred_f64` are defined in the omitted parts. Confirm against full file.
let stderr = ::std::io::stderr();
let mut stderr = stderr.lock();
+ // FIXME support lazy jit when multi threading
+ #[cfg(not(lazy_jit))]
std::thread::spawn(move || {
println!("Hello from another thread!");
});
// 128-bit codegen checks: leading/trailing zero counts on u128 literals.
assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+ // 0 - i128::MIN overflows, so saturating_sub must clamp to i128::MAX.
+ assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
// checked_div with nonzero divisor; results deliberately discarded —
// this only exercises the 128-bit division codegen path.
let _d = 0i128.checked_div(2i128);
let _d = 0u128.checked_div(2u128);
assert_eq!(houndred_i128 as f64, 100.0);
assert_eq!(houndred_f32 as i128, 100);
assert_eq!(houndred_f64 as i128, 100);
+ assert_eq!(1u128.rotate_left(2), 4);
// Test signed 128bit comparing
let max = usize::MAX as i128;
println!("{:?}", std::intrinsics::caller_location());
+ // SIMD tests use std::arch::x86_64 intrinsics, so gate on the target arch.
+ #[cfg(target_arch = "x86_64")]
unsafe {
test_simd();
}
// NOTE(review): unconditional panic here — presumably inside an elided
// closure/catch_unwind or guarded by elided context; verify in the full file.
panic!();
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
// Driver for the SSE2/SSE4.1 intrinsic tests below.
unsafe fn test_simd() {
// Runtime guard: compiled with sse2 enabled, so assert the CPU really has it.
+ assert!(is_x86_feature_detected!("sse2"));
+
let x = _mm_setzero_si128();
let y = _mm_set1_epi16(7);
let or = _mm_or_si128(x, y);
// NOTE(review): `or` is presumably checked in diff-elided lines — confirm.
test_mm_cvtepi8_epi16();
test_mm_cvtsi128_si64();
- // FIXME(#666) implement `#[rustc_arg_required_const(..)]` support
- //test_mm_extract_epi8();
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
// Only byte 0 has its high bit set (0xff), so the movemask is 0b1.
let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(mask1, 1);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
unsafe fn test_mm_slli_si128() {
// NOTE(review): body elided by the diff — `r` is defined in omitted lines.
#[rustfmt::skip]
assert_eq_m128i(r, _mm_set1_epi8(0));
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
unsafe fn test_mm_movemask_epi8() {
// NOTE(review): body elided by the diff — `r` is computed in omitted lines.
#[rustfmt::skip]
assert_eq!(r, 0b10100100_00100101);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn test_mm256_movemask_epi8() {
// All bytes are -1 (high bit set); `r`/`e` come from diff-elided lines.
let a = _mm256_set1_epi8(-1);
assert_eq!(r, e);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
unsafe fn test_mm_add_epi8() {
let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
// NOTE(review): `r`/`e` are computed in diff-elided lines.
assert_eq_m128i(r, e);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
unsafe fn test_mm_add_pd() {
let a = _mm_setr_pd(1.0, 2.0);
// NOTE(review): `r` is computed in diff-elided lines.
assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
}
+#[cfg(target_arch = "x86_64")]
// Byte-wise equality of two 128-bit integer vectors via transmute to [u8; 16].
fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
unsafe {
// SAFETY: __m128i is 16 bytes with no invalid bit patterns, so viewing
// it as [u8; 16] is sound.
assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
}
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
// Lane-wise f64 equality: _mm_cmpeq_pd sets each lane all-ones on equality,
// so both lanes equal <=> movemask == 0b11.
pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
// NOTE(review): the failure path (presumably a panic) is elided by the diff.
}
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse2")]
// _mm_cvtsi128_si64 extracts the low 64-bit lane of the vector.
unsafe fn test_mm_cvtsi128_si64() {
// SAFETY: [i64; 2] and __m128i are both 16 bytes; any bits are valid for both.
let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
assert_eq!(r, 5);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse4.1")]
unsafe fn test_mm_cvtepi8_epi16() {
let a = _mm_set1_epi8(10);
// NOTE(review): `r`/`e` are computed in diff-elided lines.
assert_eq_m128i(r, e);
}
+#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "sse4.1")]
unsafe fn test_mm_extract_epi8() {
// NOTE(review): body elided by the diff — `r2` is computed in omitted lines.
#[rustfmt::skip]
assert_eq!(r2, 3);
}
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+// _mm_insert_epi16 replaces the word lane selected by the const index
+// (here lane 0) with the given value and leaves the other lanes intact.
+unsafe fn test_mm_insert_epi16() {
+     let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+     let r = _mm_insert_epi16(a, 9, 0);
+     let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+     assert_eq_m128i(r, e);
+}
+
// Overflow behavior of checked_mul across widths, plus a parse-overflow case.
fn test_checked_mul() {
// "1000" in base 10 exceeds u8::MAX (255), so parsing fails and .ok() is None.
let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
assert_eq!(u, None);
assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
assert_eq!((-128i8).checked_mul(-128i8), None);
// Diff modernizes the deprecated max_value()/min_value() methods to the
// associated MAX/MIN constants; the asserted values are unchanged.
- assert_eq!(1u64.checked_mul(u64::max_value()), Some(u64::max_value()));
- assert_eq!(u64::max_value().checked_mul(u64::max_value()), None);
- assert_eq!(1i64.checked_mul(i64::max_value()), Some(i64::max_value()));
- assert_eq!(i64::max_value().checked_mul(i64::max_value()), None);
- assert_eq!((-1i64).checked_mul(i64::min_value() + 1), Some(i64::max_value()));
- assert_eq!(1i64.checked_mul(i64::min_value()), Some(i64::min_value()));
- assert_eq!(i64::min_value().checked_mul(i64::min_value()), None);
+ assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+ assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+ assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+ assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+ assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+ assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+ assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
}
#[derive(PartialEq)]