1 // compile-flags: -Zmir-opt-level=0
2 #![feature(const_float_bits_conv)]
3 #![feature(const_float_classify)]
/// Identity function: forces a value through a runtime (non-const) call path.
/// `const_assert!` wraps both sides of its runtime comparison in `nop` so the
/// compile-time (`const _: () = assert!(...)`) and runtime evaluations of the
/// same expression can be checked against each other.
const fn nop<T>(x: T) -> T { x }
/// Asserts a condition (or an equality) both at compile time and at runtime.
///
/// - `const_assert!(expr)`: `expr` is checked once during const evaluation
///   (`const _: () = assert!(...)`) and once at runtime, with `nop` forcing
///   the runtime evaluation through a non-const call.
/// - `const_assert!(a, b)`: same idea for an equality `a == b`.
///
/// NOTE(review): the extraction dropped the first arm's pattern line and the
/// closing delimiters; this reconstruction follows the visible two-arm
/// structure (const assert + runtime assert per arm).
macro_rules! const_assert {
    ($a:expr) => {
        {
            const _: () = assert!($a);
            assert!(nop($a));
        }
    };
    ($a:expr, $b:expr) => {
        {
            const _: () = assert!($a == $b);
            assert_eq!(nop($a), nop($b));
        }
    };
}
// Check that NaNs roundtrip their bits regardless of signalingness.
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two masks together clobber all the
// mantissa bits (while leaving the exponent, and hence NaN-ness, intact).
// ...actually, let's just check that these break. :D
const MASKED_NAN1: u32 = f32::NAN.to_bits() ^ 0x002A_AAAA;
const MASKED_NAN2: u32 = f32::NAN.to_bits() ^ 0x0055_5555;

// Both masked patterns must still classify as NaN.
// (Fixed: the second assert previously re-checked MASKED_NAN1 — an apparent
// copy-paste slip, since MASKED_NAN2 is defined above and used below.)
const_assert!(f32::from_bits(MASKED_NAN1).is_nan());
const_assert!(f32::from_bits(MASKED_NAN2).is_nan());

// LLVM does not guarantee that loads and stores of NaNs preserve their exact bit pattern.
// In practice, this seems to only cause a problem on x86, since the most widely used calling
// convention mandates that floating point values are returned on the x87 FPU stack. See #73328.
if !cfg!(target_arch = "x86") {
    const_assert!(f32::from_bits(MASKED_NAN1).to_bits(), MASKED_NAN1);
    const_assert!(f32::from_bits(MASKED_NAN2).to_bits(), MASKED_NAN2);
// Check that NaNs roundtrip their bits regardless of signalingness.
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two masks together clobber all the
// mantissa bits (while leaving the exponent, and hence NaN-ness, intact).
// ...actually, let's just check that these break. :D
const MASKED_NAN1: u64 = f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA;
const MASKED_NAN2: u64 = f64::NAN.to_bits() ^ 0x0005_5555_5555_5555;

// Both masked patterns must still classify as NaN.
// (Fixed: the second assert previously re-checked MASKED_NAN1 — an apparent
// copy-paste slip, since MASKED_NAN2 is defined above and used below.)
const_assert!(f64::from_bits(MASKED_NAN1).is_nan());
const_assert!(f64::from_bits(MASKED_NAN2).is_nan());

// Skip exact bit-pattern roundtrips on x86: the x87 return convention may not
// preserve NaN payload bits (see the f32 section and #73328).
if !cfg!(target_arch = "x86") {
    const_assert!(f64::from_bits(MASKED_NAN1).to_bits(), MASKED_NAN1);
    const_assert!(f64::from_bits(MASKED_NAN2).to_bits(), MASKED_NAN2);