2 use crate::{Category, ExpInt, Float, FloatConvert, ParseError, Round, Status, StatusAnd};
4 use core::cmp::Ordering;
// A pair of IEEE floats (high, low) used to model IBM "double-double":
// the represented value is the sum of the two components (see `is_denormal`
// and the `From` conversions below, which add the second component in).
9 #[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
10 pub struct DoubleFloat<F>(F, F);
// The concrete PowerPC double-double format: two IEEE `Double`s.
11 pub type DoubleDouble = DoubleFloat<ieee::Double>;
13 // These are legacy semantics for the Fallback, inaccurate implementation of
14 // IBM double-double, if the accurate DoubleDouble doesn't handle the
15 // operation. It's equivalent to having an IEEE number with consecutive 106
16 // bits of mantissa and 11 bits of exponent.
18 // It's not equivalent to IBM double-double. For example, a legit IBM
19 // double-double, 1 + epsilon:
21 // 1 + epsilon = 1 + (1 >> 1076)
23 // is not representable by a consecutive 106 bits of mantissa.
25 // Currently, these semantics are used in the following way:
27 // DoubleDouble -> (Double, Double) ->
28 // DoubleDouble's Fallback -> IEEE operations
30 // FIXME: Implement all operations in DoubleDouble, and delete these
32 // FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
// Marker type for the legacy "fallback" semantics described above:
// an IEEE-style format with twice F's precision.
33 pub struct FallbackS<F>(F);
34 type Fallback<F> = ieee::IeeeFloat<FallbackS<F>>;
35 impl<F: Float> ieee::Semantics for FallbackS<F> {
36 // Forbid any conversion to/from bits.
37 const BITS: usize = 0;
// 106 bits of consecutive mantissa when F is `Double` (53 * 2).
38 const PRECISION: usize = F::PRECISION * 2;
39 const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
// Minimum exponent is raised by F::PRECISION — presumably so that the low
// component of a decomposed value is itself representable in F; TODO confirm.
40 const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt;
43 // Convert number to F. To avoid spurious underflows, we re-
44 // normalize against the F exponent range first, and only *then*
45 // truncate the mantissa. The result of that second conversion
46 // may be inexact, but should never underflow.
47 // FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
// Marker type for the extended intermediate semantics used while converting
// a `Fallback` value back into (F, F): same precision as `Fallback`, but with
// F's full exponent range so renormalization cannot spuriously underflow.
48 pub struct FallbackExtendedS<F>(F);
49 type FallbackExtended<F> = ieee::IeeeFloat<FallbackExtendedS<F>>;
50 impl<F: Float> ieee::Semantics for FallbackExtendedS<F> {
51 // Forbid any conversion to/from bits.
52 const BITS: usize = 0;
53 const PRECISION: usize = Fallback::<F>::PRECISION;
54 const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
// Split a `Fallback` (wide, 2x-precision) value into the (high, low) pair of
// F's that represents it: first renormalize into the extended format (exact),
// then truncate to F for the high half, and recover the low half as the
// exactly-representable difference.
57 impl<F: Float> From<Fallback<F>> for DoubleFloat<F>
59 F: FloatConvert<FallbackExtended<F>>,
60 FallbackExtended<F>: FloatConvert<F>,
62 fn from(x: Fallback<F>) -> Self {
64 let mut loses_info = false;
// Renormalization into the extended format must be exact.
66 let extended: FallbackExtended<F> = unpack!(status=, x.convert(&mut loses_info));
67 assert_eq!((status, loses_info), (Status::OK, false));
// Truncating to F may be INEXACT but (per the comment on the extended
// semantics) must not underflow or otherwise fail.
69 let a = unpack!(status=, extended.convert(&mut loses_info));
70 assert_eq!(status - Status::INEXACT, Status::OK);
72 // If conversion was exact or resulted in a special case, we're done;
73 // just set the second double to zero. Otherwise, re-convert back to
74 // the extended format and compute the difference. This now should
75 // convert exactly to double.
76 let b = if a.is_finite_non_zero() && loses_info {
77 let u: FallbackExtended<F> = unpack!(status=, a.convert(&mut loses_info));
78 assert_eq!((status, loses_info), (Status::OK, false));
// v = x - (extended)a, the part lost by truncation; exact by construction.
79 let v = unpack!(status=, extended - u);
80 assert_eq!(status, Status::OK);
81 let v = unpack!(status=, v.convert(&mut loses_info));
82 assert_eq!((status, loses_info), (Status::OK, false));
// Combine the (high, low) pair back into a single wide `Fallback` value by
// converting the high half and, for finite non-zero values, adding the low
// half in. Special cases (NaN/Inf/zero) are fully described by the high half.
92 impl<F: FloatConvert<Self>> From<DoubleFloat<F>> for Fallback<F> {
93 fn from(DoubleFloat(a, b): DoubleFloat<F>) -> Self {
95 let mut loses_info = false;
97 // Get the first F and convert to our format.
98 let a = unpack!(status=, a.convert(&mut loses_info));
99 assert_eq!((status, loses_info), (Status::OK, false));
101 // Unless we have a special case, add in second F.
102 if a.is_finite_non_zero() {
103 let b = unpack!(status=, b.convert(&mut loses_info));
104 assert_eq!((status, loses_info), (Status::OK, false));
// Generate the shared operator/trait boilerplate for this Float type.
113 float_common_impls!(DoubleFloat<F>);
115 impl<F: Float> Neg for DoubleFloat<F> {
117 fn neg(self) -> Self {
// Negate the low half only when it is finite non-zero; a zero/NaN/Inf low
// half is left untouched (its sign/payload is not part of the value).
118 if self.1.is_finite_non_zero() {
119 DoubleFloat(-self.0, -self.1)
121 DoubleFloat(-self.0, self.1)
// Display by converting to the wide `Fallback` representation and printing
// that — formatting is only implemented for the IEEE-style semantics.
126 impl<F: FloatConvert<Fallback<F>>> fmt::Display for DoubleFloat<F> {
127 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
128 fmt::Display::fmt(&Fallback::from(*self), f)
// The main `Float` implementation. Precision/exponent limits come from the
// legacy `Fallback` semantics; special values put all information in the
// high half and zero the low half.
132 impl<F: FloatConvert<Fallback<F>>> Float for DoubleFloat<F>
134 Self: From<Fallback<F>>,
136 const BITS: usize = F::BITS * 2;
137 const PRECISION: usize = Fallback::<F>::PRECISION;
138 const MAX_EXP: ExpInt = Fallback::<F>::MAX_EXP;
139 const MIN_EXP: ExpInt = Fallback::<F>::MIN_EXP;
141 const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO);
143 const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO);
145 // FIXME(eddyb) remove when qnan becomes const fn.
146 const NAN: Self = DoubleFloat(F::NAN, F::ZERO);
148 fn qnan(payload: Option<u128>) -> Self {
149 DoubleFloat(F::qnan(payload), F::ZERO)
152 fn snan(payload: Option<u128>) -> Self {
153 DoubleFloat(F::snan(payload), F::ZERO)
// Largest finite value: high half = F::largest(); low half = the value just
// below F::largest() / 2^(PRECISION+1), so the pair stays canonical
// (low strictly smaller than half an ulp of high — TODO confirm rationale).
156 fn largest() -> Self {
158 let mut r = DoubleFloat(F::largest(), F::largest());
159 r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1));
160 r.1 = unpack!(status=, r.1.next_down());
161 assert_eq!(status, Status::OK);
165 const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO);
// Smallest normalized value is F's smallest-normalized scaled up by
// F::PRECISION, matching the raised MIN_EXP of the fallback semantics.
167 fn smallest_normalized() -> Self {
168 DoubleFloat(F::smallest_normalized().scalbn(F::PRECISION as ExpInt), F::ZERO)
171 // Implement addition, subtraction, multiplication and division based on:
172 // "Software for Doubled-Precision Floating-Point Computations",
173 // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283.
// Rounded addition, following Linnainmaa's doubled-precision algorithm
// (reference cited above). Special categories are handled up front; the
// Normal+Normal case computes a compensated sum (z, zz) and renormalizes.
175 fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
176 match (self.category(), rhs.category()) {
// Inf + -Inf is invalid; same-signed infinities fall through below.
177 (Category::Infinity, Category::Infinity) => {
178 if self.is_negative() != rhs.is_negative() {
179 Status::INVALID_OP.and(Self::NAN.copy_sign(self))
185 (_, Category::Zero) | (Category::NaN, _) | (Category::Infinity, Category::Normal) => {
189 (Category::Zero, _) | (_, Category::NaN | Category::Infinity) => Status::OK.and(rhs),
191 (Category::Normal, Category::Normal) => {
192 let mut status = Status::OK;
// (a, aa) = self as (high, low); (c, cc) = rhs as (high, low).
193 let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1);
195 z = unpack!(status|=, z.add_r(c, round));
197 if !z.is_infinite() {
198 return status.and(DoubleFloat(z, F::ZERO));
// High-half sum overflowed: re-sum starting from the low parts, adding
// the smaller-magnitude high half first to preserve accuracy.
201 let a_cmp_c = a.cmp_abs_normal(c);
203 z = unpack!(status|=, z.add_r(aa, round));
204 if a_cmp_c == Ordering::Greater {
205 // z = cc + aa + c + a;
206 z = unpack!(status|=, z.add_r(c, round));
207 z = unpack!(status|=, z.add_r(a, round));
209 // z = cc + aa + a + c;
210 z = unpack!(status|=, z.add_r(a, round));
211 z = unpack!(status|=, z.add_r(c, round));
214 return status.and(DoubleFloat(z, F::ZERO));
// Non-overflow path: accumulate the compensation term, ordered by which
// operand has the larger magnitude.
218 zz = unpack!(status|=, zz.add_r(cc, round));
219 if a_cmp_c == Ordering::Greater {
220 // self.1 = a - z + c + zz;
222 self.1 = unpack!(status|=, self.1.sub_r(z, round));
223 self.1 = unpack!(status|=, self.1.add_r(c, round));
224 self.1 = unpack!(status|=, self.1.add_r(zz, round));
226 // self.1 = c - z + a + zz;
228 self.1 = unpack!(status|=, self.1.sub_r(z, round));
229 self.1 = unpack!(status|=, self.1.add_r(a, round));
230 self.1 = unpack!(status|=, self.1.add_r(zz, round));
235 q = unpack!(status|=, q.sub_r(z, round));
237 // zz = q + c + (a - (q + z)) + aa + cc;
238 // Compute a - (q + z) as -((q + z) - a) to avoid temporary copies.
240 zz = unpack!(status|=, zz.add_r(c, round));
241 q = unpack!(status|=, q.add_r(z, round));
242 q = unpack!(status|=, q.sub_r(a, round));
244 zz = unpack!(status|=, zz.add_r(q, round));
245 zz = unpack!(status|=, zz.add_r(aa, round));
246 zz = unpack!(status|=, zz.add_r(cc, round));
// Exact result (positive-zero compensation): status reset to OK.
247 if zz.is_zero() && !zz.is_negative() {
248 return Status::OK.and(DoubleFloat(z, F::ZERO));
// Renormalize (z, zz) into a canonical (high, low) pair.
251 self.0 = unpack!(status|=, self.0.add_r(zz, round));
252 if !self.0.is_finite() {
254 return status.and(self);
257 self.1 = unpack!(status|=, self.1.sub_r(self.0, round));
258 self.1 = unpack!(status|=, self.1.add_r(zz, round));
// Rounded multiplication: special categories resolved by the category
// lattice described below; Normal*Normal uses the doubled-precision
// product (a+b)*(c+d) ~= t + tau with an FMA for the high-product error.
265 fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
266 // Interesting observation: For special categories, finding the lowest
267 // common ancestor of the following layered graph gives the correct
276 // e.g., NaN * NaN = NaN
278 // Normal * Zero = Zero
279 // Normal * Inf = Inf
280 match (self.category(), rhs.category()) {
281 (Category::NaN, _) => Status::OK.and(self),
283 (_, Category::NaN) => Status::OK.and(rhs),
// Zero * Infinity (either order) is the invalid-operation NaN case.
285 (Category::Zero, Category::Infinity) | (Category::Infinity, Category::Zero) => {
286 Status::OK.and(Self::NAN)
289 (Category::Zero | Category::Infinity, _) => Status::OK.and(self),
291 (_, Category::Zero | Category::Infinity) => Status::OK.and(rhs),
293 (Category::Normal, Category::Normal) => {
294 let mut status = Status::OK;
// (a, b) = self as (high, low); (c, d) = rhs as (high, low).
295 let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1);
// t = a * c: the dominant term of the product.
298 t = unpack!(status|=, t.mul_r(c, round));
299 if !t.is_finite_non_zero() {
300 return status.and(DoubleFloat(t, F::ZERO));
303 // tau = fmsub(a, c, t), that is -fmadd(-a, c, t).
305 tau = unpack!(status|=, tau.mul_add_r(c, -t, round));
// v = a*d, w = b*c: the cross terms; b*d is dropped as negligible.
308 v = unpack!(status|=, v.mul_r(d, round));
311 w = unpack!(status|=, w.mul_r(c, round));
312 v = unpack!(status|=, v.add_r(w, round));
314 tau = unpack!(status|=, tau.add_r(v, round));
// u = t + tau: renormalized high half of the result.
317 u = unpack!(status|=, u.add_r(tau, round));
323 // self.1 = (t - u) + tau
324 t = unpack!(status|=, t.sub_r(u, round));
325 t = unpack!(status|=, t.add_r(tau, round));
// Operations not (yet) implemented natively on DoubleFloat are delegated to
// the inaccurate legacy `Fallback` semantics and converted back (see the
// FIXME near the top of the file).
333 fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
335 .mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round)
339 fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
340 Fallback::from(self).div_r(Fallback::from(rhs), round).map(Self::from)
343 fn c_fmod(self, rhs: Self) -> StatusAnd<Self> {
344 Fallback::from(self).c_fmod(Fallback::from(rhs)).map(Self::from)
347 fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
348 Fallback::from(self).round_to_integral(round).map(Self::from)
351 fn next_up(self) -> StatusAnd<Self> {
352 Fallback::from(self).next_up().map(Self::from)
// Bit layout: low F::BITS bits hold the high component, next F::BITS bits
// hold the low component (mirrors `to_bits` below).
355 fn from_bits(input: u128) -> Self {
356 let (a, b) = (input, input >> F::BITS);
357 DoubleFloat(F::from_bits(a & ((1 << F::BITS) - 1)), F::from_bits(b & ((1 << F::BITS) - 1)))
360 fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
361 Fallback::from_u128_r(input, round).map(Self::from)
364 fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
365 Fallback::from_str_r(s, round).map(|r| r.map(Self::from))
368 fn to_bits(self) -> u128 {
369 self.0.to_bits() | (self.1.to_bits() << F::BITS)
372 fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
373 Fallback::from(self).to_u128_r(width, round, is_exact)
// Magnitude comparison: compare high halves first; on a tie, compare low
// halves, flipping the result when a component's low half points "against"
// (opposite sign to) its high half.
376 fn cmp_abs_normal(self, rhs: Self) -> Ordering {
377 self.0.cmp_abs_normal(rhs.0).then_with(|| {
378 let result = self.1.cmp_abs_normal(rhs.1);
379 if result != Ordering::Equal {
// `against` is true when the low half reduces the magnitude of the pair.
380 let against = self.0.is_negative() ^ self.1.is_negative();
381 let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative();
384 .then_with(|| if against { result.reverse() } else { result })
// Bitwise equality requires both components to match exactly.
391 fn bitwise_eq(self, rhs: Self) -> bool {
392 self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1)
// The sign of the pair is the sign of the high half.
395 fn is_negative(self) -> bool {
// A normal DoubleFloat requires: both halves normal, and the rounded sum of
// the halves equal (bitwise) to the high half — i.e. (double)(Hi + Lo) == Hi.
// Anything else (while still Category::Normal) is denormal.
399 fn is_denormal(self) -> bool {
400 self.category() == Category::Normal
// BUGFIX: the second disjunct previously re-tested `self.0`; it must test
// the low half `self.1` (matches LLVM's DoubleAPFloat::isDenormal).
401 && (self.0.is_denormal() || self.1.is_denormal() ||
402 // (double)(Hi + Lo) == Hi defines a normal number.
403 !(self.0 + self.1).value.bitwise_eq(self.0))
// Signaling-NaN status lives entirely in the high half.
406 fn is_signaling(self) -> bool {
407 self.0.is_signaling()
// The category of the pair is determined by the high half.
410 fn category(self) -> Category {
// Exact-inverse check is delegated to the legacy Fallback semantics.
414 fn get_exact_inverse(self) -> Option<Self> {
415 Fallback::from(self).get_exact_inverse().map(Self::from)
418 fn ilogb(self) -> ExpInt {
// Scaling by a power of two applies component-wise; it cannot change the
// relative alignment of the two halves.
422 fn scalbn_r(self, exp: ExpInt, round: Round) -> Self {
423 DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round))
426 fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self {
427 let a = self.0.frexp_r(exp, round);
429 if self.category() == Category::Normal {
430 b = b.scalbn_r(-*exp, round);