/* s_fma.c */
  1. /*-
  2. * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. * 1. Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * 2. Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in the
  12. * documentation and/or other materials provided with the distribution.
  13. *
  14. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  15. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  16. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  17. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  18. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  19. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  20. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  21. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  22. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  23. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  24. * SUCH DAMAGE.
  25. */
  26. #include "cdefs-compat.h"
  27. //__FBSDID("$FreeBSD: src/lib/msun/src/s_fma.c,v 1.8 2011/10/21 06:30:43 das Exp $");
  28. #include <float.h>
  29. #include <openlibm_fenv.h>
  30. #include <openlibm_math.h>
  31. #include "math_private.h"
  32. /*
  33. * A struct dd represents a floating-point number with twice the precision
  34. * of a double. We maintain the invariant that "hi" stores the 53 high-order
  35. * bits of the result.
  36. */
  37. struct dd {
  38. double hi;
  39. double lo;
  40. };
  41. /*
  42. * Compute a+b exactly, returning the exact result in a struct dd. We assume
  43. * that both a and b are finite, but make no assumptions about their relative
  44. * magnitudes.
  45. */
  46. static inline struct dd
  47. dd_add(double a, double b)
  48. {
  49. struct dd ret;
  50. double s;
  51. ret.hi = a + b;
  52. s = ret.hi - a;
  53. ret.lo = (a - (ret.hi - s)) + (b - s);
  54. return (ret);
  55. }
/*
 * Compute a+b, with a small tweak:  The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding.  This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent.  For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 *     J. Coonen.  An Implementation Guide to a Proposed Standard for
 *     Floating-Point Arithmetic.  Computer, vol. 13, no. 1, Jan 1980.
 */
static inline double
add_adjusted(double a, double b)
{
	struct dd sum;
	u_int64_t hibits, lobits;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		/* The sum was inexact: sum.lo holds the lost bits. */
		EXTRACT_WORD64(hibits, sum.hi);
		/* An odd significand is already "sticky"; only adjust even ones. */
		if ((hibits & 1) == 0) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			/*
			 * Bump the significand toward sum.lo: the shifted XOR
			 * yields 0 when hi and lo agree in sign (add 1 ulp)
			 * and 2 when they differ (subtract 1 ulp) -- see the
			 * copysign comment above for the intent.
			 */
			hibits += 1 - ((hibits ^ lobits) >> 62);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (sum.hi);
}
/*
 * Compute ldexp(a+b, scale) with a single rounding error.  It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline double
add_and_denormalize(double a, double b, int scale)
{
	struct dd sum;
	u_int64_t hibits, lobits;
	int bits_lost;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo.  With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		/*
		 * (hibits >> 52) & 0x7ff is the biased exponent of sum.hi;
		 * bits_lost counts the low-order bits denormalization will
		 * shift away after scaling by 'scale'.
		 */
		bits_lost = -((int)(hibits >> 52) & 0x7ff) - scale + 1;
		/*
		 * Adjust when losing >= 2 bits with the LSB clear, or when
		 * losing exactly 1 bit with the LSB set (manual tie-break).
		 */
		if ((bits_lost != 1) ^ (int)(hibits & 1)) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			/*
			 * The masked, shifted XOR isolates the sign
			 * difference: 0 if hi and lo share a sign (+1 ulp),
			 * 2 if they differ (-1 ulp).
			 */
			hibits += 1 - (((hibits ^ lobits) >> 62) & 2);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (ldexp(sum.hi, scale));
}
  118. /*
  119. * Compute a*b exactly, returning the exact result in a struct dd. We assume
  120. * that both a and b are normalized, so no underflow or overflow will occur.
  121. * The current rounding mode must be round-to-nearest.
  122. */
  123. static inline struct dd
  124. dd_mul(double a, double b)
  125. {
  126. static const double split = 0x1p27 + 1.0;
  127. struct dd ret;
  128. double ha, hb, la, lb, p, q;
  129. p = a * split;
  130. ha = a - p;
  131. ha += p;
  132. la = a - ha;
  133. p = b * split;
  134. hb = b - p;
  135. hb += p;
  136. lb = b - hb;
  137. p = ha * hb;
  138. q = ha * lb + la * hb;
  139. ret.hi = p + q;
  140. ret.lo = p - ret.hi + q + la * lb;
  141. return (ret);
  142. }
/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 *	Dekker, T.  A Floating-Point Technique for Extending the
 *	Available Precision.  Numer. Math. 18, 224-242 (1971).
 *
 * This algorithm is sensitive to the rounding precision.  FPUs such
 * as the i387 must be set in double-precision mode if variables are
 * to be stored in FP registers in order to avoid incorrect results.
 * This is the default on FreeBSD, but not on many other systems.
 *
 * Hardware instructions should be used on architectures that support it,
 * since this implementation will likely be several times slower.
 */
OLM_DLLEXPORT double
fma(double x, double y, double z)
{
	double xs, ys, zs, adj;
	struct dd xy, r;
	int oround;
	int ex, ey, ez;
	int spread;

	/*
	 * Handle special cases.  The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (x == 0.0 || y == 0.0)
		return (x * y + z);
	if (z == 0.0)
		return (x * y);
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (!isfinite(z))
		return (z);

	/* Scale all operands into [0.5, 1) and work with exponents alone. */
	xs = frexp(x, &ex);
	ys = frexp(y, &ey);
	zs = frexp(z, &ez);
	oround = fegetround();	/* caller's rounding mode, restored below */
	spread = ex + ey - ez;	/* exponent gap between x*y and z */

	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially.  Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -DBL_MANT_DIG) {
		/* x*y is too small to affect z beyond inexact/direction. */
		feraiseexcept(FE_INEXACT);
		if (!isnormal(z))
			feraiseexcept(FE_UNDERFLOW);
		switch (oround) {
		case FE_TONEAREST:
			return (z);
		case FE_TOWARDZERO:
			/* The XORs compute the sign of the discarded x*y term. */
			if ((x > 0.0) ^ (y < 0.0) ^ (z < 0.0))
				return (z);
			else
				return (nextafter(z, 0));
		case FE_DOWNWARD:
			if ((x > 0.0) ^ (y < 0.0))
				return (z);
			else
				return (nextafter(z, -INFINITY));
		default:	/* FE_UPWARD */
			if ((x > 0.0) ^ (y < 0.0))
				return (nextafter(z, INFINITY));
			else
				return (z);
		}
	}
	if (spread <= DBL_MANT_DIG * 2)
		zs = ldexp(zs, -spread);	/* align z with x*y */
	else
		zs = copysign(DBL_MIN, zs);	/* z only matters as a sticky bit */

	fesetround(FE_TONEAREST);

	/*
	 * Basic approach for round-to-nearest:
	 *
	 *     (xy.hi, xy.lo) = x * y		(exact)
	 *     (r.hi, r.lo)   = xy.hi + z	(exact)
	 *     adj = xy.lo + r.lo		(inexact; low bit is sticky)
	 *     result = r.hi + adj		(correctly rounded)
	 */
	xy = dd_mul(xs, ys);
	r = dd_add(xy.hi, zs);

	spread = ex + ey;	/* scale factor to undo the frexp normalization */

	if (r.hi == 0.0) {
		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		fesetround(oround);
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
		return (xy.hi + vzs + ldexp(xy.lo, spread));
	}

	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 */
		fesetround(oround);
		adj = r.lo + xy.lo;
		return (ldexp(r.hi + adj, spread));
	}

	/* Round-to-nearest: fold the low parts with sticky-bit adjustment. */
	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
		return (ldexp(r.hi + adj, spread));
	else
		/* Result is subnormal; avoid double rounding on the way down. */
		return (add_and_denormalize(r.hi, adj, spread));
}
/*
 * Where long double has the same 53-bit significand as double, fmal can
 * simply alias fma via a weak reference (macro from cdefs-compat.h).
 */
#if (LDBL_MANT_DIG == 53)
__weak_reference(fma, fmal);
#endif