delegate.rs

/// Creates an unsigned division function that uses a combination of hardware division and
/// binary long division to divide integers larger than what hardware division by itself can do. This
/// function is intended for microarchitectures that have division hardware, but not fast enough
/// multiplication hardware for `impl_trifecta` to be faster.
#[doc(hidden)]
#[macro_export]
macro_rules! impl_delegate {
    (
        $fn:ident, // name of the unsigned division function
        $zero_div_fn:ident, // function called when division by zero is attempted
        $half_normalization_shift:ident, // function for finding the normalization shift of $uX
        $half_division:ident, // function for division of a $uX by a $uX
        $n_h:expr, // the number of bits in $iH or $uH
        $uH:ident, // unsigned integer with half the bit width of $uX
        $uX:ident, // unsigned integer with half the bit width of $uD
        $uD:ident, // unsigned integer type for the inputs and outputs of `$fn`
        $iD:ident // signed integer type with the same bitwidth as `$uD`
    ) => {
        /// Computes the quotient and remainder of `duo` divided by `div` and returns them as a
        /// tuple.
        pub fn $fn(duo: $uD, div: $uD) -> ($uD, $uD) {
            // The two-possibility algorithm, undersubtracting long division algorithm, or any kind
            // of reciprocal-based algorithm will not be fastest, because they involve large
            // multiplications that we assume to not be fast enough relative to the divisions to
            // outweigh setup times.

            // the number of bits in a $uX
            let n = $n_h * 2;

            let duo_lo = duo as $uX;
            let duo_hi = (duo >> n) as $uX;
            let div_lo = div as $uX;
            let div_hi = (div >> n) as $uX;
            match (div_lo == 0, div_hi == 0, duo_hi == 0) {
                (true, true, _) => $zero_div_fn(),
                (_, false, true) => {
                    // `duo` < `div`
                    return (0, duo);
                }
                (false, true, true) => {
                    // delegate to smaller division
                    let tmp = $half_division(duo_lo, div_lo);
                    return (tmp.0 as $uD, tmp.1 as $uD);
                }
                (false, true, false) => {
                    if duo_hi < div_lo {
                        // `quo_hi` will always be 0. This performs a binary long division algorithm
                        // to zero `duo_hi` followed by a half division.

                        // We can calculate the normalization shift using only `$uX` size functions.
                        // If we calculated the normalization shift using
                        // `$half_normalization_shift(duo_hi, div_lo, false)`, it would break the
                        // function's assumption that its first argument is larger than its second.
                        // With the arguments swapped the assumption holds, since
                        // `duo_hi < div_lo`.
                        let norm_shift = $half_normalization_shift(div_lo, duo_hi, false);
                        let shl = if norm_shift == 0 {
                            // Consider what happens if the msbs of `duo_hi` and `div_lo` align with
                            // no shifting. The normalization shift will always return
                            // `norm_shift == 0` regardless of whether it is fully normalized,
                            // because `duo_hi < div_lo`. In that edge case, `n - norm_shift` would
                            // result in shift overflow down the line. For the edge case, because
                            // both `duo_hi < div_lo` and we are comparing all the significant bits
                            // of `duo_hi` and `div`, we can make `shl = n - 1`.
                            n - 1
                        } else {
                            // We also cannot just use `shl = n - norm_shift - 1` in the general
                            // case, because when we are not in the edge case comparing all the
                            // significant bits, then the full `duo < div` may not be true and thus
                            // breaks the division algorithm.
                            n - norm_shift
                        };

                        // The 3 variable restoring division algorithm (see binary_long.rs) is ideal
                        // for this task, since `pow` and `quo` can be `$uX` and the delegation
                        // check is simple.
                        let mut div: $uD = div << shl;
                        let mut pow_lo: $uX = 1 << shl;
                        let mut quo_lo: $uX = 0;
                        let mut duo = duo;
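                        // Restoring long division: `div` starts shifted left by `shl` and is
                        // halved each iteration, while `pow_lo` tracks the quotient bit that a
                        // successful subtraction at that shift contributes.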
                        loop {
                            let sub = duo.wrapping_sub(div);
                            if 0 <= (sub as $iD) {
                                duo = sub;
                                quo_lo |= pow_lo;
                                let duo_hi = (duo >> n) as $uX;
                                if duo_hi == 0 {
                                    // Delegate to get the rest of the quotient. Note that the
                                    // `div_lo` here is the original unshifted `div`.
                                    let tmp = $half_division(duo as $uX, div_lo);
                                    return ((quo_lo | tmp.0) as $uD, tmp.1 as $uD);
                                }
                            }
                            div >>= 1;
                            pow_lo >>= 1;
                        }
                    } else if duo_hi == div_lo {
                        // `quo_hi == 1`. This branch is cheap and helps with edge cases.
                        let tmp = $half_division(duo as $uX, div as $uX);
                        return ((1 << n) | (tmp.0 as $uD), tmp.1 as $uD);
                    } else {
                        // `div_lo < duo_hi`
                        // `rem_hi == 0`
                        if (div_lo >> $n_h) == 0 {
                            // Short division of $uD by a $uH, using $uX by $uX division
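                            // The upper half of `duo` is divided in one step, then the two
                            // remaining `$uH`-sized digits are brought down one at a time, each
                            // prefixed with the previous step's remainder (schoolbook short
                            // division).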
                            let div_0 = div_lo as $uH as $uX;
                            let (quo_hi, rem_3) = $half_division(duo_hi, div_0);

                            let duo_mid = ((duo >> $n_h) as $uH as $uX) | (rem_3 << $n_h);
                            let (quo_1, rem_2) = $half_division(duo_mid, div_0);

                            let duo_lo = (duo as $uH as $uX) | (rem_2 << $n_h);
                            let (quo_0, rem_1) = $half_division(duo_lo, div_0);

                            return (
                                (quo_0 as $uD) | ((quo_1 as $uD) << $n_h) | ((quo_hi as $uD) << n),
                                rem_1 as $uD,
                            );
                        }

                        // This is basically a short division composed of a half division for the hi
                        // part, specialized 3 variable binary long division in the middle, and
                        // another half division for the lo part.
                        let duo_lo = duo as $uX;
                        let tmp = $half_division(duo_hi, div_lo);
                        let quo_hi = tmp.0;
                        let mut duo = (duo_lo as $uD) | ((tmp.1 as $uD) << n);
                        // This check is required to avoid breaking the long division below.
                        if duo < div {
                            return ((quo_hi as $uD) << n, duo);
                        }

                        // The half division handled all shift alignments down to `n`, so this
                        // division can continue with a shift of `n - 1`.
                        let mut div: $uD = div << (n - 1);
                        let mut pow_lo: $uX = 1 << (n - 1);
                        let mut quo_lo: $uX = 0;
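                        // Same restoring loop as above; `quo_hi` gets ORed into the final
                        // quotient when the remaining bits delegate to a half division.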
                        loop {
                            let sub = duo.wrapping_sub(div);
                            if 0 <= (sub as $iD) {
                                duo = sub;
                                quo_lo |= pow_lo;
                                let duo_hi = (duo >> n) as $uX;
                                if duo_hi == 0 {
                                    // Delegate to get the rest of the quotient. Note that the
                                    // `div_lo` here is the original unshifted `div`.
                                    let tmp = $half_division(duo as $uX, div_lo);
                                    return (
                                        (tmp.0) as $uD | (quo_lo as $uD) | ((quo_hi as $uD) << n),
                                        tmp.1 as $uD,
                                    );
                                }
                            }
                            div >>= 1;
                            pow_lo >>= 1;
                        }
                    }
                }
                (_, false, false) => {
                    // Full $uD by $uD binary long division. `quo_hi` will always be 0.
                    if duo < div {
                        return (0, duo);
                    }
                    let div_original = div;
                    let shl = $half_normalization_shift(duo_hi, div_hi, false);
                    let mut duo = duo;
                    let mut div: $uD = div << shl;
                    let mut pow_lo: $uX = 1 << shl;
                    let mut quo_lo: $uX = 0;
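                    // Restoring long division; the loop terminates once `duo` drops below the
                    // original divisor, at which point `quo_lo` holds the whole quotient.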
                    loop {
                        let sub = duo.wrapping_sub(div);
                        if 0 <= (sub as $iD) {
                            duo = sub;
                            quo_lo |= pow_lo;
                            if duo < div_original {
                                return (quo_lo as $uD, duo);
                            }
                        }
                        div >>= 1;
                        pow_lo >>= 1;
                    }
                }
            }
        }
    };
}
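
// A minimal sketch of how this macro might be invoked to generate a `u128` division
// that delegates to `u64` hardware division. The helper names are the ones
// `u128_divide_sparc` below pulls in via `use super::*`; the generated name
// `u128_div_rem` is illustrative:
//
// impl_delegate!(
//     u128_div_rem,            // $fn: name of the generated function
//     zero_div_fn,             // $zero_div_fn: called on division by zero
//     u64_normalization_shift, // $half_normalization_shift
//     u64_by_u64_div_rem,      // $half_division
//     32,                      // $n_h: bits in $uH
//     u32,                     // $uH
//     u64,                     // $uX
//     u128,                    // $uD
//     i128                     // $iD
// );
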
/// Returns `n / d` and sets `*rem = n % d`.
///
/// This specialization exists because:
/// - The LLVM backend for 32-bit SPARC cannot compile functions that return `(u128, u128)`,
///   so we have to use an old-fashioned `&mut u128` argument to return the remainder.
/// - 64-bit SPARC does not have `u64 * u64 => u128` widening multiplication, which makes the
///   delegate algorithm strategy the only reasonably fast way to perform `u128` division.
#[doc(hidden)]
pub fn u128_divide_sparc(duo: u128, div: u128, rem: &mut u128) -> u128 {
    use super::*;
    let duo_lo = duo as u64;
    let duo_hi = (duo >> 64) as u64;
    let div_lo = div as u64;
    let div_hi = (div >> 64) as u64;
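    // The case split below mirrors `impl_delegate!` above, monomorphized for `u128`
    // ($uX = u64, $uH = u32, n = 64), with the remainder written through `rem`
    // instead of being returned in a tuple.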
    match (div_lo == 0, div_hi == 0, duo_hi == 0) {
        (true, true, _) => zero_div_fn(),
        (_, false, true) => {
            *rem = duo;
            return 0;
        }
        (false, true, true) => {
            let tmp = u64_by_u64_div_rem(duo_lo, div_lo);
            *rem = tmp.1 as u128;
            return tmp.0 as u128;
        }
        (false, true, false) => {
            if duo_hi < div_lo {
                let norm_shift = u64_normalization_shift(div_lo, duo_hi, false);
                let shl = if norm_shift == 0 {
                    64 - 1
                } else {
                    64 - norm_shift
                };

                let mut div: u128 = div << shl;
                let mut pow_lo: u64 = 1 << shl;
                let mut quo_lo: u64 = 0;
                let mut duo = duo;
                loop {
                    let sub = duo.wrapping_sub(div);
                    if 0 <= (sub as i128) {
                        duo = sub;
                        quo_lo |= pow_lo;
                        let duo_hi = (duo >> 64) as u64;
                        if duo_hi == 0 {
                            let tmp = u64_by_u64_div_rem(duo as u64, div_lo);
                            *rem = tmp.1 as u128;
                            return (quo_lo | tmp.0) as u128;
                        }
                    }
                    div >>= 1;
                    pow_lo >>= 1;
                }
            } else if duo_hi == div_lo {
                let tmp = u64_by_u64_div_rem(duo as u64, div as u64);
                *rem = tmp.1 as u128;
                return (1 << 64) | (tmp.0 as u128);
            } else {
                if (div_lo >> 32) == 0 {
                    let div_0 = div_lo as u32 as u64;
                    let (quo_hi, rem_3) = u64_by_u64_div_rem(duo_hi, div_0);

                    let duo_mid = ((duo >> 32) as u32 as u64) | (rem_3 << 32);
                    let (quo_1, rem_2) = u64_by_u64_div_rem(duo_mid, div_0);

                    let duo_lo = (duo as u32 as u64) | (rem_2 << 32);
                    let (quo_0, rem_1) = u64_by_u64_div_rem(duo_lo, div_0);

                    *rem = rem_1 as u128;
                    return (quo_0 as u128) | ((quo_1 as u128) << 32) | ((quo_hi as u128) << 64);
                }

                let duo_lo = duo as u64;
                let tmp = u64_by_u64_div_rem(duo_hi, div_lo);
                let quo_hi = tmp.0;
                let mut duo = (duo_lo as u128) | ((tmp.1 as u128) << 64);
                if duo < div {
                    *rem = duo;
                    return (quo_hi as u128) << 64;
                }

                let mut div: u128 = div << (64 - 1);
                let mut pow_lo: u64 = 1 << (64 - 1);
                let mut quo_lo: u64 = 0;
                loop {
                    let sub = duo.wrapping_sub(div);
                    if 0 <= (sub as i128) {
                        duo = sub;
                        quo_lo |= pow_lo;
                        let duo_hi = (duo >> 64) as u64;
                        if duo_hi == 0 {
                            let tmp = u64_by_u64_div_rem(duo as u64, div_lo);
                            *rem = tmp.1 as u128;
                            return (tmp.0) as u128 | (quo_lo as u128) | ((quo_hi as u128) << 64);
                        }
                    }
                    div >>= 1;
                    pow_lo >>= 1;
                }
            }
        }
        (_, false, false) => {
            if duo < div {
                *rem = duo;
                return 0;
            }
            let div_original = div;
            let shl = u64_normalization_shift(duo_hi, div_hi, false);
            let mut duo = duo;
            let mut div: u128 = div << shl;
            let mut pow_lo: u64 = 1 << shl;
            let mut quo_lo: u64 = 0;
            loop {
                let sub = duo.wrapping_sub(div);
                if 0 <= (sub as i128) {
                    duo = sub;
                    quo_lo |= pow_lo;
                    if duo < div_original {
                        *rem = duo;
                        return quo_lo as u128;
                    }
                }
                div >>= 1;
                pow_lo >>= 1;
            }
        }
    }
}
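
// A minimal sanity-check sketch for `u128_divide_sparc`, using the built-in `u128`
// operators as the reference. This test module is not part of the original file and
// assumes the crate's normal `cargo test` harness:
#[cfg(test)]
mod u128_divide_sparc_sanity {
    use super::u128_divide_sparc;

    #[test]
    fn matches_builtin_division() {
        // Cases chosen to hit the half-division, short-division, and full
        // long-division paths respectively.
        let cases: [(u128, u128); 3] = [(1000, 7), (1 << 100, 3), (u128::MAX, u128::MAX - 1)];
        for (duo, div) in cases {
            let mut rem = 0u128;
            let quo = u128_divide_sparc(duo, div, &mut rem);
            assert_eq!(quo, duo / div);
            assert_eq!(rem, duo % div);
        }
    }
}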