[Gmp-commit] /home/hgfiles/gmp: 2 new changesets
mercurial at gmplib.org
Tue Dec 22 02:32:51 CET 2009
details:     /home/hgfiles/gmp/rev/069071a498f0
changeset:   13173:069071a498f0
user:        Torbjorn Granlund <tege at gmplib.org>
date:        Tue Dec 22 02:23:37 2009 +0100
description:
Remove defunct TODO item.
details:     /home/hgfiles/gmp/rev/1374ef68341b
changeset:   13174:1374ef68341b
user:        Torbjorn Granlund <tege at gmplib.org>
date:        Tue Dec 22 02:32:45 2009 +0100
description:
ASSERT that output of mpn_mul_fft is zero.
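
Note on the pattern applied below: mpn_mul_fft computes {dp,dn} * {qp,in} mod B^m + 1 into {tp,m} and returns the carry out of that reduction as an int; each call site now captures that value and asserts it is zero. A minimal sketch of the pattern (assuming GMP's internal gmp-impl.h, so it only builds inside the GMP source tree; the helper name mul_fft_checked is made up for illustration):

#include "gmp-impl.h"

/* Capture the return value of mpn_mul_fft and check it, as done at each
   call site in the diff below.  The product is reduced mod B^m + 1; the
   returned carry is nonzero only if the reduced result is exactly B^m,
   which these call sites assert does not happen.  */
static void
mul_fft_checked (mp_ptr tp, mp_size_t m,
                 mp_srcptr dp, mp_size_t dn,
                 mp_srcptr qp, mp_size_t in, int k)
{
  int c0;

  c0 = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
  ASSERT_ALWAYS (c0 == 0);
}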
diffstat:
 mpn/generic/mu_bdiv_q.c    |  10 +++++++---
 mpn/generic/mu_bdiv_qr.c   |  23 +++++++++++++++++------
 mpn/generic/mu_div_qr.c    |   3 ++-
 mpn/generic/mu_divappr_q.c |   3 ++-
 mpn/generic/redc_n.c       |   4 ----
5 files changed, 28 insertions(+), 15 deletions(-)
diffs (144 lines):
diff -r 9c995e3f6f20 -r 1374ef68341b mpn/generic/mu_bdiv_q.c
--- a/mpn/generic/mu_bdiv_q.c Tue Dec 22 01:46:02 2009 +0100
+++ b/mpn/generic/mu_bdiv_q.c Tue Dec 22 02:32:45 2009 +0100
@@ -138,7 +138,8 @@
remainder; we undo that operation with another subtraction. */
int c0;
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (c0 == 0);
c0 = mpn_sub_n (tp + m, rp, tp, wn);
@@ -175,7 +176,8 @@
{
int c0;
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (c0 == 0);
c0 = mpn_sub_n (tp + m, rp, tp, wn);
@@ -219,10 +221,12 @@
{
int k;
mp_size_t m;
+ int c0;
k = mpn_fft_best_k (qn, 0);
m = mpn_fft_next_size (qn, k);
- mpn_mul_fft (rp, m, dp, qn, qp, in, k);
+ c0 = mpn_mul_fft (rp, m, dp, qn, qp, in, k);
+ ASSERT_ALWAYS (c0 == 0);
if (mpn_cmp (np, rp, in) < 0)
mpn_incr_u (rp + in, 1);
}
diff -r 9c995e3f6f20 -r 1374ef68341b mpn/generic/mu_bdiv_qr.c
--- a/mpn/generic/mu_bdiv_qr.c Tue Dec 22 01:46:02 2009 +0100
+++ b/mpn/generic/mu_bdiv_qr.c Tue Dec 22 02:32:45 2009 +0100
@@ -116,6 +116,7 @@
#if WANT_FFT
if (ABOVE_THRESHOLD (dn, MUL_FFT_MODF_THRESHOLD))
{
+ int c0;
/* The two multiplicands are dn and 'in' limbs, with dn >= in.
The relevant part of the result will typically partially wrap,
and that part will come out as subtracted to the right. The
@@ -126,11 +127,12 @@
k = mpn_fft_best_k (dn, 0);
m = mpn_fft_next_size (dn, k);
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (c0 == 0);
wn = dn + in - m; /* number of wrapped limbs */
if (wn > 0)
{
- int c0 = mpn_sub_n (tp + m, rp, tp, wn);
+ c0 = mpn_sub_n (tp + m, rp, tp, wn);
for (i = wn; c0 != 0 && i < in; i++)
c0 = tp[i] == GMP_NUMB_MASK;
mpn_incr_u (tp + in, c0);
@@ -165,11 +167,14 @@
{
k = mpn_fft_best_k (dn, 0);
m = mpn_fft_next_size (dn, k);
- mpn_mul_fft (tp, m, dp, dn, qp, qn, k);
+ int c0;
+
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, qn, k);
+ ASSERT_ALWAYS (c0 == 0);
wn = dn + qn - m; /* number of wrapped limbs */
if (wn > 0)
{
- int c0 = mpn_sub_n (tp + m, rp, tp, wn);
+ c0 = mpn_sub_n (tp + m, rp, tp, wn);
for (i = wn; c0 != 0 && i < qn; i++)
c0 = tp[i] == GMP_NUMB_MASK;
mpn_incr_u (tp + qn, c0);
@@ -217,7 +222,10 @@
{
k = mpn_fft_best_k (dn, 0);
m = mpn_fft_next_size (dn, k);
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ int c0;
+
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (c0 == 0);
wn = dn + in - m;
if (wn > 0)
{
@@ -240,7 +248,10 @@
{
k = mpn_fft_best_k (dn, 0);
m = mpn_fft_next_size (dn, k);
- mpn_mul_fft (tp, m, dp, dn, qp, qn, k);
+ int c0;
+
+ c0 = mpn_mul_fft (tp, m, dp, dn, qp, qn, k);
+ ASSERT_ALWAYS (c0 == 0);
wn = dn + qn - m;
if (wn > 0)
{
diff -r 9c995e3f6f20 -r 1374ef68341b mpn/generic/mu_div_qr.c
--- a/mpn/generic/mu_div_qr.c Tue Dec 22 01:46:02 2009 +0100
+++ b/mpn/generic/mu_div_qr.c Tue Dec 22 02:32:45 2009 +0100
@@ -286,7 +286,8 @@
m = mpn_fft_next_size (dn + 1, k);
wn = dn + in - m; /* number of wrapped limbs */
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ cy = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (cy == 0);
if (wn > 0)
{
diff -r 9c995e3f6f20 -r 1374ef68341b mpn/generic/mu_divappr_q.c
--- a/mpn/generic/mu_divappr_q.c Tue Dec 22 01:46:02 2009 +0100
+++ b/mpn/generic/mu_divappr_q.c Tue Dec 22 02:32:45 2009 +0100
@@ -308,7 +308,8 @@
m = mpn_fft_next_size (dn + 1, k);
wn = dn + in - m; /* number of wrapped limbs */
- mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ cy = mpn_mul_fft (tp, m, dp, dn, qp, in, k);
+ ASSERT_ALWAYS (cy == 0);
if (wn > 0)
{
diff -r 9c995e3f6f20 -r 1374ef68341b mpn/generic/redc_n.c
--- a/mpn/generic/redc_n.c Tue Dec 22 01:46:02 2009 +0100
+++ b/mpn/generic/redc_n.c Tue Dec 22 02:32:45 2009 +0100
@@ -31,10 +31,6 @@
future mpn_mulhi) for the range we will be called. Follow up that
assumption.
- * Call mpn_mul_fft directly for large-enough operands. It computes mod B^n+1
- (while mpn_mulmod_bnm1 of course computed mod B^n-1), so the "unwrap" code
- will be very similar to the current unwrap code.
-
* Decrease scratch usage.
* Call mpn_mulmod_bnm1_itch.