[Gmp-commit] /var/hg/gmp: 2 new changesets

mercurial at gmplib.org
Wed Oct 17 23:03:11 CEST 2012


details:   /var/hg/gmp/rev/f6e3f4659f8a
changeset: 15082:f6e3f4659f8a
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Wed Oct 17 23:03:01 2012 +0200
description:
(mpz_powm_ui): Deflect to mpz_powm for large exponent.

details:   /var/hg/gmp/rev/d3a9efa9d582
changeset: 15083:d3a9efa9d582
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Wed Oct 17 23:03:05 2012 +0200
description:
*** empty log message ***

diffstat:

 ChangeLog     |    4 +
 mpz/powm_ui.c |  300 ++++++++++++++++++++++++++++++++-------------------------
 2 files changed, 171 insertions(+), 133 deletions(-)

diffs (truncated from 348 to 300 lines):

diff -r 40f8f681e95b -r d3a9efa9d582 ChangeLog
--- a/ChangeLog	Thu Sep 13 01:29:14 2012 +0200
+++ b/ChangeLog	Wed Oct 17 23:03:05 2012 +0200
@@ -1,3 +1,7 @@
+2012-10-17  Torbjorn Granlund  <tege at gmplib.org>
+
+	* mpz/powm_ui.c (mpz_powm_ui): Deflect to mpz_powm for large exponent.
+
 2012-09-10  Torbjorn Granlund  <tege at gmplib.org>
 
 	* demos/factorize.c: Rewrite no more current form.  Implement Lucas
diff -r 40f8f681e95b -r d3a9efa9d582 mpz/powm_ui.c
--- a/mpz/powm_ui.c	Thu Sep 13 01:29:14 2012 +0200
+++ b/mpz/powm_ui.c	Wed Oct 17 23:03:05 2012 +0200
@@ -1,7 +1,9 @@
-/* mpz_powm_ui(res,base,exp,mod) -- Set RES to (base**exp) mod MOD.
+/* mpz_powm_ui(res,base,exp,mod) -- Set R to (U^E) mod M.
 
-Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2002, 2005 Free Software
-Foundation, Inc.
+   Contributed to the GNU project by Torbjorn Granlund.
+
+Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2002, 2005, 2008,
+2009, 2011, 2012 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -23,6 +25,26 @@
 #include "gmp-impl.h"
 #include "longlong.h"
 
+
+/* This code is very old, and should be rewritten to current GMP standard.  It
+   is slower than mpz_powm for large exponents, but also for small exponents
+   when the mod argument is small.
+
+   As an intermediate solution, we now deflect to mpz_powm for exponents >= 20.
+*/
+
+/*
+  b ^ e mod m   res
+  0   0     0    ?
+  0   e     0    ?
+  0   0     m    ?
+  0   e     m    0
+  b   0     0    ?
+  b   e     0    ?
+  b   0     m    1 mod m
+  b   e     m    b^e mod m
+*/
+
 /* Compute t = a mod m, a is defined by (ap,an), m is defined by (mp,mn), and
    t is defined by (tp,mn).  */
 static void
@@ -42,107 +64,94 @@
 void
 mpz_powm_ui (mpz_ptr r, mpz_srcptr b, unsigned long int el, mpz_srcptr m)
 {
-  mp_ptr xp, tp, qp, mp, bp;
-  mp_size_t xn, tn, mn, bn;
-  int m_zero_cnt;
-  int c;
-  mp_limb_t e;
-  TMP_DECL;
+  if (el < 20)
+    {
+      mp_ptr xp, tp, qp, mp, bp;
+      mp_size_t xn, tn, mn, bn;
+      int m_zero_cnt;
+      int c;
+      mp_limb_t e;
+      TMP_DECL;
 
-  mp = PTR(m);
-  mn = ABSIZ(m);
-  if (UNLIKELY (mn == 0))
-    DIVIDE_BY_ZERO;
+      mp = PTR(m);
+      mn = ABSIZ(m);
+      if (UNLIKELY (mn == 0))
+	DIVIDE_BY_ZERO;
 
-  if (el == 0)
-    {
-      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
-	 depending on if MOD equals 1.  */
-      SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
-      PTR(r)[0] = 1;
-      return;
-    }
-
-  TMP_MARK;
-
-  /* Normalize m (i.e. make its most significant bit set) as required by
-     division functions below.  */
-  count_leading_zeros (m_zero_cnt, mp[mn - 1]);
-  m_zero_cnt -= GMP_NAIL_BITS;
-  if (m_zero_cnt != 0)
-    {
-      mp_ptr new_mp = TMP_ALLOC_LIMBS (mn);
-      mpn_lshift (new_mp, mp, mn, m_zero_cnt);
-      mp = new_mp;
-    }
-
-  bn = ABSIZ(b);
-  bp = PTR(b);
-  if (bn > mn)
-    {
-      /* Reduce possibly huge base.  Use a function call to reduce, since we
-	 don't want the quotient allocation to live until function return.  */
-      mp_ptr new_bp = TMP_ALLOC_LIMBS (mn);
-      reduce (new_bp, bp, bn, mp, mn);
-      bp = new_bp;
-      bn = mn;
-      /* Canonicalize the base, since we are potentially going to multiply with
-	 it quite a few times.  */
-      MPN_NORMALIZE (bp, bn);
-    }
-
-  if (bn == 0)
-    {
-      SIZ(r) = 0;
-      TMP_FREE;
-      return;
-    }
-
-  tp = TMP_ALLOC_LIMBS (2 * mn + 1);
-  xp = TMP_ALLOC_LIMBS (mn);
-
-  qp = TMP_ALLOC_LIMBS (mn + 1);
-
-  MPN_COPY (xp, bp, bn);
-  xn = bn;
-
-  e = el;
-  count_leading_zeros (c, e);
-  e = (e << c) << 1;		/* shift the exp bits to the left, lose msb */
-  c = GMP_LIMB_BITS - 1 - c;
-
-  /* Main loop. */
-
-  /* If m is already normalized (high bit of high limb set), and b is the
-     same size, but a bigger value, and e==1, then there's no modular
-     reductions done and we can end up with a result out of range at the
-     end. */
-  if (c == 0)
-    {
-      if (xn == mn && mpn_cmp (xp, mp, mn) >= 0)
-	mpn_sub_n (xp, xp, mp, mn);
-      goto finishup;
-    }
-
-  while (c != 0)
-    {
-      mpn_sqr (tp, xp, xn);
-      tn = 2 * xn; tn -= tp[tn - 1] == 0;
-      if (tn < mn)
+      if (el == 0)
 	{
-	  MPN_COPY (xp, tp, tn);
-	  xn = tn;
-	}
-      else
-	{
-	  mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
-	  xn = mn;
+	  /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+	     depending on if MOD equals 1.  */
+	  SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
+	  PTR(r)[0] = 1;
+	  return;
 	}
 
-      if ((mp_limb_signed_t) e < 0)
+      TMP_MARK;
+
+      /* Normalize m (i.e. make its most significant bit set) as required by
+	 division functions below.  */
+      count_leading_zeros (m_zero_cnt, mp[mn - 1]);
+      m_zero_cnt -= GMP_NAIL_BITS;
+      if (m_zero_cnt != 0)
 	{
-	  mpn_mul (tp, xp, xn, bp, bn);
-	  tn = xn + bn; tn -= tp[tn - 1] == 0;
+	  mp_ptr new_mp = TMP_ALLOC_LIMBS (mn);
+	  mpn_lshift (new_mp, mp, mn, m_zero_cnt);
+	  mp = new_mp;
+	}
+
+      bn = ABSIZ(b);
+      bp = PTR(b);
+      if (bn > mn)
+	{
+	  /* Reduce possibly huge base.  Use a function call to reduce, since we
+	     don't want the quotient allocation to live until function return.  */
+	  mp_ptr new_bp = TMP_ALLOC_LIMBS (mn);
+	  reduce (new_bp, bp, bn, mp, mn);
+	  bp = new_bp;
+	  bn = mn;
+	  /* Canonicalize the base, since we are potentially going to multiply with
+	     it quite a few times.  */
+	  MPN_NORMALIZE (bp, bn);
+	}
+
+      if (bn == 0)
+	{
+	  SIZ(r) = 0;
+	  TMP_FREE;
+	  return;
+	}
+
+      tp = TMP_ALLOC_LIMBS (2 * mn + 1);
+      xp = TMP_ALLOC_LIMBS (mn);
+
+      qp = TMP_ALLOC_LIMBS (mn + 1);
+
+      MPN_COPY (xp, bp, bn);
+      xn = bn;
+
+      e = el;
+      count_leading_zeros (c, e);
+      e = (e << c) << 1;		/* shift the exp bits to the left, lose msb */
+      c = GMP_LIMB_BITS - 1 - c;
+
+      /* Main loop. */
+
+      /* If m is already normalized (high bit of high limb set), and b is the
+	 same size, but a bigger value, and e==1, then there's no modular
+	 reductions done and we can end up with a result out of range at the
+	 end. */
+      if (c == 0)
+	{
+	  if (xn == mn && mpn_cmp (xp, mp, mn) >= 0)
+	    mpn_sub_n (xp, xp, mp, mn);
+	  goto finishup;
+	}
+
+      while (c != 0)
+	{
+	  mpn_sqr (tp, xp, xn);
+	  tn = 2 * xn; tn -= tp[tn - 1] == 0;
 	  if (tn < mn)
 	    {
 	      MPN_COPY (xp, tp, tn);
@@ -153,43 +162,68 @@
 	      mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
 	      xn = mn;
 	    }
+
+	  if ((mp_limb_signed_t) e < 0)
+	    {
+	      mpn_mul (tp, xp, xn, bp, bn);
+	      tn = xn + bn; tn -= tp[tn - 1] == 0;
+	      if (tn < mn)
+		{
+		  MPN_COPY (xp, tp, tn);
+		  xn = tn;
+		}
+	      else
+		{
+		  mpn_tdiv_qr (qp, xp, 0L, tp, tn, mp, mn);
+		  xn = mn;
+		}
+	    }
+	  e <<= 1;
+	  c--;
 	}
-      e <<= 1;
-      c--;
+
+     finishup:
+      /* We shifted m left m_zero_cnt steps.  Adjust the result by reducing
+	 it with the original MOD.  */
+      if (m_zero_cnt != 0)
+	{
+	  mp_limb_t cy;
+	  cy = mpn_lshift (tp, xp, xn, m_zero_cnt);
+	  tp[xn] = cy; xn += cy != 0;
+
+	  if (xn < mn)
+	    {
+	      MPN_COPY (xp, tp, xn);
+	    }
+	  else
+	    {
+	      mpn_tdiv_qr (qp, xp, 0L, tp, xn, mp, mn);
+	      xn = mn;
+	    }
+	  mpn_rshift (xp, xp, xn, m_zero_cnt);
+	}
+      MPN_NORMALIZE (xp, xn);
+
+      if ((el & 1) != 0 && SIZ(b) < 0 && xn != 0)
+	{
+	  mp = PTR(m);			/* want original, unnormalized m */
+	  mpn_sub (xp, mp, mn, xp, xn);
+	  xn = mn;
+	  MPN_NORMALIZE (xp, xn);
+	}
+      MPZ_REALLOC (r, xn);


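The diff above is cut off before the new large-exponent branch, but the intent stated in the added comment (deflect to mpz_powm for exponents >= 20) can be illustrated with GMP's public API. The sketch below is not the committed mpz/powm_ui.c code; the helper name and the mpz_t wrapper for the exponent are illustrative assumptions only.

#include <gmp.h>

/* Illustrative sketch only -- not the committed mpz/powm_ui.c code.
   Shows the shape of the deflection: exponents below 20 take the old
   binary-exponentiation path, larger ones are handed to mpz_powm.  */
static void
powm_ui_deflect_sketch (mpz_t r, const mpz_t b, unsigned long el, const mpz_t m)
{
  if (el < 20)
    {
      mpz_powm_ui (r, b, el, m);    /* small exponent: old code path */
    }
  else
    {
      mpz_t e;
      mpz_init_set_ui (e, el);      /* lift the exponent to an mpz_t */
      mpz_powm (r, b, e, m);        /* large exponent: let mpz_powm do the work */
      mpz_clear (e);
    }
}

Compiled with -lgmp, calling the sketch with an exponent such as 1000 should produce the same residue as mpz_powm_ui itself; only the internal code path taken differs.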