Alternative div_qr_1

Niels Möller nisse at lysator.liu.se
Fri Jun 18 15:45:21 CEST 2010


Torbjorn Granlund <tg at gmplib.org> writes:

> Yes, I think we should really replace the default C mpn_mod_1_1p code
> with your variant.  Mainly since it saves an umul_ppmm, which translates
> to many cycles for lots of different processors.

I'm appending a new version of mpn/generic/mod_1_1.c. It uses
add_sssaaaa, although it would be even better with an add_cssaaaa which
produces the carry in the form of a mask.
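
Such an add_cssaaaa could, e.g., be defined along the lines of the
portable add_sssaaaa fallback in the file below, with the carry-out
returned as a mask instead of being added into a third sum limb
(the argument order and the mask operand m are just an assumption):

#define add_cssaaaa(m, s1, s0, a1, a0, b1, b0)                          \
  do {                                                                  \
    UWtype __s0, __s1, __c0, __c1;                                      \
    __s0 = (a0) + (b0);                                                 \
    __s1 = (a1) + (b1);                                                 \
    __c0 = __s0 < (a0);                                                 \
    __c1 = __s1 < (a1);                                                 \
    (s0) = __s0;                                                        \
    __s1 = __s1 + __c0;                                                 \
    (s1) = __s1;                                                        \
    (m) = -(__c1 + (__s1 < __c0));      /* 0 or ~(UWtype) 0 */          \
  } while (0)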

The loop seems to be almost a cycle slower when benchmarked on my
x86_32 laptop.

Before:

  $ ./speed -C -s 1500 mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
  overhead 6.13 cycles, precision 10000 units of 1.67e-09 secs, CPU freq 600.00 MHz
          mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
  1500         #16.1173       16.1247

After:

  $  ./speed -C -s 1500 mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
  overhead 6.13 cycles, precision 10000 units of 1.67e-09 secs, CPU freq 600.00 MHz
          mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
  1500          17.0287      #16.3500

For some reason, the condition

  if (cy)
    r0 = r0_alt;

seems to be compiled to a branch rather than a cmov by gcc-4.3.2. Maybe
gcc-4.4.4 or gcc-4.5.0 is smarter.
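
If it matters, the selection could also be written branch-free with an
explicit mask; a rough, untested sketch (sel_mask is just an
illustrative name):

  sel_mask = -cy;      /* all ones if the addition carried, else zero */
  r0 = (r0_alt & sel_mask) | (r0 & ~sel_mask);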

It wins many cycles for small inputs, thanks to the simpler
precomputation: it needs only the inverse of the normalized d and
B^2 mod the normalized d.
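
For reference, a caller would use the pair along these lines
(mod_1_example is just an illustrative name):

  mp_limb_t
  mod_1_example (mp_srcptr up, mp_size_t n, mp_limb_t d)
  {
    mp_limb_t cps[4];
    mpn_mod_1_1p_cps (cps, d);          /* one-time precomputation */
    return mpn_mod_1_1p (up, n, d, cps);  /* requires n >= 2 */
  }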

Before:

$ ./speed -c -s 2-10 mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
overhead 6.13 cycles, precision 10000 units of 1.67e-09 secs, CPU freq 600.00 MHz
        mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
2             #116.01        140.07
3             #137.26        159.72
4             #150.91        172.95
5             #164.14        191.82
6             #184.40        206.27
7             #200.48        222.43
8             #215.86        240.04
9             #231.83        255.58
10            #248.72        272.00

After:

$  ./speed -c -s 2-10 mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
overhead 6.13 cycles, precision 10000 units of 1.67e-09 secs, CPU freq 600.00 MHz
        mpn_mod_1_1.0xabcdef01 mpn_mod_1_1.0xabcdef1
2              #83.05        109.71
3             #110.21        128.87
4             #126.33        145.30
5             #141.47        162.02
6             #157.15        176.48
7             #174.25        192.91
8             #191.78        208.53
9             #206.82        227.56
10            #221.98        242.98

Regards,
/Niels


/* mpn_mod_1_1p (up, n, d, cps)
   Divide (up, n) by d.  Return the single-limb remainder.

   Contributed to the GNU project by Niels Möller and Torbjörn
   Granlund. Based on a suggestion by Peter L. Montgomery.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* Define some longlong.h-style macros, but for wider operations.
 * add_sssaaaa is like longlong.h's add_ssaaaa but adds the
 * propagating carry-out into an additional sum operand. */

#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
#define add_sssaaaa(s2, s1, s0, a1, a0, b1, b0)                         \
  __asm__ ("add\t%7, %k2\n\tadc\t%5, %k1\n\tadc\t$0, %k0"               \
	   : "=r" (s2), "=&r" (s1), "=&r" (s0)                          \
	   : "0"  ((USItype)(s2)),                                      \
	     "1"  ((USItype)(a1)), "g" ((USItype)(b1)),                 \
	     "%2" ((USItype)(a0)), "g" ((USItype)(b0)))
#endif

#if defined (__amd64__) && W_TYPE_SIZE == 64
#define add_sssaaaa(s2, s1, s0, a1, a0, b1, b0)                         \
  __asm__ ("add\t%7, %q2\n\tadc\t%5, %q1\n\tadc\t$0, %q0"               \
	   : "=r" (s2), "=&r" (s1), "=&r" (s0)                          \
	   : "0"  ((UDItype)(s2)),                                      \
	     "1"  ((UDItype)(a1)), "rme" ((UDItype)(b1)),               \
	     "%2" ((UDItype)(a0)), "rme" ((UDItype)(b0)))
#endif

#if HAVE_HOST_CPU_FAMILY_powerpc && W_TYPE_SIZE == 64
#define add_sssaaaa(s2, s1, s0, a1, a0, b1, b0)                         \
  __asm__ ("add%I7c\t%2,%6,%7\n\tadde\t%1,%4,%5\n\taddze\t%0,%0"        \
	   : "=r" (s2), "=&r" (s1), "=&r" (s0)                          \
	   : "0"  ((UDItype)(s2)),                                      \
	     "r"  ((UDItype)(a1)), "r" ((UDItype)(b1)),                 \
	     "%2" ((UDItype)(a0)), "rI" ((UDItype)(b0)))
#endif

#ifndef add_sssaaaa
#define add_sssaaaa(s2, s1, s0, a1, a0, b1, b0)                         \
  do {                                                                  \
    UWtype __s0, __s1, __c0, __c1;                                      \
    __s0 = (a0) + (b0);                                                 \
    __s1 = (a1) + (b1);                                                 \
    __c0 = __s0 < (a0);                                                 \
    __c1 = __s1 < (a1);                                                 \
    (s0) = __s0;                                                        \
    __s1 = __s1 + __c0;                                                 \
    (s1) = __s1;                                                        \
    (s2) += __c1 + (__s1 < __c0);                                       \
  } while (0)
#endif

/* Normalizes d, then sets cps[0] = dinv, cps[1] = cnt, cps[2] = B^2
   (mod d), cps[3] = B^2 (mod d) + B - d. */
void
mpn_mod_1_1p_cps (mp_limb_t cps[4], mp_limb_t d)
{
  mp_limb_t dinv;
  mp_limb_t B2;
  int cnt;

  count_leading_zeros (cnt, d);
  cps[1] = cnt;

  d <<= cnt;

  invert_limb (dinv, d);
  cps[0] = dinv;
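  /* For normalized d, dinv = floor ((B^2 - 1) / d) - B, so
     B^2 = (dinv + B) * d + r + 1 with 0 <= r < d, and hence
     -dinv * d (computed mod B) equals r + 1 = B^2 mod d.  The corner
     case r + 1 == d can occur only when d divides B^2, and is
     harmless. */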
  B2 = -dinv * d;
  cps[2] = B2;
  cps[3] = B2 - d;
}

mp_limb_t
mpn_mod_1_1p (mp_srcptr up, mp_size_t n, mp_limb_t d, mp_limb_t cps[4])
{
  mp_limb_t r0, r1, r2;
  mp_limb_t p0, p1;
  mp_limb_t q;
  mp_limb_t dinv;
  mp_limb_t B2;
  mp_limb_t B2md;
  int cnt;

  ASSERT (n >= 2);		/* fix tuneup.c if this is changed */

  /* We work exclusively with the normalized d.  At the end, before the
     final division, we left shift what's left of u to match the
     normalization, and then right shift the resulting remainder. */
  cnt = cps[1];
  d <<= cnt;			/* use the normalized d from here on */
  B2 = cps[2];
  B2md = cps[3];

  r1 = up[n-1];
  r0 = up[n-2];

  if (n == 2)
    {
      if (cnt > 0)
	{
	  r2 = r1 >> (GMP_LIMB_BITS - cnt);
	  goto final_unnormalized;
	}
      /* The case n == 2 with d already normalized doesn't make much
         sense for this algorithm, since the precomputed B2 isn't
         used. */
      goto final;
    }

  umul_ppmm (p1, p0, r1, B2);
  r2 = 0;
  add_sssaaaa (r2, r1, r0, r0, up[n-3], p1, p0);

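  /* Main loop: fold in one more limb of u per iteration, Horner style.
     On entry, r2 is 0 or 1 and r2*B^2 + r1*B + r0 is congruent to the
     part of u processed so far (mod the normalized d).  The new value
     is the old one times B, plus up[n]; the r1*B^2 term is reduced via
     B^2 == B2 (mod d) to p1*B + p0, and the r2*B^3 term by adding B2
     into r0, which enters the final add with weight B. */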
  for (n -= 4; n >= 0; n--)
    {
      mp_limb_t mask;
      mp_limb_t cy;
      mp_limb_t r0_alt;
      mask = -r2;
      
      umul_ppmm (p1, p0, r1, B2);
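      /* If r2 is set, add B2 into r0.  Should that addition carry,
         r0 + B2 no longer fits in a limb; adding B2md = B2 - d instead
         gives a congruent value that does fit, since B2 <= d < B. */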
      r0_alt = r0 + B2md;
      ADDC_LIMB (cy, r0, r0, mask & B2);
      if (cy)
	r0 = r0_alt;
      r2 = 0;
      add_sssaaaa (r2, r1, r0, r0, up[n], p1, p0);
    }

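  /* Shift the remainder representative left by cnt, to match the
     normalized d, and fold the bits shifted out at the top back in
     via B2. */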
  if (cnt > 0)
    {
      r2 &= 1;
      r2 = (r2 << cnt) | (r1 >> (GMP_LIMB_BITS - cnt));
    final_unnormalized:
      r1 = (r1 << cnt) | (r0 >> (GMP_LIMB_BITS - cnt));
      r0 <<= cnt;

      umul_ppmm (p1, p0, B2, r2);
      r2 = 0;
      add_sssaaaa (r2, r1, r0, r1, r0, p1, p0);
    }

  r1 -= (-r2) & d;

 final:

  if (r1 >= d)
    r1 -= d;
  
  dinv = cps[0];
  udiv_qrnnd_preinv (q, r0, r1, r0, d, dinv);
  ASSERT (cnt == 0 || (r0 & ((CNST_LIMB(1) << cnt) - 1)) == 0);
  return r0 >> cnt;
}

-- 
Niels Möller. PGP-encrypted email is preferred. Keyid C0B98E26.
Internet email is subject to wholesale government surveillance.

