[Gmp-commit] /var/hg/gmp: Spacing cleanup.
mercurial at gmplib.org
Mon Nov 21 21:15:21 CET 2011
details:   /var/hg/gmp/rev/c50f17990772
changeset: 14461:c50f17990772
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Mon Nov 21 21:15:18 2011 +0100
description:
Spacing cleanup.

diffstat:
 AUTHORS                            |  2 +-
 gmp-h.in                           |  2 +-
 gmpxx.h                            |  2 +-
 mpn/generic/gcd_subdiv_step.c      |  2 +-
 mpn/generic/hgcd_appr.c            |  6 ++--
 mpn/generic/hgcd_jacobi.c          |  4 +-
 mpn/generic/hgcd_reduce.c          | 14 ++++++------
 mpn/generic/hgcd_step.c            |  2 +-
 mpn/powerpc64/mode64/aorsmul_1.asm |  2 +-
 mpn/s390_32/lshift.asm             |  2 +-
 mpn/s390_32/lshiftc.asm            |  2 +-
 mpn/s390_32/rshift.asm             |  2 +-
 mpn/x86/atom/lshift.asm            |  4 +-
 mpn/x86/atom/sse2/mul_1.asm        |  2 +-
 mpn/x86/bdiv_dbm1c.asm             |  4 +-
 mpn/x86/bdiv_q_1.asm               |  2 +-
 mpn/x86/k7/addlsh1_n.asm           |  6 ++--
 mpn/x86/k7/invert_limb.asm         |  2 +-
 mpn/x86/k7/sublsh1_n.asm           |  8 +++---
 mpn/x86/p6/bdiv_q_1.asm            |  4 +-
 mpn/x86/pentium/bdiv_q_1.asm       |  2 +-
 mpn/x86_64/div_qr_2n_pi1.asm       |  6 ++--
 mpn/x86_64/div_qr_2u_pi1.asm       |  6 ++--
 mpn/x86_64/mod_1_1.asm             |  4 +-
 mpz/jacobi.c                       |  8 +++---
 tests/cxx/t-ops2.cc                | 40 +++++++++++++++++++-------------------
 tests/devel/try.c                  |  2 +-
 tests/mpn/t-hgcd_appr.c            | 14 ++++++------
 tests/mpn/t-mod_1.c                |  2 +-
 tests/mpn/t-mulmid.c               |  2 +-
 tests/mpz/t-jac.c                  |  4 +-
 tune/tune-gcd-p.c                  |  4 +-
 tune/tuneup.c                      |  2 +-
 33 files changed, 85 insertions(+), 85 deletions(-)
diffs (truncated from 727 to 300 lines):
diff -r c8459b86807e -r c50f17990772 AUTHORS
--- a/AUTHORS Mon Nov 21 21:03:39 2011 +0100
+++ b/AUTHORS Mon Nov 21 21:15:18 2011 +0100
@@ -58,5 +58,5 @@
aors_err2_n.asm, aors_err3_n.asm,
mulmid_basecase.asm,
mpn/x86_64/core2/aors_err1_n.asm.
-
+
Martin Boij mpn/generic/perfpow.c
diff -r c8459b86807e -r c50f17990772 gmp-h.in
--- a/gmp-h.in Mon Nov 21 21:03:39 2011 +0100
+++ b/gmp-h.in Mon Nov 21 21:15:18 2011 +0100
@@ -1535,7 +1535,7 @@
#define mpn_div_qr_2 __MPN(div_qr_2)
__GMP_DECLSPEC mp_limb_t mpn_div_qr_2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
-
+
#define mpn_gcd __MPN(gcd)
__GMP_DECLSPEC mp_size_t mpn_gcd __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t));
diff -r c8459b86807e -r c50f17990772 gmpxx.h
--- a/gmpxx.h Mon Nov 21 21:03:39 2011 +0100
+++ b/gmpxx.h Mon Nov 21 21:15:18 2011 +0100
@@ -616,7 +616,7 @@
}
else
#endif
- mpz_tdiv_q_ui(z, w, l);
+ mpz_tdiv_q_ui(z, w, l);
}
static void eval(mpz_ptr z, unsigned long int l, mpz_srcptr w)
{
diff -r c8459b86807e -r c50f17990772 mpn/generic/gcd_subdiv_step.c
--- a/mpn/generic/gcd_subdiv_step.c Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/generic/gcd_subdiv_step.c Mon Nov 21 21:15:18 2011 +0100
@@ -185,7 +185,7 @@
}
else
MPN_COPY (bp, ap, an);
-
+
MPN_DECR_U (tp, qn, 1);
}
diff -r c8459b86807e -r c50f17990772 mpn/generic/hgcd_appr.c
--- a/mpn/generic/hgcd_appr.c Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/generic/hgcd_appr.c Mon Nov 21 21:15:18 2011 +0100
@@ -72,7 +72,7 @@
we discard some of the least significant limbs, we must keep one
additional bit to account for the truncation error. We maintain
the GMP_NUMB_BITS * s - extra_bits as the current target size. */
-
+
s = n/2 + 1;
if (BELOW_THRESHOLD (n, HGCD_APPR_THRESHOLD))
{
@@ -155,7 +155,7 @@
ASSERT (n <= 2*s);
nn = mpn_hgcd_step (n, ap, bp, s, M, tp);
-
+
if (!nn)
return 1;
@@ -249,7 +249,7 @@
ASSERT (n <= 2*s);
nn = mpn_hgcd_step (n, ap, bp, s, M, tp);
-
+
if (!nn)
return success;
diff -r c8459b86807e -r c50f17990772 mpn/generic/hgcd_jacobi.c
--- a/mpn/generic/hgcd_jacobi.c Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/generic/hgcd_jacobi.c Mon Nov 21 21:15:18 2011 +0100
@@ -26,7 +26,7 @@
#include "longlong.h"
/* This file is almost a copy of hgcd.c, with some added calls to
- mpn_jacobi_update */
+ mpn_jacobi_update */
struct hgcd_jacobi_ctx
{
@@ -127,7 +127,7 @@
struct hgcd_jacobi_ctx ctx;
ctx.M = M;
ctx.bitsp = bitsp;
-
+
return mpn_gcd_subdiv_step (ap, bp, n, s, hgcd_jacobi_hook, &ctx, tp);
}
}
diff -r c8459b86807e -r c50f17990772 mpn/generic/hgcd_reduce.c
--- a/mpn/generic/hgcd_reduce.c Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/generic/hgcd_reduce.c Mon Nov 21 21:15:18 2011 +0100
@@ -38,7 +38,7 @@
ASSERT (an >= bn);
ASSERT (rn >= an);
ASSERT (an + bn <= rn + 1);
-
+
TMP_MARK;
tp = TMP_ALLOC_LIMBS (an + bn);
@@ -61,7 +61,7 @@
/* FIXME:
x Take scratch parameter, and figure out scratch need.
- x Use some fallback for small M->n?
+ x Use some fallback for small M->n?
*/
static mp_size_t
hgcd_matrix_apply (const struct hgcd_matrix *M,
@@ -83,7 +83,7 @@
MPN_NORMALIZE (ap, an);
bn = n;
MPN_NORMALIZE (bp, bn);
-
+
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
{
@@ -102,7 +102,7 @@
if (mn[0][1] == 0)
{
mp_size_t qn;
-
+
/* A unchanged, M = (1, 0; q, 1) */
ASSERT (mn[0][0] == 1);
ASSERT (M->p[0][0][0] == 1);
@@ -121,7 +121,7 @@
ASSERT (M->p[1][1][0] == 1);
/* Put A <-- A - q * B */
- nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
+ nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
}
else
{
@@ -159,7 +159,7 @@
MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
if (n + mn[0][1] < modn)
MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);
-
+
cy = mpn_sub_n (tp, tp, sp, modn);
MPN_DECR_U (tp, modn, cy);
@@ -209,7 +209,7 @@
itch = 2*(n-p) + mpn_hgcd_itch (n-p);
/* Currently, hgcd_matrix_apply allocates its own storage. */
}
- return itch;
+ return itch;
}
/* FIXME: Document storage need. */
diff -r c8459b86807e -r c50f17990772 mpn/generic/hgcd_step.c
--- a/mpn/generic/hgcd_step.c Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/generic/hgcd_step.c Mon Nov 21 21:15:18 2011 +0100
@@ -112,7 +112,7 @@
/* Multiply M1^{-1} (a;b) */
return mpn_matrix22_mul1_inverse_vector (&M1, ap, tp, bp, n);
}
-
+
subtract:
return mpn_gcd_subdiv_step (ap, bp, n, s, hgcd_hook, M, tp);
diff -r c8459b86807e -r c50f17990772 mpn/powerpc64/mode64/aorsmul_1.asm
--- a/mpn/powerpc64/mode64/aorsmul_1.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/powerpc64/mode64/aorsmul_1.asm Mon Nov 21 21:15:18 2011 +0100
@@ -54,7 +54,7 @@
')
MULFUNC_PROLOGUE(mpn_addmul_1 mpn_submul_1)
-
+
ASM_START()
PROLOGUE(func_nc)
EPILOGUE()
diff -r c8459b86807e -r c50f17990772 mpn/s390_32/lshift.asm
--- a/mpn/s390_32/lshift.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/s390_32/lshift.asm Mon Nov 21 21:15:18 2011 +0100
@@ -126,7 +126,7 @@
L(end): l %r10, 16(up)
sll %r10, 0(cnt)
st %r10, 12(rp)
-
+
lr %r2, %r12
lm %r6, %r12, 24(%r15)
br %r14
diff -r c8459b86807e -r c50f17990772 mpn/s390_32/lshiftc.asm
--- a/mpn/s390_32/lshiftc.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/s390_32/lshiftc.asm Mon Nov 21 21:15:18 2011 +0100
@@ -138,7 +138,7 @@
sll %r10, 0(cnt)
xr %r10, %r13
st %r10, 12(rp)
-
+
lr %r2, %r12
lm %r6, %r13, 24(%r15)
br %r14
diff -r c8459b86807e -r c50f17990772 mpn/s390_32/rshift.asm
--- a/mpn/s390_32/rshift.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/s390_32/rshift.asm Mon Nov 21 21:15:18 2011 +0100
@@ -120,7 +120,7 @@
L(end): l %r11, 0(up)
srl %r11, 0(cnt)
st %r11, 0(rp)
-
+
lr %r2, %r12
lm %r6, %r12, 24(%r15)
br %r14
diff -r c8459b86807e -r c50f17990772 mpn/x86/atom/lshift.asm
--- a/mpn/x86/atom/lshift.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/x86/atom/lshift.asm Mon Nov 21 21:15:18 2011 +0100
@@ -160,7 +160,7 @@
shr $2, %eax C (size + 3) / 4
and $3, %edx C (size - 1) % 4
jz L(goloop) C jmp if size == 1 (mod 4)
- shr %edx
+ shr %edx
jnc L(odd) C jump if size == 3 (mod 4)
add %ecx, %ecx
@@ -173,7 +173,7 @@
jnz L(goloop) C jump if size == 0 (mod 4)
L(odd): lea -8(up), up
lea -8(rp), rp
- jmp L(sentry) C reached if size == 2 or 3 (mod 4)
+ jmp L(sentry) C reached if size == 2 or 3 (mod 4)
L(sloop):
adc %ecx, %ecx
diff -r c8459b86807e -r c50f17990772 mpn/x86/atom/sse2/mul_1.asm
--- a/mpn/x86/atom/sse2/mul_1.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/x86/atom/sse2/mul_1.asm Mon Nov 21 21:15:18 2011 +0100
@@ -62,7 +62,7 @@
PROLOGUE(mpn_mul_1)
pxor %mm6, %mm6
L(ent): push %esi FRAME_pushl()
- mov PARAM_SRC, up
+ mov PARAM_SRC, up
mov PARAM_SIZE, %eax C size
movd PARAM_MUL, %mm7
movd (up), %mm0
diff -r c8459b86807e -r c50f17990772 mpn/x86/bdiv_dbm1c.asm
--- a/mpn/x86/bdiv_dbm1c.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/x86/bdiv_dbm1c.asm Mon Nov 21 21:15:18 2011 +0100
@@ -24,10 +24,10 @@
C P6 model 0-8,10-12)
C P6 model 9 (Banias)
C P6 model 13 (Dothan) 5.1
-C P4 model 0 (Willamette)
+C P4 model 0 (Willamette)
C P4 model 1 (?)
C P4 model 2 (Northwood) 13.67
-C P4 model 3 (Prescott)
+C P4 model 3 (Prescott)
C P4 model 4 (Nocona)
C Intel Atom
C AMD K6
diff -r c8459b86807e -r c50f17990772 mpn/x86/bdiv_q_1.asm
--- a/mpn/x86/bdiv_q_1.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/x86/bdiv_q_1.asm Mon Nov 21 21:15:18 2011 +0100
@@ -30,7 +30,7 @@
C K7 12.0
C P4 42.0
-MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
+MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
defframe(PARAM_SHIFT, 24)
defframe(PARAM_INVERSE,20)
diff -r c8459b86807e -r c50f17990772 mpn/x86/k7/addlsh1_n.asm
--- a/mpn/x86/k7/addlsh1_n.asm Mon Nov 21 21:03:39 2011 +0100
+++ b/mpn/x86/k7/addlsh1_n.asm Mon Nov 21 21:15:18 2011 +0100
@@ -44,14 +44,14 @@
C This is a basic addlsh1_n for k7, atom, and perhaps some other x86-32
C processors. It uses 2*3-way unrolling, for good reasons. Unfortunately,
C that means we need an initial magic multiply.
-C
+C
C It is not clear how to do sublsh1_n or rsblsh1_n using the same pattern. We
C cannot do rsblsh1_n since we feed carry from the shift blocks to the
C add/subtract blocks, which is right for addition but reversed for
C subtraction. We could perhaps do sublsh1_n, with some extra move insns,
C without losing any time, since we're not issue limited but carry recurrency
C latency.
-C
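
As context for the addlsh1_n comment above: mpn_addlsh1_n computes {rp,n} = {up,n} + ({vp,n} << 1). The following is a minimal plain-C sketch of that contract, built only from the public mpn_lshift and mpn_add_n entry points; it is an illustrative reference, not the unrolled k7 code in the patch, and ref_addlsh1_n is a hypothetical name.

#include <gmp.h>

/* Reference sketch only: {rp,n} = {up,n} + ({vp,n} << 1), returning
   the carry out (0, 1 or 2).  Assumes {rp,n} does not partially
   overlap {up,n} or {vp,n}. */
static mp_limb_t
ref_addlsh1_n (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n)
{
  mp_limb_t shift_cy, add_cy;
  shift_cy = mpn_lshift (rp, vp, n, 1);  /* rp = 2*vp; save the bit shifted out */
  add_cy = mpn_add_n (rp, rp, up, n);    /* rp += up; save the carry */
  return shift_cy + add_cy;              /* total carry out, 0..2 */
}

The k7 assembly fuses these two passes into one unrolled loop, feeding the shifter's carry straight into the adder; that carry direction is what the comment above identifies as the obstacle to reusing the pattern for rsblsh1_n.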