[Gmp-commit] /home/hgfiles/gmp: 7 new changesets

mercurial at gmplib.org
Thu May 13 11:35:44 CEST 2010


details:   /home/hgfiles/gmp/rev/9a41b1b8e598
changeset: 13627:9a41b1b8e598
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Wed May 12 01:50:26 2010 +0200
description:
New file.
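
(The new file is mpn/x86_64/pentium4/lshiftc.asm; see the ChangeLog entry and
the full listing near the end of the diff below.)  mpn_lshiftc shifts a limb
vector left and stores the bitwise complement of the result.  A plain C
reference of the operation as I read the assembly; the name ref_lshiftc and
the no-nails assumption are mine, not code from this commit:

  #include <gmp.h>

  /* {rp,n} = ~({up,n} << cnt) for 0 < cnt < GMP_NUMB_BITS, returning the
     bits shifted out of the top limb (uncomplemented), as mpn_lshift does.
     Reference sketch only; the committed file is hand-scheduled MMX code.  */
  mp_limb_t
  ref_lshiftc (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
  {
    mp_limb_t retval;
    mp_size_t i;

    retval = up[n - 1] >> (GMP_NUMB_BITS - cnt);
    for (i = n - 1; i > 0; i--)
      rp[i] = ~((up[i] << cnt) | (up[i - 1] >> (GMP_NUMB_BITS - cnt)));
    rp[0] = ~(up[0] << cnt);
    return retval;
  }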

details:   /home/hgfiles/gmp/rev/3615f483203e
changeset: 13628:3615f483203e
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Wed May 12 01:51:57 2010 +0200
description:
Tune for more processors.

details:   /home/hgfiles/gmp/rev/228950d685e8
changeset: 13629:228950d685e8
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu May 13 11:15:05 2010 +0200
description:
Measure mpn_addlsh_n, mpn_sublsh_n, mpn_rsblsh_n.
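
(These three routines combine an n-limb add or subtract with a left shift of
one operand in a single pass.)  A plain C reference for mpn_addlsh_n; the
name ref_addlsh_n, the no-nails assumption and the exact return-value
convention are my reading of the interface, not code from this commit:

  #include <gmp.h>

  /* {rp,n} = {up,n} + ({vp,n} << k) for 0 < k < GMP_NUMB_BITS, returning the
     carry out (bits shifted off the top of vp plus the final add carry).
     mpn_sublsh_n and mpn_rsblsh_n are the analogous {up,n} - ({vp,n} << k)
     and ({vp,n} << k) - {up,n}.  The routines being timed are optimized
     assembly; this is a reference sketch only.  */
  mp_limb_t
  ref_addlsh_n (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n,
                unsigned int k)
  {
    mp_limb_t shifted_in = 0, cy = 0, shifted, sum;
    mp_size_t i;

    for (i = 0; i < n; i++)
      {
        shifted = (vp[i] << k) | shifted_in;        /* limb i of {vp,n} << k  */
        shifted_in = vp[i] >> (GMP_NUMB_BITS - k);  /* bits moved to limb i+1 */
        sum = up[i] + cy;
        cy = sum < cy;                              /* carry from old carry   */
        sum += shifted;
        cy += sum < shifted;                        /* carry from the add     */
        rp[i] = sum;
      }
    return shifted_in + cy;
  }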

details:   /home/hgfiles/gmp/rev/1c710c79c86c
changeset: 13630:1c710c79c86c
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu May 13 11:30:31 2010 +0200
description:
Disable mpn_addlsh_n, mpn_rsblsh_n for Pentium4.
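
(The disabling is done by #undef-ing HAVE_NATIVE_mpn_addlsh_n and
HAVE_NATIVE_mpn_rsblsh_n in mpn/x86_64/pentium4/gmp-mparam.h; see the hunk
further down, whose comment notes that separate add/sub and shift is faster
on this CPU.)  Callers inside the library guard on those macros, so the
#undef makes them take the separate-shift-and-add path.  A sketch of that
guard; the helper name and the caller-provided scratch are illustrative
assumptions, not GMP code:

  #include <gmp.h>

  /* tp is n limbs of scratch.  With HAVE_NATIVE_mpn_addlsh_n undefined the
     fallback branch is compiled, using only the public mpn entry points.  */
  static mp_limb_t
  addlsh_n_or_fallback (mp_ptr rp, mp_srcptr up, mp_srcptr vp,
                        mp_size_t n, unsigned int k, mp_ptr tp)
  {
  #if HAVE_NATIVE_mpn_addlsh_n
    return mpn_addlsh_n (rp, up, vp, n, k);
  #else
    mp_limb_t cy = mpn_lshift (tp, vp, n, k);
    return cy + mpn_add_n (rp, up, tp, n);
  #endif
  }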

details:   /home/hgfiles/gmp/rev/e40c65ba52cb
changeset: 13631:e40c65ba52cb
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu May 13 11:31:44 2010 +0200
description:
Amend comments.

details:   /home/hgfiles/gmp/rev/a3183ca20fcc
changeset: 13632:a3183ca20fcc
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu May 13 11:33:10 2010 +0200
description:
Give ultrasparct1 and ultrasparct2 a special code path.

details:   /home/hgfiles/gmp/rev/2268a5f09f18
changeset: 13633:2268a5f09f18
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu May 13 11:35:37 2010 +0200
description:
Trivial merge.

diffstat:

 ChangeLog                        |   28 ++++++-
 configure.in                     |    2 +
 mpn/x86_64/aorrlsh_n.asm         |   15 +-
 mpn/x86_64/mod_1_4.asm           |   18 ++--
 mpn/x86_64/pentium4/gmp-mparam.h |    2 +
 mpn/x86_64/pentium4/lshiftc.asm  |  162 +++++++++++++++++++++++++++++++++++++++
 mpz/jacobi.c                     |   11 +-
 tune/common.c                    |   26 ++++++
 tune/speed.c                     |   10 ++
 tune/speed.h                     |    4 +
 10 files changed, 258 insertions(+), 20 deletions(-)

diffs (truncated from 459 to 300 lines):

diff -r 5b0baf874eb9 -r 2268a5f09f18 ChangeLog
--- a/ChangeLog	Tue May 11 16:09:10 2010 +0200
+++ b/ChangeLog	Thu May 13 11:35:37 2010 +0200
@@ -1,7 +1,33 @@
+2010-05-13  Torbjorn Granlund  <tege at gmplib.org>
+
+	* configure.in: Give ultrasparct1 and ultrasparct2 special code path.
+
+	* mpn/x86_64/pentium4/gmp-mparam.h: Disable mpn_addlsh_n, mpn_rsblsh_n.
+
+2010-05-12  Niels Möller  <nisse at lysator.liu.se>
+
+	* mpz/jacobi.c (mpz_jacobi): Fixed off-by-one error in use of
+	scratch space.
+
+	* tune/common.c (speed_mpz_powm_sec): New function.
+	* tune/speed.h: Declare speed_mpz_powm_sec.
+	* tune/speed.c (routine): Added speed_mpz_powm_sec.
+
+	* tune/common.c (speed_mpn_addlsh_n, speed_mpn_sublsh_n)
+	(speed_mpn_rsblsh_n): New functions.
+	* tune/speed.h: Declare new functions.
+	* tune/speed.c (routine): Add new functions.
+
+2010-05-12  Torbjorn Granlund  <tege at gmplib.org>
+
+	* mpn/x86_64/mod_1_4.asm: Tune for more processors.
+
+	* mpn/x86_64/pentium4/lshiftc.asm: New file.
+
 2010-05-11  Niels Möller  <nisse at lysator.liu.se>
 
 	* mpz/jacobi.c (mpz_jacobi): Deleted old implementation.
-	Reorganized new implementation, to handle small inputs effciently.
+	Reorganized new implementation, to handle small inputs efficiently.
 
 	* tests/mpz/t-jac.c (check_large_quotients): Reduced test sizes.
 	(check_data): One more input pair related to a fixed bug.
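
The tune/ additions listed above (speed_mpz_powm_sec, speed_mpn_addlsh_n,
speed_mpn_sublsh_n, speed_mpn_rsblsh_n) fall in the portion of the diff that
was truncated away.  By convention a new entry point in tune/common.c takes a
struct speed_params and returns seconds per call, and is then listed in the
routine table in tune/speed.c so that ./speed can invoke it by name.  A rough
sketch of that shape; the plain malloc scratch and the fixed shift count of 1
are stand-ins, and the framework names (struct speed_params, s->reps, s->xp,
s->yp, speed_starttime, speed_endtime) are quoted from memory, not from this
commit:

  #include <stdlib.h>
  /* struct speed_params, speed_starttime and speed_endtime come from
     tune/speed.h; mpn_addlsh_n is an internal gmp-impl.h entry point.
     Assumed shape only, not the committed code.  */
  double
  speed_mpn_addlsh_n (struct speed_params *s)
  {
    mp_ptr wp = malloc (s->size * sizeof (mp_limb_t));
    unsigned i;
    double t;

    speed_starttime ();
    i = s->reps;
    do
      mpn_addlsh_n (wp, s->xp, s->yp, s->size, 1);   /* routine under test */
    while (--i != 0);
    t = speed_endtime ();

    free (wp);
    return t / s->reps;                              /* seconds per call */
  }
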
diff -r 5b0baf874eb9 -r 2268a5f09f18 configure.in
--- a/configure.in	Tue May 11 16:09:10 2010 +0200
+++ b/configure.in	Thu May 13 11:35:37 2010 +0200
@@ -1244,6 +1244,8 @@
 	    path_64="sparc64/ultrasparc12 sparc64" ;;
 	  ultrasparc3)
 	    path_64="sparc64/ultrasparc34 sparc64/ultrasparc12 sparc64" ;;
+	  ultrasparct1 | ultrasparct2)
+	    path_64="sparc64/ultrasparct1 sparc64" ;;
 	  *)
 	    path_64="sparc64"
 	esac
diff -r 5b0baf874eb9 -r 2268a5f09f18 mpn/x86_64/aorrlsh_n.asm
--- a/mpn/x86_64/aorrlsh_n.asm	Tue May 11 16:09:10 2010 +0200
+++ b/mpn/x86_64/aorrlsh_n.asm	Thu May 13 11:35:37 2010 +0200
@@ -2,7 +2,7 @@
 dnl  ("rsb" means reversed subtract, name mandated by mpn_sublsh1_n which
 dnl  subtacts the shifted operand from the unshifted operand.)
 
-dnl  Copyright 2006 Free Software Foundation, Inc.
+dnl  Copyright 2006, 2010 Free Software Foundation, Inc.
 
 dnl  This file is part of the GNU MP Library.
 
@@ -23,13 +23,13 @@
 
 
 C	     cycles/limb
-C AMD K8,K9	 3.25	(mpn_lshift + mpn_add_n costs about 4.1 c/l)
-C AMD K10	 3.25	(mpn_lshift + mpn_add_n costs about 4.1 c/l)
-C Intel P4	14
-C Intel core2	 4
-C Intel corei	 ?
+C AMD K8,K9	 3.25	(mpn_lshift + mpn_add_n costs 3.85 c/l)
+C AMD K10	 3.25	(mpn_lshift + mpn_add_n costs 3.85 c/l)
+C Intel P4	15	(mpn_lshift + mpn_add_n costs 7.33 c/l)
+C Intel core2	 4	(mpn_lshift + mpn_add_n costs 3.27 c/l)
+C Intel corei	 4	(mpn_lshift + mpn_add_n costs 3.75 c/l)
 C Intel atom	 ?
-C VIA nano	 ?
+C VIA nano	 4.7	(mpn_lshift + mpn_add_n costs 6.25 c/l)
 
 C This was written quickly and not optimized at all.  Surely one could get
 C closer to 3 c/l or perhaps even under 3 c/l.  Ideas:
@@ -38,6 +38,7 @@
 C   3) Be more clever about register usage
 C   4) Unroll more, handling CL negation, carry save/restore cost much now
 C   5) Reschedule
+C   6) Use shld/shrd on Intel core and probably even on AMD K8-K10
 
 C INPUT PARAMETERS
 define(`rp',	`%rdi')
diff -r 5b0baf874eb9 -r 2268a5f09f18 mpn/x86_64/mod_1_4.asm
--- a/mpn/x86_64/mod_1_4.asm	Tue May 11 16:09:10 2010 +0200
+++ b/mpn/x86_64/mod_1_4.asm	Thu May 13 11:35:37 2010 +0200
@@ -2,7 +2,7 @@
 
 dnl  Contributed to the GNU project by Torbjorn Granlund.
 
-dnl  Copyright 2009 Free Software Foundation, Inc.
+dnl  Copyright 2009, 2010 Free Software Foundation, Inc.
 
 dnl  This file is part of the GNU MP Library.
 
@@ -26,9 +26,9 @@
 C AMD K10	 3
 C Intel P4	15.5
 C Intel core2	 5
-C Intel corei	 4.25
+C Intel corei	 4
 C Intel atom	23
-C VIA nano	 5
+C VIA nano	 4.75
 
 ASM_START()
 	TEXT
@@ -97,8 +97,8 @@
 	mul	%r11			C up[1] * B1modb
 	add	%rax, %r10
 	mov	-16(%rdi), %rax
-	mov	%rdx, %rcx
-	adc	$0, %rcx
+	mov	$0, R32(%rcx)
+	adc	%rdx, %rcx
 	mul	%rbx			C up[2] * B2modb
 	add	%rax, %r10
 	mov	-8(%rdi), %rax
@@ -106,13 +106,13 @@
 	sub	$32, %rdi
 	mul	%rbp			C up[3] * B3modb
 	add	%rax, %r10
-	mov	%r9, %rax
+	mov	%r13, %rax
 	adc	%rdx, %rcx
-	mul	%r13			C rl * B4modb
+	mul	%r9			C rl * B4modb
 	add	%rax, %r10
-	mov	%r8, %rax
+	mov	%r12, %rax
 	adc	%rdx, %rcx
-	mul	%r12			C rh * B5modb
+	mul	%r8			C rh * B5modb
 	mov	%r10, %r9
 	mov	%rcx, %r8
 L(m0):	add	%rax, %r9
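
For orientation, the hunk above is part of the mod_1s_4p-style inner loop:
its comments name the terms up[1]*B1modb through rh*B5modb, where the Bkmodb
are precomputed values B^k mod b (B = 2^64 here).  The reduction relies on
the congruence

  u0 + u1*B + u2*B^2 + u3*B^3 + rl*B^4 + rh*B^5
    == u0 + u1*B1modb + u2*B2modb + u3*B3modb + rl*B4modb + rh*B5modb  (mod b)

The toy program below only checks that congruence, using 16-bit limbs so
everything fits in a uint64_t; the divisor and input values are arbitrary,
and the real code additionally keeps the running sum within two limbs with
careful carry handling:

  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    const uint64_t B = 1 << 16;                  /* toy limb base */
    const uint64_t b = 40961;                    /* toy divisor   */
    uint64_t u0 = 12345, u1 = 54321, u2 = 7, u3 = 65535, rl = 4242, rh = 17;
    uint64_t v[6] = { rh, rl, u3, u2, u1, u0 };  /* most significant first */
    uint64_t Bk[6], full = 0, folded;
    int k;

    Bk[0] = 1 % b;
    for (k = 1; k <= 5; k++)
      Bk[k] = (Bk[k - 1] * B) % b;               /* B^k mod b */

    for (k = 0; k < 6; k++)
      full = (full * B + v[k]) % b;              /* direct value mod b */

    folded = (u0 + u1 * Bk[1] + u2 * Bk[2] + u3 * Bk[3]
              + rl * Bk[4] + rh * Bk[5]) % b;    /* folded form, as in the asm */

    printf (full == folded ? "congruence holds\n" : "mismatch\n");
    return 0;
  }
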
diff -r 5b0baf874eb9 -r 2268a5f09f18 mpn/x86_64/pentium4/gmp-mparam.h
--- a/mpn/x86_64/pentium4/gmp-mparam.h	Tue May 11 16:09:10 2010 +0200
+++ b/mpn/x86_64/pentium4/gmp-mparam.h	Thu May 13 11:35:37 2010 +0200
@@ -25,6 +25,8 @@
    than separate add/sub and shift.  Make sure they are not really used.  */
 #undef HAVE_NATIVE_mpn_rsblsh1_n
 #undef HAVE_NATIVE_mpn_rsblsh2_n
+#undef HAVE_NATIVE_mpn_addlsh_n
+#undef HAVE_NATIVE_mpn_rsblsh_n
 
 /* 3400 MHz Pentium / 1024 Kibyte cache */
 
diff -r 5b0baf874eb9 -r 2268a5f09f18 mpn/x86_64/pentium4/lshiftc.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/pentium4/lshiftc.asm	Thu May 13 11:35:37 2010 +0200
@@ -0,0 +1,162 @@
+dnl  x86-64 mpn_lshiftc optimized for Pentium 4.
+
+dnl  Copyright 2003, 2005, 2007, 2008, 2010 Free Software Foundation, Inc.
+dnl
+dnl  This file is part of the GNU MP Library.
+dnl
+dnl  The GNU MP Library is free software; you can redistribute it and/or
+dnl  modify it under the terms of the GNU Lesser General Public License as
+dnl  published by the Free Software Foundation; either version 3 of the
+dnl  License, or (at your option) any later version.
+dnl
+dnl  The GNU MP Library is distributed in the hope that it will be useful,
+dnl  but WITHOUT ANY WARRANTY; without even the implied warranty of
+dnl  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+dnl  Lesser General Public License for more details.
+dnl
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+C	     cycles/limb
+C AMD K8,K9	 ?
+C AMD K10	 ?
+C Intel P4	 4.15
+C Intel core2	 ?
+C Intel corei	 ?
+C Intel atom	 ?
+C VIA nano	 ?
+
+C INPUT PARAMETERS
+define(`rp',`%rdi')
+define(`up',`%rsi')
+define(`n',`%rdx')
+define(`cnt',`%cl')
+
+ASM_START()
+	TEXT
+	ALIGN(32)
+PROLOGUE(mpn_lshiftc)
+	mov	-8(up,n,8), %rax
+	pcmpeqd	%mm6, %mm6		C 0xffff...fff
+	movd	%ecx, %mm4
+	neg	%ecx			C put rsh count in cl
+	and	$63, %ecx
+	movd	%ecx, %mm5
+
+	lea	1(n), %r8d
+
+	shr	%cl, %rax		C function return value
+
+	and	$3, %r8d
+	je	L(rol)			C jump for n = 3, 7, 11, ...
+
+	dec	%r8d
+	jne	L(1)
+C	n = 4, 8, 12, ...
+	movq	-8(up,n,8), %mm2
+	psllq	%mm4, %mm2
+	movq	-16(up,n,8), %mm0
+	pxor	%mm6, %mm2
+	psrlq	%mm5, %mm0
+	pandn	%mm2, %mm0
+	movq	%mm0, -8(rp,n,8)
+	dec	n
+	jmp	L(rol)
+
+L(1):	dec	%r8d
+	je	L(1x)			C jump for n = 1, 5, 9, 13, ...
+C	n = 2, 6, 10, 16, ...
+	movq	-8(up,n,8), %mm2
+	psllq	%mm4, %mm2
+	movq	-16(up,n,8), %mm0
+	pxor	%mm6, %mm2
+	psrlq	%mm5, %mm0
+	pandn	%mm2, %mm0
+	movq	%mm0, -8(rp,n,8)
+	dec	n
+L(1x):
+	cmp	$1, n
+	je	L(ast)
+	movq	-8(up,n,8), %mm2
+	psllq	%mm4, %mm2
+	movq	-16(up,n,8), %mm3
+	psllq	%mm4, %mm3
+	movq	-16(up,n,8), %mm0
+	movq	-24(up,n,8), %mm1
+	pxor	%mm6, %mm2
+	psrlq	%mm5, %mm0
+	pandn	%mm2, %mm0
+	pxor	%mm6, %mm3
+	psrlq	%mm5, %mm1
+	pandn	%mm3, %mm1
+	movq	%mm0, -8(rp,n,8)
+	movq	%mm1, -16(rp,n,8)
+	sub	$2, n
+
+L(rol):	movq	-8(up,n,8), %mm2
+	psllq	%mm4, %mm2
+	movq	-16(up,n,8), %mm3
+	psllq	%mm4, %mm3
+
+	sub	$4, n
+	jb	L(end)
+	ALIGN(32)
+L(top):
+	C finish stuff from lsh block
+	movq	16(up,n,8), %mm0
+	pxor	%mm6, %mm2
+	movq	8(up,n,8), %mm1
+	psrlq	%mm5, %mm0
+	psrlq	%mm5, %mm1
+	pandn	%mm2, %mm0
+	pxor	%mm6, %mm3
+	movq	%mm0, 24(rp,n,8)
+	movq	(up,n,8), %mm0
+	pandn	%mm3, %mm1
+	movq	%mm1, 16(rp,n,8)
+	movq	-8(up,n,8), %mm1
+	C start two new rsh
+	psrlq	%mm5, %mm0
+	psrlq	%mm5, %mm1
+
+	C finish stuff from rsh block
+	movq	8(up,n,8), %mm2
+	pxor	%mm6, %mm0
+	movq	(up,n,8), %mm3
+	psllq	%mm4, %mm2
+	psllq	%mm4, %mm3
+	pandn	%mm0, %mm2
+	pxor	%mm6, %mm1
+	movq	%mm2, 8(rp,n,8)
+	movq	-8(up,n,8), %mm2
+	pandn	%mm1, %mm3
+	movq	%mm3, (rp,n,8)
+	movq	-16(up,n,8), %mm3
+	C start two new lsh
+	sub	$4, n
+	psllq	%mm4, %mm2
+	psllq	%mm4, %mm3

