[Gmp-commit] /var/hg/gmp: 3 new changesets

mercurial at gmplib.org
Thu Apr 12 22:40:31 CEST 2012


details:   /var/hg/gmp/rev/f0049ae7198f
changeset: 14810:f0049ae7198f
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu Apr 12 21:16:21 2012 +0200
description:
Simplify to very basic form.

details:   /var/hg/gmp/rev/bc8904ed515b
changeset: 14811:bc8904ed515b
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu Apr 12 22:37:55 2012 +0200
description:
Clarify.

details:   /var/hg/gmp/rev/81644c27d7c8
changeset: 14812:81644c27d7c8
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Thu Apr 12 22:40:26 2012 +0200
description:
New SSE shift code.
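
The three new -movdqu2 files implement mpn_lshift, mpn_rshift, and mpn_lshiftc
(left shift with complemented output). For readers unfamiliar with the mpn
contract, here is a minimal portable C sketch of what mpn_lshift computes.
This is illustrative only, not GMP's code; ref_lshift is a hypothetical name,
and 64-bit limbs are assumed, as on x86-64:

#include <stdint.h>
#include <stddef.h>

typedef uint64_t limb_t;   /* assumes 64-bit limbs */

/* Shift the n-limb number {ap,n} left by cnt bits (0 < cnt < 64),
   store the low n limbs at {rp,n}, and return the bits shifted out
   the top.  Walks from the top limb down, as the asm does.  */
limb_t ref_lshift(limb_t *rp, const limb_t *ap, size_t n, unsigned cnt)
{
    unsigned tnc = 64 - cnt;            /* complementary shift count */
    limb_t out = ap[n - 1] >> tnc;      /* high cnt bits fall off the top */
    for (size_t i = n - 1; i > 0; i--)
        rp[i] = (ap[i] << cnt) | (ap[i - 1] >> tnc);
    rp[0] = ap[0] << cnt;
    return out;
}

mpn_rshift mirrors this from the low limb up, and mpn_lshiftc additionally
ones-complements each stored limb.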

diffstat:

 ChangeLog                              |   14 ++
 mpn/x86_64/coreisbr/lshift.asm         |   26 ++++
 mpn/x86_64/coreisbr/lshiftc.asm        |   26 ++++
 mpn/x86_64/coreisbr/rshift.asm         |   26 ++++
 mpn/x86_64/fastsse/README              |    3 +-
 mpn/x86_64/fastsse/lshift-movdqu2.asm  |  168 ++++++++++++++++++++++++++++++
 mpn/x86_64/fastsse/lshift.asm          |   32 +---
 mpn/x86_64/fastsse/lshiftc-movdqu2.asm |  179 ++++++++++++++++++++++++++++++++
 mpn/x86_64/fastsse/rshift-movdqu2.asm  |  184 +++++++++++++++++++++++++++++++++
 mpn/x86_64/k10/lshift.asm              |   26 ++++
 mpn/x86_64/k10/lshiftc.asm             |   26 ++++
 mpn/x86_64/k10/rshift.asm              |   26 ++++
 12 files changed, 714 insertions(+), 22 deletions(-)

diffs (truncated from 835 to 300 lines):

diff -r 7c37eae1640e -r 81644c27d7c8 ChangeLog
--- a/ChangeLog	Wed Apr 11 23:36:40 2012 +0200
+++ b/ChangeLog	Thu Apr 12 22:40:26 2012 +0200
@@ -1,3 +1,17 @@
+2012-04-12  Torbjorn Granlund  <tege at gmplib.org>
+
+	* mpn/x86_64/fastsse/lshift-movdqu2.asm: New file.
+	* mpn/x86_64/fastsse/rshift-movdqu2.asm: New file.
+	* mpn/x86_64/fastsse/lshiftc-movdqu2.asm: New file.
+	* mpn/x86_64/coreisbr/lshift.asm: New file.
+	* mpn/x86_64/coreisbr/rshift.asm: New file.
+	* mpn/x86_64/coreisbr/lshiftc.asm: New file.
+	* mpn/x86_64/k10/lshift.asm: New file.
+	* mpn/x86_64/k10/rshift.asm: New file.
+	* mpn/x86_64/k10/lshiftc.asm: New file.
+
+	* mpn/x86_64/fastsse/lshift.asm: Simplify to very basic form.
+
 2012-04-11  Niels Möller  <nisse at lysator.liu.se>
 
 	* Makefile.am (check-mini-gmp): Pass -I../.. in EXTRA_CFLAGS, to
diff -r 7c37eae1640e -r 81644c27d7c8 mpn/x86_64/coreisbr/lshift.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/coreisbr/lshift.asm	Thu Apr 12 22:40:26 2012 +0200
@@ -0,0 +1,26 @@
+dnl  X86-64 mpn_lshift optimised for Intel Sandy Bridge.
+
+dnl  Copyright 2012 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+ABI_SUPPORT(DOS64)
+ABI_SUPPORT(STD64)
+
+MULFUNC_PROLOGUE(mpn_lshift)
+include_mpn(`x86_64/fastsse/lshift-movdqu2.asm')
diff -r 7c37eae1640e -r 81644c27d7c8 mpn/x86_64/coreisbr/lshiftc.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/coreisbr/lshiftc.asm	Thu Apr 12 22:40:26 2012 +0200
@@ -0,0 +1,26 @@
+dnl  X86-64 mpn_lshiftc optimised for Intel Sandy Bridge.
+
+dnl  Copyright 2012 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+ABI_SUPPORT(DOS64)
+ABI_SUPPORT(STD64)
+
+MULFUNC_PROLOGUE(mpn_lshiftc)
+include_mpn(`x86_64/fastsse/lshiftc-movdqu2.asm')
diff -r 7c37eae1640e -r 81644c27d7c8 mpn/x86_64/coreisbr/rshift.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/coreisbr/rshift.asm	Thu Apr 12 22:40:26 2012 +0200
@@ -0,0 +1,26 @@
+dnl  X86-64 mpn_rshift optimised for Intel Sandy Bridge.
+
+dnl  Copyright 2012 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+ABI_SUPPORT(DOS64)
+ABI_SUPPORT(STD64)
+
+MULFUNC_PROLOGUE(mpn_rshift)
+include_mpn(`x86_64/fastsse/rshift-movdqu2.asm')
diff -r 7c37eae1640e -r 81644c27d7c8 mpn/x86_64/fastsse/README
--- a/mpn/x86_64/fastsse/README	Wed Apr 11 23:36:40 2012 +0200
+++ b/mpn/x86_64/fastsse/README	Thu Apr 12 22:40:26 2012 +0200
@@ -2,6 +2,7 @@
 implementations of SSE operations, hence the name "fastsse".
 
 Current processors that might benefit from this code are:
+
   AMD K10
   AMD Bulldozer
   Intel Nocona
@@ -16,4 +17,4 @@
   Intel Atom
 
 Intel Conroe/Penryn is a border case; its handling of non-aligned
-memory operands is poor.
+128-bit memory operands is poor.
diff -r 7c37eae1640e -r 81644c27d7c8 mpn/x86_64/fastsse/lshift-movdqu2.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/fastsse/lshift-movdqu2.asm	Thu Apr 12 22:40:26 2012 +0200
@@ -0,0 +1,168 @@
+dnl  AMD64 mpn_lshift optimised for CPUs with fast SSE including fast movdqu.
+
+dnl  Contributed to the GNU project by Torbjorn Granlund.
+
+dnl  Copyright 2010, 2011, 2012 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+C	     cycles/limb     cycles/limb     cycles/limb    good
+C              aligned	      unaligned	      best seen	   for cpu?
+C AMD K8,K9	 3		 3		 2.35	  no, use shl/shr
+C AMD K10	 1.5-1.8	 1.5-1.8	 1.33	  yes
+C AMD bd1	 1.7-1.9	 1.7-1.9	 1.33	  yes
+C AMD bobcat	 3.17		 3.17			  yes, bad for n < 20
+C Intel P4	 4.67		 4.67		 2.7	  no, slow movdqu
+C Intel core2	 2.15		 2.15		 1.25	  no, use shld/shrd
+C Intel NHM	 1.66		 1.66		 1.25	  no, use shld/shrd
+C Intel SBR	 1.3		 1.3		 1.25	  yes, bad for n = 4-6
+C Intel atom	11.7		11.7		 4.5	  no
+C VIA nano	 5.7		 5.95		 2.0	  no, slow movdqu
+
+C We try to do as many aligned 16-byte operations as possible.  The top-most
+C and bottom-most writes might need 8-byte operations.
+C
+C This variant relies on fast movdqu loads, and uses movdqu even for aligned
+C operands, in order to avoid the need for two separate loops.
+C
+C TODO
+C  * Could 2-limb wind-down code be simplified?
+C  * Improve basecase code, using shld/shrd for SBR, discrete integer shifts
+C    for other affected CPUs.
+
+C INPUT PARAMETERS
+define(`rp',  `%rdi')
+define(`ap',  `%rsi')
+define(`n',   `%rdx')
+define(`cnt', `%rcx')
+
+ASM_START()
+	TEXT
+	ALIGN(64)
+PROLOGUE(mpn_lshift)
+	movd	R32(%rcx), %xmm4
+	mov	$64, R32(%rax)
+	sub	R32(%rcx), R32(%rax)
+	movd	R32(%rax), %xmm5
+
+	neg	R32(%rcx)
+	mov	-8(ap,n,8), %rax
+	shr	R8(%rcx), %rax
+
+	cmp	$3, n
+	jle	L(bc)
+
+	lea	(rp,n,8), R32(%rcx)
+	bt	$3, R32(%rcx)
+	jnc	L(rp_aligned)
+
+C Do one initial limb in order to make rp aligned
+	movq	-8(ap,n,8), %xmm0
+	movq	-16(ap,n,8), %xmm1
+	psllq	%xmm4, %xmm0
+	psrlq	%xmm5, %xmm1
+	por	%xmm1, %xmm0
+	movq	%xmm0, -8(rp,n,8)
+	dec	n
+
+L(rp_aligned):
+	lea	1(n), %r8d
+
+	and	$6, R32(%r8)
+	jz	L(ba0)
+	cmp	$4, R32(%r8)
+	jz	L(ba4)
+	jc	L(ba2)
+L(ba6):	add	$-4, n
+	jmp	L(i56)
+L(ba0):	add	$-6, n
+	jmp	L(i70)
+L(ba4):	add	$-2, n
+	jmp	L(i34)
+L(ba2):	add	$-8, n
+	jle	L(end)
+
+	ALIGN(16)
+L(top):	movdqu	40(ap,n,8), %xmm1
+	movdqu	48(ap,n,8), %xmm0
+	psllq	%xmm4, %xmm0
+	psrlq	%xmm5, %xmm1
+	por	%xmm1, %xmm0
+	movdqa	%xmm0, 48(rp,n,8)
+L(i70):
+	movdqu	24(ap,n,8), %xmm1
+	movdqu	32(ap,n,8), %xmm0
+	psllq	%xmm4, %xmm0
+	psrlq	%xmm5, %xmm1
+	por	%xmm1, %xmm0
+	movdqa	%xmm0, 32(rp,n,8)
+L(i56):
+	movdqu	8(ap,n,8), %xmm1
+	movdqu	16(ap,n,8), %xmm0
+	psllq	%xmm4, %xmm0
+	psrlq	%xmm5, %xmm1
+	por	%xmm1, %xmm0
+	movdqa	%xmm0, 16(rp,n,8)
+L(i34):
+	movdqu	-8(ap,n,8), %xmm1
+	movdqu	(ap,n,8), %xmm0
+	psllq	%xmm4, %xmm0
+	psrlq	%xmm5, %xmm1
+	por	%xmm1, %xmm0
+	movdqa	%xmm0, (rp,n,8)
+	sub	$8, n
+	jg	L(top)
+
+L(end):	bt	$0, R32(n)
+	jc	L(end8)
+
+	movdqu	(ap), %xmm1
+	pxor	%xmm0, %xmm0
+	punpcklqdq  %xmm1, %xmm0
+	psllq	%xmm4, %xmm1
+	psrlq	%xmm5, %xmm0
+	por	%xmm1, %xmm0
+	movdqa	%xmm0, (rp)
+	ret
+
+C Basecase
+	ALIGN(16)
+L(bc):	dec	R32(n)
+	jz	L(end8)
+
+	movq	(ap,n,8), %xmm1
+	movq	-8(ap,n,8), %xmm0
+	psllq	%xmm4, %xmm1
+	psrlq	%xmm5, %xmm0
+	por	%xmm1, %xmm0
+	movq	%xmm0, (rp,n,8)
+	sub	$2, R32(n)
+	jl	L(end8)
+	movq	8(ap), %xmm1
+	movq	(ap), %xmm0
+	psllq	%xmm4, %xmm1
+	psrlq	%xmm5, %xmm0
+	por	%xmm1, %xmm0
+	movq	%xmm0, 8(rp)
+
+L(end8):movq	(ap), %xmm0
+	psllq	%xmm4, %xmm0
+	movq	%xmm0, (rp)
+	ret
+EPILOGUE()
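
For illustration, the main-loop step above maps directly onto SSE2
intrinsics. This is a hedged sketch, not part of the commit (the helper and
all names are mine): each step loads two 16-byte vectors from ap, one limb
apart, with unaligned loads, shifts the 64-bit lanes left by cnt and right by
64-cnt using the counts prepared in xmm4/xmm5, ORs the halves, and does one
aligned 16-byte store, matching each movdqu/movdqu/psllq/psrlq/por/movdqa
group in L(top):

#include <emmintrin.h>   /* SSE2 intrinsics */
#include <stdint.h>
#include <stddef.h>

/* One unrolled step of L(top): produce output limbs r[i] and r[i+1].
   hi = {a[i], a[i+1]} and lo = {a[i-1], a[i]} come from unaligned loads
   (movdqu); the store is aligned (movdqa), so r+i must be 16-byte
   aligned -- the asm peels one leading limb with movq to ensure this.  */
static inline void sse_lshift_step(uint64_t *r, const uint64_t *a, size_t i,
                                   __m128i cnt, __m128i tnc)
{
    __m128i hi  = _mm_loadu_si128((const __m128i *)(a + i));      /* movdqu */
    __m128i lo  = _mm_loadu_si128((const __m128i *)(a + i - 1));  /* movdqu */
    __m128i res = _mm_or_si128(_mm_sll_epi64(hi, cnt),            /* psllq  */
                               _mm_srl_epi64(lo, tnc));           /* psrlq  */
    _mm_store_si128((__m128i *)(r + i), res);                     /* movdqa */
}

/* Driver for the interior limb pairs; count setup happens once, as the
   movd into xmm4/xmm5 above.  Precondition: 0 < c < 64 and r+1 is
   16-byte aligned.  The top and bottom limbs still need the 8-byte
   handling shown in the asm's basecase and wind-down code.  */
void sse_lshift_pairs(uint64_t *r, const uint64_t *a, size_t n, unsigned c)
{
    __m128i cnt = _mm_cvtsi32_si128((int)c);         /* xmm4 */
    __m128i tnc = _mm_cvtsi32_si128((int)(64 - c));  /* xmm5 */
    for (size_t i = 1; i + 1 < n; i += 2)
        sse_lshift_step(r, a, i, cnt, tnc);
}

The aligned movdqa store is the reason the asm peels one leading limb when rp
is not 16-byte aligned: on the CPUs this code targets, unaligned loads are
cheap but unaligned stores are not, so only the store side is aligned.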

