[Gmp-commit] /home/hgfiles/gmp: 5 new changesets

mercurial at gmplib.org
Tue Mar 23 13:32:20 CET 2010


details:   /home/hgfiles/gmp/rev/a7c16af206d8
changeset: 13524:a7c16af206d8
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Tue Mar 23 10:28:45 2010 +0100
description:
Generalise ia64 add+lsh functions.
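
For context: the addlshC/sublshC family computes rp[] = up[] +/- (vp[] << C)
over n limbs and returns the carry/borrow out, which is at most 2^C.  The
plain-C reference below covers the add case only and is an illustrative
sketch, not GMP code; the name ref_addlsh_n is made up here, and it assumes
the usual gmp.h limb types, no nail bits, and 0 < c < GMP_NUMB_BITS.

  #include <gmp.h>

  /* Illustrative reference only: rp[] = up[] + (vp[] << c) over n limbs,
     returning the high carry-out (at most 2^c).  */
  static mp_limb_t
  ref_addlsh_n (mp_limb_t *rp, const mp_limb_t *up, const mp_limb_t *vp,
                mp_size_t n, unsigned c)
  {
    mp_limb_t carry = 0, prev = 0;   /* prev = bits shifted out of vp[i-1] */
    for (mp_size_t i = 0; i < n; i++)
      {
        mp_limb_t s = (vp[i] << c) | prev;     /* vp limb shifted left by c */
        prev = vp[i] >> (GMP_NUMB_BITS - c);   /* bits carried to next limb */
        mp_limb_t sum = up[i] + s;
        mp_limb_t c1 = sum < up[i];            /* carry from up[i] + s      */
        rp[i] = sum + carry;
        carry = c1 + (rp[i] < sum);            /* per-limb carry is 0 or 1  */
      }
    return prev + carry;
  }

The generalised ia64 file (aorslshC_n.asm) parameterises the shared loop by
the shift count LSH and its complement RSH = 64 - LSH, so addlsh1/sublsh1
and, per the diffstat below, the new aorslsh2_n.asm can reuse one loop body.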

details:   /home/hgfiles/gmp/rev/3d1fecb2a105
changeset: 13525:3d1fecb2a105
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Tue Mar 23 10:30:14 2010 +0100
description:
Misc cleanups.

details:   /home/hgfiles/gmp/rev/a4ea14d9b3cd
changeset: 13526:a4ea14d9b3cd
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Tue Mar 23 10:33:56 2010 +0100
description:
Clean up obsolete comments.

details:   /home/hgfiles/gmp/rev/581f85c5ff21
changeset: 13527:581f85c5ff21
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Tue Mar 23 10:35:06 2010 +0100
description:
Remove a pointless mutex decl.

details:   /home/hgfiles/gmp/rev/bbd2c2155a37
changeset: 13528:bbd2c2155a37
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Tue Mar 23 10:37:50 2010 +0100
description:
Use mpn_invert_limb instead of div insn.
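
For context: replacing the div instruction means dividing by a precomputed
reciprocal of the normalized divisor d, di = floor((B^2 - 1)/d) - B with
B = 2^64, so each division step costs a multiply plus a few conditional
adjustments rather than a slow hardware divide.  The sketch below is a rough
C rendering of this well-known scheme, not GMP's code: the helper names are
made up, it assumes a compiler with unsigned __int128, and it computes the
inverse with one 128-bit division for clarity, whereas the real
mpn_invert_limb avoids division altogether.  (mpn_divrem_2 itself divides by
a two-limb divisor and so needs a 3-by-2 variant of this step, but the
2-by-1 case shows the idea, and the divisor is invariant across the whole
division, so the one-off cost of the inverse is amortised.)

  #include <stdint.h>

  /* di = floor((B^2 - 1)/d) - B for a 64-bit d with its top bit set.
     (B^2 - 1) - B*d equals (~d)*B + (B - 1), which fits in 128 bits.  */
  static uint64_t
  invert_limb_ref (uint64_t d)
  {
    return (uint64_t) ((((unsigned __int128) ~d << 64) | ~(uint64_t) 0) / d);
  }

  /* Divide (nh,nl) by d, with nh < d and d normalized, using di from above:
     one 64x64->128 multiply plus adjustments, no div instruction.  */
  static void
  udiv_qrnnd_preinv_ref (uint64_t *q, uint64_t *r,
                         uint64_t nh, uint64_t nl, uint64_t d, uint64_t di)
  {
    unsigned __int128 p = (unsigned __int128) nh * di;
    p += ((unsigned __int128) (nh + 1) << 64) | nl;  /* wraparound is harmless */
    uint64_t qh = (uint64_t) (p >> 64);              /* candidate quotient     */
    uint64_t ql = (uint64_t) p;
    uint64_t rem = nl - qh * d;
    if (rem > ql)                 /* candidate quotient one too large */
      { qh--; rem += d; }
    if (rem >= d)                 /* rare: candidate one too small */
      { qh++; rem -= d; }
    *q = qh;
    *r = rem;
  }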

diffstat:

 ChangeLog                           |   13 +-
 mpn/ia64/aorslsh1_n.asm             |  284 +--------------------------------
 mpn/ia64/aorslsh2_n.asm             |   48 +++++
 mpn/ia64/aorslshC_n.asm             |  309 ++++++++++++++++++++++++++++++++++++
 mpn/ia64/mul_1.asm                  |    1 -
 mpn/powerpc64/mode64/aorslshC_n.asm |   30 +-
 mpn/x86_64/core2/divrem_1.asm       |    8 +-
 mpn/x86_64/divrem_1.asm             |    2 +-
 mpn/x86_64/divrem_2.asm             |   41 ++--
 9 files changed, 412 insertions(+), 324 deletions(-)

diffs (truncated from 877 to 300 lines):

diff -r 4366f13f8b34 -r bbd2c2155a37 ChangeLog
--- a/ChangeLog	Mon Mar 22 15:13:36 2010 +0100
+++ b/ChangeLog	Tue Mar 23 10:37:50 2010 +0100
@@ -1,3 +1,12 @@
+2010-03-23  Torbjorn Granlund  <tege at gmplib.org>
+
+	* mpn/x86_64/divrem_2.asm: Use mpn_invert_limb instead of div insn.
+
+	* mpn/ia64/aorslshC_n.asm: New file, generalised from last iteration of
+	aorslsh1_n.asm.
+	* mpn/ia64/aorslsh1_n.asm: Use aorslshC_n.asm.
+	* mpn/ia64/aorslsh2_n.asm: New file, use aorslshC_n.asm.
+
 2010-03-20  Torbjorn Granlund  <tege at gmplib.org>
 
 	* mpn/powerpc64/mode64/invert_limb.asm: Rewrite to exploit cancellation
@@ -6,10 +15,10 @@
 2010-03-20 Marco Bodrato <bodrato at mail.dm.unipi.it>
 
 	* mpn/generic/toom_interpolate_8pts.c: Use mpn_sublsh2_n.
-	
+
 2010-03-20  Torbjorn Granlund  <tege at gmplib.org>
 
-	* mpn/powerpc64/mode64/aorslshC_n.asm: New file, generlised from
+	* mpn/powerpc64/mode64/aorslshC_n.asm: New file, generalised from
 	last iteration of aorslsh1_n.asm.
 	* mpn/powerpc64/mode64/aorslsh1_n.asm: Use aorslshC_n.asm.
 	* mpn/powerpc64/mode64/aorslsh1_n.asm: New file, use aorslshC_n.asm.
diff -r 4366f13f8b34 -r bbd2c2155a37 mpn/ia64/aorslsh1_n.asm
--- a/mpn/ia64/aorslsh1_n.asm	Mon Mar 22 15:13:36 2010 +0100
+++ b/mpn/ia64/aorslsh1_n.asm	Tue Mar 23 10:37:50 2010 +0100
@@ -23,14 +23,9 @@
 C Itanium:      3.0
 C Itanium 2:    1.5
 
-C TODO
-C  * Use shladd in feed-in code (for mpn_addlsh1_n).
 
-C INPUT PARAMETERS
-define(`rp',`r32')
-define(`up',`r33')
-define(`vp',`r34')
-define(`n',`r35')
+define(LSH,		1)
+define(RSH,		63)
 
 ifdef(`OPERATION_addlsh1_n',`
   define(ADDSUB,       add)
@@ -47,280 +42,7 @@
   define(func, mpn_sublsh1_n)
 ')
 
-C Some useful aliases for registers we use
-define(`u0',`r14') define(`u1',`r15') define(`u2',`r16') define(`u3',`r17')
-define(`v0',`r18') define(`v1',`r19') define(`v2',`r20') define(`v3',`r21')
-define(`w0',`r22') define(`w1',`r23') define(`w2',`r24') define(`w3',`r25')
-define(`s0',`r26') define(`s1',`r27') define(`s2',`r28') define(`s3',`r29')
-define(`x0',`r30') define(`x1',`r31') define(`x2',`r30') define(`x3',`r31')
 
 MULFUNC_PROLOGUE(mpn_addlsh1_n mpn_sublsh1_n)
 
-ASM_START()
-PROLOGUE(func)
-	.prologue
-	.save	ar.lc, r2
-	.body
-ifdef(`HAVE_ABI_32',`
-	addp4		rp = 0, rp		C			M I
-	addp4		up = 0, up		C			M I
-	addp4		vp = 0, vp		C			M I
-	zxt4		n = n			C			I
-	;;
-')
- {.mmi;	ld8		r11 = [vp], 8		C			M01
-	ld8		r10 = [up], 8		C			M01
-	mov.i		r2 = ar.lc		C			I0
-}{.mmi;	and		r14 = 3, n		C			M I
-	cmp.lt		p15, p0 = 4, n		C			M I
-	add		n = -4, n		C			M I
-	;;
-}{.mmi;	cmp.eq		p6, p0 = 1, r14		C			M I
-	cmp.eq		p7, p0 = 2, r14		C			M I
-	cmp.eq		p8, p0 = 3, r14		C			M I
-}{.bbb
-  (p6)	br.dptk		.Lb01			C			B
-  (p7)	br.dptk		.Lb10			C			B
-  (p8)	br.dptk		.Lb11			C			B
-}
-
-.Lb00:	ld8		v0 = [vp], 8		C			M01
-	ld8		u0 = [up], 8		C			M01
-	shr.u		n = n, 2		C			I0
-	;;
-	ld8		v1 = [vp], 8		C			M01
-	ld8		u1 = [up], 8		C			M01
-	add		x3 = r11, r11		C			M I
-	;;
-	ld8		v2 = [vp], 8		C			M01
-	ld8		u2 = [up], 8		C			M01
-	ADDSUB		w3 = r10, x3		C			M I
-  (p15)	br.dpnt		.grt4			C			B
-	;;
-	shrp		x0 = v0, r11, 63	C			I0
-	cmp.PRED	p8, p0 = w3, r10	C			M I
-	;;
-	shrp		x1 = v1, v0, 63		C			I0
-	ADDSUB		w0 = u0, x0		C			M I
-	;;
-	cmp.PRED	p6, p0 = w0, u0		C			M I
-	ADDSUB		w1 = u1, x1		C			M I
-	br		.Lcj4			C			B
-
-.grt4:	ld8		v3 = [vp], 8		C			M01
-	shrp		x0 = v0, r11, 63	C			I0
-	cmp.PRED	p8, p0 = w3, r10	C			M I
-	add		n = -1, n
-	;;
-	ld8		u3 = [up], 8		C			M01
-	mov.i		ar.lc = n		C			I0
-	shrp		x1 = v1, v0, 63		C			I0
-	ld8		v0 = [vp], 8		C			M01
-	ADDSUB		w0 = u0, x0		C			M I
-	;;
-	cmp.PRED	p6, p0 = w0, u0		C			M I
-	ld8		u0 = [up], 8		C			M01
-	ADDSUB		w1 = u1, x1		C			M I
-	br		.LL00			C			B
-
-.Lb01:	add		x2 = r11, r11		C			M I
-	shr.u		n = n, 2		C			I0
-  (p15)	br.dpnt		.grt1			C			B
-	;;
-	ADDSUB		w2 = r10, x2		C			M I
-	shr.u		r8 = r11, 63		C retval		I0
-	;;
-	cmp.PRED	p6, p0 = w2, r10	C			M I
-	;;
-	st8		[rp] = w2, 8		C			M23
-   (p6)	add		r8 = 1, r8		C			M I
-	br.ret.sptk.many b0			C			B
-
-.grt1:	ld8		v3 = [vp], 8		C			M01
-	ld8		u3 = [up], 8		C			M01
-	mov.i		ar.lc = n		C FIXME swap with next	I0
-	;;
-	ld8		v0 = [vp], 8		C			M01
-	ld8		u0 = [up], 8		C			M01
-	ADDSUB		w2 = r10, x2
-	;;
-	ld8		v1 = [vp], 8		C			M01
-	ld8		u1 = [up], 8		C			M01
-	shrp		x3 = v3, r11, 63	C			I0
-	;;
-	ld8		v2 = [vp], 8		C			M01
-	ld8		u2 = [up], 8		C			M01
-	cmp.PRED	p6, p0 = w2, r10	C			M I
-	ADDSUB		w3 = u3, x3		C			M I
-	br.cloop.dptk	.grt5			C			B
-	;;
-	shrp		x0 = v0, v3, 63		C			I0
-	cmp.PRED	p8, p0 = w3, u3		C			M I
-	br		.Lcj5			C			B
-
-.grt5:	shrp		x0 = v0, v3, 63		C			I0
-	ld8		v3 = [vp], 8		C			M01
-	cmp.PRED	p8, p0 = w3, u3		C			M I
-	br		.LL01			C			B
-
-.Lb10:	ld8		v2 = [vp], 8		C			M01
-	ld8		u2 = [up], 8		C			M01
-	shr.u		n = n, 2		C			I0
-	add		x1 = r11, r11		C			M I
-  (p15)	br.dpnt		.grt2			C			B
-	;;
-	ADDSUB		w1 = r10, x1		C			M I
-	shrp		x2 = v2, r11, 63	C			I0
-	;;
-	cmp.PRED	p8, p0 = w1, r10	C			M I
-	ADDSUB		w2 = u2, x2		C			M I
-	shr.u		r8 = v2, 63		C retval		I0
-	;;
-	cmp.PRED	p6, p0 = w2, u2		C			M I
-	br		.Lcj2			C			B
-
-.grt2:	ld8		v3 = [vp], 8		C			M01
-	ld8		u3 = [up], 8		C			M01
-	mov.i		ar.lc = n		C			I0
-	;;
-	ld8		v0 = [vp], 8		C			M01
-	ld8		u0 = [up], 8		C			M01
-	ADDSUB		w1 = r10, x1		C			M I
-	;;
-	ld8		v1 = [vp], 8		C			M01
-	shrp		x2 = v2, r11, 63	C			I0
-	cmp.PRED	p8, p0 = w1, r10	C			M I
-	;;
-	ld8		u1 = [up], 8		C			M01
-	shrp		x3 = v3, v2, 63		C			I0
-	ld8		v2 = [vp], 8		C			M01
-	ADDSUB		w2 = u2, x2		C			M I
-	;;
-	cmp.PRED	p6, p0 = w2, u2		C			M I
-	ld8		u2 = [up], 8		C			M01
-	ADDSUB		w3 = u3, x3		C			M I
-	br.cloop.dpnt	.Loop			C			B
-	br		.Lskip			C			B
-
-.Lb11:	ld8		v1 = [vp], 8		C			M01
-	ld8		u1 = [up], 8		C			M01
-	shr.u		n = n, 2		C			I0
-	add		x0 = r11, r11		C			M I
-	;;
-	ld8		v2 = [vp], 8		C			M01
-	ld8		u2 = [up], 8		C			M01
-  (p15)	br.dpnt		.grt3			C			B
-	;;
-
-	shrp		x1 = v1, r11, 63	C			I0
-	ADDSUB		w0 = r10, x0		C			M I
-	;;
-	cmp.PRED	p6, p0 = w0, r10	C			M I
-	ADDSUB		w1 = u1, x1		C			M I
-	;;
-	shrp		x2 = v2, v1, 63		C			I0
-	cmp.PRED	p8, p0 = w1, u1		C			M I
-	br		.Lcj3			C			B
-
-.grt3:	ld8		v3 = [vp], 8		C			M01
-	ld8		u3 = [up], 8		C			M01
-	mov.i		ar.lc = n		C			I0
-	shrp		x1 = v1, r11, 63	C			I0
-	ADDSUB		w0 = r10, x0		C			M I
-	;;
-	ld8		v0 = [vp], 8		C			M01
-	cmp.PRED	p6, p0 = w0, r10	C			M I
-	ld8		u0 = [up], 8		C			M01
-	ADDSUB		w1 = u1, x1		C			M I
-	;;
-	shrp		x2 = v2, v1, 63		C			I0
-	ld8		v1 = [vp], 8		C			M01
-	cmp.PRED	p8, p0 = w1, u1		C			M I
-	br		.LL11			C			B
-
-
-C *** MAIN LOOP START ***
-	ALIGN(32)
-.Loop:	st8		[rp] = w1, 8		C			M23
-	shrp		x0 = v0, v3, 63		C			I0
-   (p8)	cmp.eq.or	p6, p0 = LIM, w2	C			M I
-   (p8)	add		w2 = INCR, w2		C			M I
-	ld8		v3 = [vp], 8		C			M01
-	cmp.PRED	p8, p0 = w3, u3		C			M I
-	;;
-.LL01:	ld8		u3 = [up], 8		C			M01
-	shrp		x1 = v1, v0, 63		C			I0
-   (p6)	cmp.eq.or	p8, p0 = LIM, w3	C			M I
-   (p6)	add		w3 = INCR, w3		C			M I
-	ld8		v0 = [vp], 8		C			M01
-	ADDSUB		w0 = u0, x0		C			M I
-	;;
-	st8		[rp] = w2, 8		C			M23
-	cmp.PRED	p6, p0 = w0, u0		C			M I
-	nop.b		0
-	ld8		u0 = [up], 8		C			M01
-	ADDSUB		w1 = u1, x1		C			M I
-	nop.b		0
-	;;
-.LL00:	st8		[rp] = w3, 8		C			M23
-	shrp		x2 = v2, v1, 63		C			I0
-   (p8)	cmp.eq.or	p6, p0 = LIM, w0	C			M I
-   (p8)	add		w0 = INCR, w0		C			M I
-	ld8		v1 = [vp], 8		C			M01
-	cmp.PRED	p8, p0 = w1, u1		C			M I
-	;;
-.LL11:	ld8		u1 = [up], 8		C			M01
-	shrp		x3 = v3, v2, 63		C			I0
-   (p6)	cmp.eq.or	p8, p0 = LIM, w1	C			M I
-   (p6)	add		w1 = INCR, w1		C			M I
-	ld8		v2 = [vp], 8		C			M01
-	ADDSUB		w2 = u2, x2		C			M I
-	;;
-	st8		[rp] = w0, 8		C			M23
-	cmp.PRED	p6, p0 = w2, u2		C			M I
-	nop.b		0
-	ld8		u2 = [up], 8		C			M01
-	ADDSUB		w3 = u3, x3		C			M I
-	br.cloop.dptk	.Loop			C			B
-	;;
-C *** MAIN LOOP END ***
-
-.Lskip:	st8		[rp] = w1, 8		C			M23
-	shrp		x0 = v0, v3, 63		C			I0
-   (p8)	cmp.eq.or	p6, p0 = LIM, w2	C			M I
-   (p8)	add		w2 = INCR, w2		C			M I
-	cmp.PRED	p8, p0 = w3, u3		C			M I
-	;;
-.Lcj5:	shrp		x1 = v1, v0, 63		C			I0
-   (p6)	cmp.eq.or	p8, p0 = LIM, w3	C			M I
-   (p6)	add		w3 = INCR, w3		C			M I

