[Gmp-commit] /home/hgfiles/gmp: Tweak to 1.5 c/l, less overhead.
mercurial at gmplib.org
Sun Jan 23 19:01:47 CET 2011
details: /home/hgfiles/gmp/rev/6024dde5c830
changeset: 13766:6024dde5c830
user: Torbjorn Granlund <tege at gmplib.org>
date: Sun Jan 23 19:01:38 2011 +0100
description:
Tweak to 1.5 c/l, less overhead.
diffstat:
ChangeLog | 2 +
mpn/ia64/mul_2.asm | 946 ++++++++++++++++++++++++----------------------------
2 files changed, 433 insertions(+), 515 deletions(-)
diffs (truncated from 1071 to 300 lines):
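Background (not part of the commit): mpn_mul_2 is an internal GMP loop that
multiplies the n-limb operand {up,n} by the 2-limb operand {vp,2}; "1.5 c/l"
means roughly 1.5 processor cycles per limb of up.  As a hedged reference
sketch only, the same result can be written with the public mpn_mul_1 and
mpn_addmul_1 calls below.  The helper name ref_mul_2 is hypothetical, and the
exact mpn_mul_2 contract assumed here (write the low n+1 product limbs to rp,
return the most significant limb) follows the usual mpn conventions rather
than anything stated in this patch.

    #include <gmp.h>

    /* Reference sketch: store the low n+1 limbs of {up,n} * {vp,2} at rp,
       return the most significant product limb.  Assumes n >= 1 and that
       {rp,n+1} does not overlap {up,n}.  */
    static mp_limb_t
    ref_mul_2 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_srcptr vp)
    {
      rp[n] = mpn_mul_1 (rp, up, n, vp[0]);        /* {rp,n+1}  = {up,n} * v0 */
      return mpn_addmul_1 (rp + 1, up, n, vp[1]);  /* add {up,n} * v1 one limb up;
                                                      carry out is the top limb */
    }

The dedicated assembly below fuses both passes into a single software-pipelined
loop over up, which is the point of having a mul_2 primitive at all and is
where the cycles-per-limb figure in the commit message comes from.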
diff -r 6eab34e38239 -r 6024dde5c830 ChangeLog
--- a/ChangeLog Sun Jan 23 18:25:29 2011 +0100
+++ b/ChangeLog Sun Jan 23 19:01:38 2011 +0100
@@ -1,5 +1,7 @@
2011-01-23 Torbjorn Granlund <tege at gmplib.org>
+ * mpn/ia64/mul_2.asm: Tweak to 1.5 c/l, less overhead.
+
* mpn/ia64/addmul_2.asm: Rewrite, adding mpn_addmul_2s entry point.
2011-01-22 Torbjorn Granlund <tege at gmplib.org>
diff -r 6eab34e38239 -r 6024dde5c830 mpn/ia64/mul_2.asm
--- a/mpn/ia64/mul_2.asm Sun Jan 23 18:25:29 2011 +0100
+++ b/mpn/ia64/mul_2.asm Sun Jan 23 19:01:38 2011 +0100
@@ -3,7 +3,7 @@
dnl Contributed to the GNU project by Torbjorn Granlund.
-dnl Copyright 2004 Free Software Foundation, Inc.
+dnl Copyright 2004, 2011 Free Software Foundation, Inc.
dnl This file is part of the GNU MP Library.
@@ -23,19 +23,14 @@
include(`../config.m4')
C cycles/limb
-C Itanium: 3.15
-C Itanium 2: 1.625
-
-C Note that this is very similar to addmul_2.asm. If you change this file,
-C please change that file too.
+C Itanium: ?
+C Itanium 2: 1.5
C TODO
C * Clean up variable names, and try to decrease the number of distinct
C registers used.
-C * Cleanup feed-in code to not require zeroing several registers.
+C * Clean up feed-in code to not require zeroing several registers.
C * Make sure we don't depend on uninitialized predicate registers.
-C * We currently cross-jump very aggressively, at the expense of a few cycles
-C per operation. Consider changing that.
C * Could perhaps save a few cycles by using 1 c/l carry propagation in
C wind-down code.
C * Ultimately rewrite. The problem with this code is that it first uses a
@@ -97,597 +92,518 @@
.body
ifdef(`HAVE_ABI_32',
-` addp4 rp = 0, rp C M I
- addp4 up = 0, up C M I
- addp4 vp = 0, vp C M I
- zxt4 n = n C I
+ ` addp4 rp = 0, rp C M I
+ addp4 up = 0, up C M I
+ addp4 vp = 0, vp C M I
+ nop 0
+ nop 0
+ zxt4 n = n C I
;;')
-{.mmi C 00
- ldf8 ux = [up], 8 C M
- ldf8 v0 = [vp], 8 C M
- mov.i r2 = ar.lc C I0
-}{.mmi
- nop 0 C M
- and r14 = 3, n C M I
- add n = -2, n C M I
+.mmi; ldf8 ux = [up], 8 C M
+ ldf8 v0 = [vp], 8 C M
+ mov r2 = ar.lc C I0
+.mmi; nop 0 C M
+ and r14 = 3, n C M I
+ add n = -2, n C M I
;;
-}{.mmi C 01
- ldf8 uy = [up], 8 C M
- ldf8 v1 = [vp] C M
- shr.u n = n, 2 C I
-}{.mmi
- nop 0 C M
- cmp.eq p10, p0 = 1, r14 C M I
- cmp.eq p11, p0 = 2, r14 C M I
+.mmi; ldf8 uy = [up], 8 C M
+ ldf8 v1 = [vp] C M
+ shr.u n = n, 2 C I
+.mmi; nop 0 C M
+ cmp.eq p10, p0 = 1, r14 C M I
+ cmp.eq p11, p0 = 2, r14 C M I
;;
-}{.mmi C 02
- nop 0 C M
- cmp.eq p12, p0 = 3, r14 C M I
- mov.i ar.lc = n C I0
-}{.bbb
- (p10) br.dptk .Lb01 C B
- (p11) br.dptk .Lb10 C B
- (p12) br.dptk .Lb11 C B
+.mmi; nop 0 C M
+ cmp.eq p12, p0 = 3, r14 C M I
+ mov ar.lc = n C I0
+.bbb; (p10) br.dptk L(b01) C B
+ (p11) br.dptk L(b10) C B
+ (p12) br.dptk L(b11) C B
;;
-}
ALIGN(32)
-.Lb00: ldf8 u_1 = [up], 8
- mov acc1_2 = 0
- mov pr1_2 = 0
- mov pr0_3 = 0
- cmp.ne p8, p9 = r0, r0
+L(b00): ldf8 u_1 = [up], 8
+ mov acc1_2 = 0
+ mov pr1_2 = 0
+ mov pr0_3 = 0
+ cmp.ne p8, p9 = r0, r0
;;
- xma.l fp0b_3 = ux, v0, f0
- cmp.ne p12, p13 = r0, r0
- ldf8 u_2 = [up], 8
- xma.hu fp1a_3 = ux, v0, f0
- br.cloop.dptk .grt4
+ xma.l fp0b_3 = ux, v0, f0
+ cmp.ne p12, p13 = r0, r0
+ ldf8 u_2 = [up], 8
+ xma.hu fp1a_3 = ux, v0, f0
+ br.cloop.dptk L(gt4)
- xma.l fp0b_0 = uy, v0, f0
- xma.hu fp1a_0 = uy, v0, f0
+ xma.l fp0b_0 = uy, v0, f0
+ xma.hu fp1a_0 = uy, v0, f0
;;
- getf.sig acc0 = fp0b_3
- xma.l fp1b_3 = ux, v1, fp1a_3
- xma.hu fp2a_3 = ux, v1, fp1a_3
+ getfsig acc0 = fp0b_3
+ xma.l fp1b_3 = ux, v1, fp1a_3
+ xma.hu fp2a_3 = ux, v1, fp1a_3
;;
- xma.l fp0b_1 = u_1, v0, f0
- xma.hu fp1a_1 = u_1, v0, f0
+ xma.l fp0b_1 = u_1, v0, f0
+ xma.hu fp1a_1 = u_1, v0, f0
;;
- getf.sig pr0_0 = fp0b_0
- xma.l fp1b_0 = uy, v1, fp1a_0
- xma.hu fp2a_0 = uy, v1, fp1a_0
+ getfsig pr0_0 = fp0b_0
+ xma.l fp1b_0 = uy, v1, fp1a_0
+ xma.hu fp2a_0 = uy, v1, fp1a_0
;;
- getf.sig pr1_3 = fp1b_3
- getf.sig acc1_3 = fp2a_3
- xma.l fp0b_2 = u_2, v0, f0
- xma.hu fp1a_2 = u_2, v0, f0
- br .Lcj4
+ getfsig pr1_3 = fp1b_3
+ getfsig acc1_3 = fp2a_3
+ xma.l fp0b_2 = u_2, v0, f0
+ xma.hu fp1a_2 = u_2, v0, f0
+ br L(cj4)
-.grt4: xma.l fp0b_0 = uy, v0, f0
- xma.hu fp1a_0 = uy, v0, f0
+L(gt4): xma.l fp0b_0 = uy, v0, f0
+ xma.hu fp1a_0 = uy, v0, f0
;;
- getf.sig acc0 = fp0b_3
- xma.l fp1b_3 = ux, v1, fp1a_3
- ldf8 u_3 = [up], 8
- xma.hu fp2a_3 = ux, v1, fp1a_3
+ getfsig acc0 = fp0b_3
+ xma.l fp1b_3 = ux, v1, fp1a_3
+ ldf8 u_3 = [up], 8
+ xma.hu fp2a_3 = ux, v1, fp1a_3
;;
- xma.l fp0b_1 = u_1, v0, f0
- xma.hu fp1a_1 = u_1, v0, f0
+ xma.l fp0b_1 = u_1, v0, f0
+ xma.hu fp1a_1 = u_1, v0, f0
;;
- getf.sig pr0_0 = fp0b_0
- xma.l fp1b_0 = uy, v1, fp1a_0
- xma.hu fp2a_0 = uy, v1, fp1a_0
+ getfsig pr0_0 = fp0b_0
+ xma.l fp1b_0 = uy, v1, fp1a_0
+ xma.hu fp2a_0 = uy, v1, fp1a_0
;;
- ldf8 u_0 = [up], 8
- getf.sig pr1_3 = fp1b_3
+ ldf8 u_0 = [up], 8
+ getfsig pr1_3 = fp1b_3
+ xma.l fp0b_2 = u_2, v0, f0
;;
- getf.sig acc1_3 = fp2a_3
- xma.l fp0b_2 = u_2, v0, f0
- xma.hu fp1a_2 = u_2, v0, f0
- br .LL00
+ getfsig acc1_3 = fp2a_3
+ xma.hu fp1a_2 = u_2, v0, f0
+ br L(00)
ALIGN(32)
-.Lb01: ldf8 u_0 = [up], 8 C M
- mov acc1_1 = 0 C M I
- mov pr1_1 = 0 C M I
- mov pr0_2 = 0 C M I
- cmp.ne p6, p7 = r0, r0 C M I
+L(b01): ldf8 u_0 = [up], 8 C M
+ mov acc1_1 = 0 C M I
+ mov pr1_1 = 0 C M I
+ mov pr0_2 = 0 C M I
+ cmp.ne p6, p7 = r0, r0 C M I
;;
- xma.l fp0b_2 = ux, v0, f0 C F
- cmp.ne p10, p11 = r0, r0 C M I
- ldf8 u_1 = [up], 8 C M
- xma.hu fp1a_2 = ux, v0, f0 C F
+ xma.l fp0b_2 = ux, v0, f0 C F
+ cmp.ne p10, p11 = r0, r0 C M I
+ ldf8 u_1 = [up], 8 C M
+ xma.hu fp1a_2 = ux, v0, f0 C F
;;
- xma.l fp0b_3 = uy, v0, f0 C F
- xma.hu fp1a_3 = uy, v0, f0 C F
+ xma.l fp0b_3 = uy, v0, f0 C F
+ xma.hu fp1a_3 = uy, v0, f0 C F
;;
- getf.sig acc0 = fp0b_2 C M
- xma.l fp1b_2 = ux, v1,fp1a_2 C F
- xma.hu fp2a_2 = ux, v1,fp1a_2 C F
- ldf8 u_2 = [up], 8 C M
- br.cloop.dptk .grt5
+ getfsig acc0 = fp0b_2 C M
+ xma.l fp1b_2 = ux, v1,fp1a_2 C F
+ ldf8 u_2 = [up], 8 C M
+ xma.hu fp2a_2 = ux, v1,fp1a_2 C F
+ br.cloop.dptk L(gt5)
- xma.l fp0b_0 = u_0, v0, f0 C F
- xma.hu fp1a_0 = u_0, v0, f0 C F
+ xma.l fp0b_0 = u_0, v0, f0 C F
+ xma.hu fp1a_0 = u_0, v0, f0 C F
;;
- getf.sig pr0_3 = fp0b_3 C M
- xma.l fp1b_3 = uy, v1,fp1a_3 C F
- xma.hu fp2a_3 = uy, v1,fp1a_3 C F
+ getfsig pr0_3 = fp0b_3 C M
+ xma.l fp1b_3 = uy, v1,fp1a_3 C F
+ xma.hu fp2a_3 = uy, v1,fp1a_3 C F
;;
- getf.sig pr1_2 = fp1b_2 C M
- getf.sig acc1_2 = fp2a_2 C M
- xma.l fp0b_1 = u_1, v0, f0 C F
- xma.hu fp1a_1 = u_1, v0, f0 C F
- br .Lcj5
+ getfsig pr1_2 = fp1b_2 C M
+ getfsig acc1_2 = fp2a_2 C M
+ xma.l fp0b_1 = u_1, v0, f0 C F
+ xma.hu fp1a_1 = u_1, v0, f0 C F
+ br L(cj5)
-.grt5: xma.l fp0b_0 = u_0, v0, f0
- xma.hu fp1a_0 = u_0, v0, f0
+L(gt5): xma.l fp0b_0 = u_0, v0, f0
+ xma.hu fp1a_0 = u_0, v0, f0
;;
- getf.sig pr0_3 = fp0b_3
- xma.l fp1b_3 = uy, v1, fp1a_3
- xma.hu fp2a_3 = uy, v1, fp1a_3
+ getfsig pr0_3 = fp0b_3
+ xma.l fp1b_3 = uy, v1, fp1a_3
+ xma.hu fp2a_3 = uy, v1, fp1a_3
;;
- ldf8 u_3 = [up], 8
- getf.sig pr1_2 = fp1b_2
+ ldf8 u_3 = [up], 8
+ getfsig pr1_2 = fp1b_2
+ xma.l fp0b_1 = u_1, v0, f0
;;
- getf.sig acc1_2 = fp2a_2
- xma.l fp0b_1 = u_1, v0, f0
- xma.hu fp1a_1 = u_1, v0, f0
- br .LL01
-
-
-C We have two variants for n = 2. They turn out to run at exactly the same
-C speed. But the first, odd variant might allow one cycle to be trimmed.
- ALIGN(32)
-ifdef(`',`
-.Lb10: C 03
- br.cloop.dptk .grt2
- C 04
- C 05