[Gmp-commit] /var/hg/gmp: 4 new changesets
mercurial at gmplib.org
Wed Sep 4 00:09:09 CEST 2013
details: /var/hg/gmp/rev/d9c660fae97d
changeset: 15965:d9c660fae97d
user: Torbjorn Granlund <tege at gmplib.org>
date: Tue Sep 03 16:19:27 2013 +0200
description:
(mulx): Handle negative offsets.
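For context: the mulx() m4 macro used throughout the new Haswell code wraps the BMI2 mulx instruction, an unsigned 64x64->128-bit multiply of an explicit source operand by the implicit multiplier in %rdx that leaves the flags untouched. The sketch below (the helper name mulx_sketch and its byte_off parameter are made up for this example, not GMP code) shows the arithmetic performed when the source operand carries a negative byte displacement, as with the -24(mp), -16(mp), -8(mp) operands in L(o1) further down; it assumes a GCC/Clang-style unsigned __int128 on a 64-bit target.

  /* Hedged illustration only, not GMP source. */
  #include <stdint.h>

  static inline void
  mulx_sketch (const uint64_t *mp, long byte_off, uint64_t rdx,
               uint64_t *lo, uint64_t *hi)
  {
    uint64_t src = mp[byte_off / 8];              /* e.g. byte_off = -24 reads mp[-3] */
    unsigned __int128 p = (unsigned __int128) rdx * src;
    *lo = (uint64_t) p;                           /* low limb, first output operand   */
    *hi = (uint64_t) (p >> 64);                   /* high limb, second output operand */
    /* the real instruction additionally leaves the CPU flags untouched */
  }
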
details: /var/hg/gmp/rev/85aa96de9363
changeset: 15966:85aa96de9363
user: Torbjorn Granlund <tege at gmplib.org>
date: Tue Sep 03 18:03:18 2013 +0200
description:
Provide Haswell redc_1.
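For readers unfamiliar with the primitive: mpn_redc_1 is Montgomery reduction with a single-limb inverse. It reduces a 2n-limb value {up,2n} modulo an n-limb odd modulus {mp,n}, given u0inv = -1/mp[0] mod B (B = 2^64 on this target), and returns a carry that the caller folds in with a conditional subtraction. The sketch below follows the shape of GMP's generic C version (mpn/generic/redc_1.c) rather than the new assembly; the function name redc_1_sketch is mine, and it is an outline of the operation the Haswell file implements, not a drop-in replacement.

  /* Sketch of the operation, modeled on the generic redc_1.  rp receives n
     limbs; up holds 2n limbs and is clobbered. */
  #include <gmp.h>

  mp_limb_t
  redc_1_sketch (mp_ptr rp, mp_ptr up, mp_srcptr mp, mp_size_t n, mp_limb_t u0inv)
  {
    mp_size_t j;

    for (j = 0; j < n; j++)
      {
        mp_limb_t q0 = up[0] * u0inv;        /* quotient limb; makes up[0] vanish */
        mp_limb_t cy = mpn_addmul_1 (up, mp, n, q0);
        up[0] = cy;                          /* keep the carry limb for the final add */
        up++;
      }
    return mpn_add_n (rp, up, up - n, n);    /* caller subtracts {mp,n} on carry out */
  }

The assembly in the diff below replaces the addmul-style inner loop with 4-way unrolled mulx/adc chains (note the sar $2 on the induction variable) and special-cases n = 1, 2 and 3, which the general loop cannot handle.
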
details: /var/hg/gmp/rev/cc6e9189784d
changeset: 15967:cc6e9189784d
user: Torbjorn Granlund <tege at gmplib.org>
date: Wed Sep 04 00:07:15 2013 +0200
description:
Implement larger "corner".
details: /var/hg/gmp/rev/87d79b5e7a8a
changeset: 15968:87d79b5e7a8a
user: Torbjorn Granlund <tege at gmplib.org>
date: Wed Sep 04 00:07:31 2013 +0200
description:
ChangeLog
diffstat:
ChangeLog | 11 +
mpn/x86_64/coreihwl/redc_1.asm | 422 +++++++++++++++++++++++++++++++++++
mpn/x86_64/coreihwl/sqr_basecase.asm | 132 ++++++----
mpn/x86_64/x86_64-defs.m4 | 14 +-
4 files changed, 515 insertions(+), 64 deletions(-)
diffs (truncated from 742 to 300 lines):
diff -r 61ff3141d62f -r 87d79b5e7a8a ChangeLog
--- a/ChangeLog Sun Sep 01 12:15:31 2013 +0200
+++ b/ChangeLog Wed Sep 04 00:07:31 2013 +0200
@@ -1,3 +1,14 @@
+2013-09-04 Torbjorn Granlund <tege at gmplib.org>
+
+ * mpn/x86_64/coreihwl/sqr_basecase.asm: Implement larger "corner".
+ Misc tuning.
+
+2013-09-03 Torbjorn Granlund <tege at gmplib.org>
+
+ * mpn/x86_64/coreihwl/redc_1.asm: New file.
+
+ * mpn/x86_64/x86_64-defs.m4 (mulx): Handle negative offsets.
+
2013-08-31 Torbjorn Granlund <tege at gmplib.org>
* mpn/x86_64/coreisbr/sqr_basecase.asm: New file.
diff -r 61ff3141d62f -r 87d79b5e7a8a mpn/x86_64/coreihwl/redc_1.asm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/coreihwl/redc_1.asm Wed Sep 04 00:07:31 2013 +0200
@@ -0,0 +1,422 @@
+dnl AMD64 mpn_redc_1 optimised for Intel Haswell.
+
+dnl Contributed to the GNU project by Torbjörn Granlund.
+
+dnl Copyright 2013 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of the GNU Lesser General Public License as published
+dnl by the Free Software Foundation; either version 3 of the License, or (at
+dnl your option) any later version.
+
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+dnl License for more details.
+
+dnl You should have received a copy of the GNU Lesser General Public License
+dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C AMD K8,K9 n/a
+C AMD K10 n/a
+C AMD bull n/a
+C AMD pile n/a
+C AMD steam ?
+C AMD bobcat n/a
+C AMD jaguar ?
+C Intel P4 n/a
+C Intel core n/a
+C Intel NHM n/a
+C Intel SBR n/a
+C Intel IBR n/a
+C Intel HWL 2.32
+C Intel BWL ?
+C Intel atom n/a
+C VIA nano n/a
+
+C The inner loops of this code are the result of running a code generation and
+C optimisation tool suite written by David Harvey and Torbjörn Granlund.
+
+C TODO
+C * Micro-optimise.
+C * Consider inlining mpn_add_n. Tests indicate that this saves just 1-2
+C cycles, though.
+
+define(`rp', `%rdi') C rcx
+define(`up', `%rsi') C rdx
+define(`mp_param', `%rdx') C r8
+define(`n', `%rcx') C r9
+define(`u0inv_param', `%r8') C stack
+
+define(`i', `%r14')
+define(`j', `%r15')
+define(`mp', `%rdi')
+define(`u0inv', `(%rsp)') C stack
+
+ABI_SUPPORT(DOS64) C FIXME: needs verification
+ABI_SUPPORT(STD64)
+
+ASM_START()
+ TEXT
+ ALIGN(16)
+PROLOGUE(mpn_redc_1)
+ FUNC_ENTRY(4)
+IFDOS(` mov 56(%rsp), %r8 ')
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ push rp
+ mov mp_param, mp C note that rp and mp share a register
+ mov (up), %rdx
+
+ neg n
+ push %r8 C put u0inv on stack
+ imul u0inv_param, %rdx C first iteration q0
+ mov n, j C outer loop induction var
+
+ test $1, R8(n)
+ jnz L(bx1)
+
+L(bx0): test $2, R8(n)
+ jz L(o0b)
+
+ cmp $-2, R32(n)
+ jnz L(o2)
+
+C Special code for n = 2 since general code cannot handle it
+ mov 8(%rsp), %rbx C rp
+ lea 16(%rsp), %rsp C deallocate two slots
+ mulx( (mp), %r9, %r12)
+ mulx( 8,(mp), %r11, %r10)
+ add %r12, %r11
+ adc $0, %r10
+ add (up), %r9 C = 0
+ adc 8(up), %r11 C r11 = up[1]
+ adc $0, %r10 C -> up[0]
+ mov %r11, %rdx
+ imul u0inv_param, %rdx
+ mulx( (mp), %r13, %r12)
+ mulx( 8,(mp), %r14, %r15)
+ xor R32(%rax), R32(%rax)
+ add %r12, %r14
+ adc $0, %r15
+ add %r11, %r13 C = 0
+ adc 16(up), %r14 C rp[2]
+ adc $0, %r15 C -> up[1]
+ add %r14, %r10
+ adc 24(up), %r15
+ mov %r10, (%rbx)
+ mov %r15, 8(%rbx)
+ setc R8(%rax)
+ jmp L(ret)
+
+L(o2): lea 2(n), i C inner loop induction var
+ mulx( (mp), %r9, %r8)
+ mulx( 8,(mp), %r11, %r10)
+ sar $2, i
+ add %r8, %r11
+ jmp L(lo2)
+
+ ALIGN(16)
+L(tp2): adc %rax, %r9
+ lea 32(up), up
+ adc %r8, %r11
+L(lo2): mulx( 16,(mp), %r13, %r12)
+ mov (up), %r8
+ mulx( 24,(mp), %rbx, %rax)
+ lea 32(mp), mp
+ adc %r10, %r13
+ adc %r12, %rbx
+ adc $0, %rax
+ mov 8(up), %r10
+ mov 16(up), %r12
+ add %r9, %r8
+ mov 24(up), %rbp
+ mov %r8, (up)
+ adc %r11, %r10
+ mulx( (mp), %r9, %r8)
+ mov %r10, 8(up)
+ adc %r13, %r12
+ mov %r12, 16(up)
+ adc %rbx, %rbp
+ mulx( 8,(mp), %r11, %r10)
+ mov %rbp, 24(up)
+ inc i
+ jnz L(tp2)
+
+L(ed2): mov 56(up,n,8), %rdx C next iteration up[0]
+ lea 16(mp,n,8), mp C mp = (last starting mp)
+ adc %rax, %r9
+ adc %r8, %r11
+ mov 32(up), %r8
+ adc $0, %r10
+ imul u0inv, %rdx C next iteration q0
+ mov 40(up), %rax
+ add %r9, %r8
+ mov %r8, 32(up)
+ adc %r11, %rax
+ mov %rax, 40(up)
+ lea 56(up,n,8), up C up = (last starting up) + 1
+ adc $0, %r10
+ mov %r10, -8(up)
+ inc j
+ jnz L(o2)
+
+ jmp L(cj)
+
+
+L(bx1): test $2, R8(n)
+ jz L(o3a)
+
+L(o1a): cmp $-1, R32(n)
+ jnz L(o1b)
+
+C Special code for n = 1 since general code cannot handle it
+ mov 8(%rsp), %rbx C rp
+ lea 16(%rsp), %rsp C deallocate two slots
+ mulx( (mp), %r11, %r10)
+ add (up), %r11
+ adc 8(up), %r10
+ mov %r10, (%rbx)
+ mov $0, R32(%rax)
+ setc R8(%rax)
+ jmp L(ret)
+
+L(o1b): lea 24(mp), mp
+L(o1): lea 1(n), i C inner loop induction var
+ `mulx' -24(mp), %r11, %r10
+ `mulx' -16(mp), %r13, %r12
+ `mulx' -8(mp), %rbx, %rax
+ sar $2, i
+ add %r10, %r13
+ adc %r12, %rbx
+ adc $0, %rax
+ mov (up), %r10
+ mov 8(up), %r12
+ mov 16(up), %rbp
+ add %r11, %r10
+ jmp L(lo1)
+
+ ALIGN(16)
+L(tp1): adc %rax, %r9
+ lea 32(up), up
+ adc %r8, %r11
+ mulx( 16,(mp), %r13, %r12)
+ mov -8(up), %r8
+ mulx( 24,(mp), %rbx, %rax)
+ lea 32(mp), mp
+ adc %r10, %r13
+ adc %r12, %rbx
+ adc $0, %rax
+ mov (up), %r10
+ mov 8(up), %r12
+ add %r9, %r8
+ mov 16(up), %rbp
+ mov %r8, -8(up)
+ adc %r11, %r10
+L(lo1): mulx( (mp), %r9, %r8)
+ mov %r10, (up)
+ adc %r13, %r12
+ mov %r12, 8(up)
+ adc %rbx, %rbp
+ mulx( 8,(mp), %r11, %r10)
+ mov %rbp, 16(up)
+ inc i
+ jnz L(tp1)
+
+L(ed1): mov 48(up,n,8), %rdx C next iteration up[0]
+ lea 40(mp,n,8), mp C mp = (last starting mp)
+ adc %rax, %r9
+ adc %r8, %r11
+ mov 24(up), %r8
+ adc $0, %r10
+ imul u0inv, %rdx C next iteration q0
+ mov 32(up), %rax
+ add %r9, %r8
+ mov %r8, 24(up)
+ adc %r11, %rax
+ mov %rax, 32(up)
+ lea 48(up,n,8), up C up = (last starting up) + 1
+ adc $0, %r10
+ mov %r10, -8(up)
+ inc j
+ jnz L(o1)
+
+ jmp L(cj)
+
+L(o3a): cmp $-3, R32(n)
+ jnz L(o3b)
+
+C Special code for n = 3 since general code cannot handle it
+L(n3): mulx( (mp), %rbx, %rax)
+ mulx( 8,(mp), %r9, %r14)
+ add (up), %rbx
+ mulx( 16,(mp), %r11, %r10)
+ adc %rax, %r9 C W 1
+ adc %r14, %r11 C W 2
+ mov 8(up), %r14
+ mov u0inv_param, %rdx
+ adc $0, %r10 C W 3
+ mov 16(up), %rax
+ add %r9, %r14 C W 1
+ mov %r14, 8(up)
+ mulx( %r14, %rdx, %r13) C next iteration q0
+ adc %r11, %rax C W 2
+ mov %rax, 16(up)
+ adc $0, %r10 C W 3
+ mov %r10, (up)
+ lea 8(up), up C up = (last starting up) + 1
+ inc j
+ jnz L(n3)