[Gmp-commit] /var/hg/gmp: Add x86_64 mullo_basecase.
mercurial at gmplib.org
Thu Mar 1 14:13:48 CET 2012
details: /var/hg/gmp/rev/691fa82130a7
changeset: 14706:691fa82130a7
user: Torbjorn Granlund <tege at gmplib.org>
date: Thu Mar 01 14:13:41 2012 +0100
description:
Add x86_64 mullo_basecase.
diffstat:
 ChangeLog                     |    4 +
 mpn/x86_64/mullo_basecase.asm |  412 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 416 insertions(+), 0 deletions(-)
diffs (truncated from 427 to 300 lines):
diff -r aa68a5c1bde8 -r 691fa82130a7 ChangeLog
--- a/ChangeLog Wed Feb 29 22:25:12 2012 +0100
+++ b/ChangeLog Thu Mar 01 14:13:41 2012 +0100
@@ -1,3 +1,7 @@
+2012-03-01 Torbjorn Granlund <tege at gmplib.org>
+
+ * mpn/x86_64/mullo_basecase.asm: New file.
+
2012-02-29  Marc Glisse  <marc.glisse at inria.fr>

	* gmpxx.h (std::numeric_limits): New partial specialization.
diff -r aa68a5c1bde8 -r 691fa82130a7 mpn/x86_64/mullo_basecase.asm
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/mullo_basecase.asm Thu Mar 01 14:13:41 2012 +0100
@@ -0,0 +1,412 @@
+dnl AMD64 mpn_mullo_basecase.
+
+dnl Contributed to the GNU project by Torbjorn Granlund.
+
+dnl Copyright 2008, 2009, 2011, 2012 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of the GNU Lesser General Public License as published
+dnl by the Free Software Foundation; either version 3 of the License, or (at
+dnl your option) any later version.
+
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+dnl License for more details.
+
+dnl You should have received a copy of the GNU Lesser General Public License
+dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C The inner loops of this code are the result of running a code generation and
+C optimisation tool suite written by David Harvey and Torbjorn Granlund.
+
+C NOTES
+C  * There is a major stupidity in that we call mpn_mul_1 initially, for a
+C    large trip count.  Instead, we should start with mul_2 for any operand
+C    size congruence class.
+C  * Stop iterating addmul_2 earlier, falling into straight-line triangle code
+C    for the last 2-3 iterations.
+C  * Perhaps implement n=4 special code.
+C  * The reload of the outer loop jump address hurts branch prediction.
+C  * The addmul_2 loop ends with a MUL whose high part is not used upon loop
+C    exit.
+
+C INPUT PARAMETERS
+define(`rp', `%rdi')
+define(`up', `%rsi')
+define(`vp_param', `%rdx')
+define(`n', `%rcx')
+
+define(`vp', `%r11')
+define(`outer_addr', `%r8')
+define(`j', `%r9')
+define(`v0', `%r13')
+define(`v1', `%r14')
+define(`w0', `%rbx')
+define(`w1', `%r15')
+define(`w2', `%rbp')
+define(`w3', `%r10')
+
+ TEXT
+ ALIGN(16)
+ASM_START()
+PROLOGUE(mpn_mullo_basecase)
+ cmp $4, n
+ jge L(gen)
+ mov (up), %rax C u0
+ mov (vp_param), %r8 C v0
+
+ lea L(jmptab)(%rip), %r9
+ jmp *(%r9,n,8)
+ JUMPTABSECT
+ ALIGN(8)
+L(jmptab):
+ .quad 0 C not allowed
+ .quad L(1) C 1
+ .quad L(2) C 2
+ .quad L(3) C 3
+C .quad L(0m4) C 4
+C .quad L(1m4) C 5
+C .quad L(2m4) C 6
+C .quad L(3m4) C 7
+C .quad L(0m4) C 8
+C .quad L(1m4) C 9
+C .quad L(2m4) C 10
+C .quad L(3m4) C 11
+ TEXT
+
+L(1): imul %r8, %rax
+ mov %rax, (rp)
+ ret
+
+L(2): mov 8(vp_param), %r11
+ imul %rax, %r11 C u0 x v1
+ mul %r8 C u0 x v0
+ mov %rax, (rp)
+ imul 8(up), %r8 C u1 x v0
+ lea (%r11, %rdx), %rax
+ add %r8, %rax
+ mov %rax, 8(rp)
+ ret
+
+L(3): mov 8(vp_param), %r9 C v1
+ mov 16(vp_param), %r11
+ mul %r8 C u0 x v0 -> <r1,r0>
+ mov %rax, (rp) C r0
+ mov (up), %rax C u0
+ mov %rdx, %rcx C r1
+ mul %r9 C u0 x v1 -> <r2,r1>
+ imul 8(up), %r9 C u1 x v1 -> r2
+ mov 16(up), %r10
+ imul %r8, %r10 C u2 x v0 -> r2
+ add %rax, %rcx
+ adc %rdx, %r9
+ add %r10, %r9
+ mov 8(up), %rax C u1
+ mul %r8 C u1 x v0 -> <r2,r1>
+ add %rax, %rcx
+ adc %rdx, %r9
+ mov %r11, %rax
+ imul (up), %rax C u0 x v2 -> r2
+ add %rax, %r9
+ mov %rcx, 8(rp)
+ mov %r9, 16(rp)
+ ret
+
+L(0m4):
+L(1m4):
+L(2m4):
+L(3m4):
+L(gen): push %rbx
+ push %rbp
+ push %r13
+ push %r14
+ push %r15
+
+ mov (up), %rax
+ mov (vp_param), v0
+ mov vp_param, vp
+
+ lea (rp,n,8), rp
+ lea (up,n,8), up
+ neg n
+
+ mul v0
+
+ test $1, R8(n)
+ jz L(mul_2)
+
+L(mul_1):
+ lea -8(rp), rp
+ lea -8(up), up
+ test $2, R8(n)
+ jnz L(mul_1_prologue_3)
+
+L(mul_1_prologue_2): C n = 7, 11, 15, ...
+ lea -1(n), j
+ lea L(addmul_outer_1)(%rip), outer_addr
+ mov %rax, w0
+ mov %rdx, w1
+ xor R32(w2), R32(w2)
+ xor R32(w3), R32(w3)
+ mov 16(up,n,8), %rax
+ jmp L(mul_1_entry_2)
+
+L(mul_1_prologue_3): C n = 5, 9, 13, ...
+ lea 1(n), j
+ lea L(addmul_outer_3)(%rip), outer_addr
+ mov %rax, w2
+ mov %rdx, w3
+ xor R32(w0), R32(w0)
+ jmp L(mul_1_entry_0)
+
+ ALIGN(16)
+L(mul_1_top):
+ mov w0, -16(rp,j,8)
+ add %rax, w1
+ mov (up,j,8), %rax
+ adc %rdx, w2
+ xor R32(w0), R32(w0)
+ mul v0
+ mov w1, -8(rp,j,8)
+ add %rax, w2
+ adc %rdx, w3
+L(mul_1_entry_0):
+ mov 8(up,j,8), %rax
+ mul v0
+ mov w2, (rp,j,8)
+ add %rax, w3
+ adc %rdx, w0
+ mov 16(up,j,8), %rax
+ mul v0
+ mov w3, 8(rp,j,8)
+ xor R32(w2), R32(w2) C zero
+ mov w2, w3 C zero
+ add %rax, w0
+ mov 24(up,j,8), %rax
+ mov w2, w1 C zero
+ adc %rdx, w1
+L(mul_1_entry_2):
+ mul v0
+ add $4, j
+ js L(mul_1_top)
+
+ mov w0, -16(rp)
+ add %rax, w1
+ mov w1, -8(rp)
+ adc %rdx, w2
+
+ imul (up), v0
+ add v0, w2
+ mov w2, (rp)
+
+ add $1, n
+ jz L(ret)
+
+ mov 8(vp), v0
+ mov 16(vp), v1
+
+ lea 16(up), up
+ lea 8(vp), vp
+ lea 24(rp), rp
+
+ jmp *outer_addr
+
+
+L(mul_2):
+ mov 8(vp), v1
+ test $2, R8(n)
+ jz L(mul_2_prologue_3)
+
+ ALIGN(16)
+L(mul_2_prologue_1):
+ lea 0(n), j
+ mov %rax, w3
+ mov %rdx, w0
+ xor R32(w1), R32(w1)
+ mov (up,n,8), %rax
+ lea L(addmul_outer_3)(%rip), outer_addr
+ jmp L(mul_2_entry_1)
+
+ ALIGN(16)
+L(mul_2_prologue_3):
+ lea 2(n), j
+ mov $0, R32(w3)
+ mov %rax, w1
+ mov (up,n,8), %rax
+ mov %rdx, w2
+ lea L(addmul_outer_1)(%rip), outer_addr
+ jmp L(mul_2_entry_3)
+
+ ALIGN(16)
+L(mul_2_top):
+ mov -32(up,j,8), %rax
+ mul v1
+ add %rax, w0
+ adc %rdx, w1
+ mov -24(up,j,8), %rax
+ xor R32(w2), R32(w2)
+ mul v0
+ add %rax, w0
+ mov -24(up,j,8), %rax
+ adc %rdx, w1
+ adc $0, R32(w2)
+ mul v1
+ add %rax, w1
+ mov w0, -24(rp,j,8)
+ adc %rdx, w2
+ mov -16(up,j,8), %rax
+ mul v0
+ mov $0, R32(w3)
+ add %rax, w1
+ adc %rdx, w2
+ mov -16(up,j,8), %rax
+ adc $0, R32(w3)
+L(mul_2_entry_3):
+ mov $0, R32(w0)
+ mov w1, -16(rp,j,8)
+ mul v1
+ add %rax, w2
+ mov -8(up,j,8), %rax
+ adc %rdx, w3
+ mov $0, R32(w1)
+ mul v0
+ add %rax, w2
+ mov -8(up,j,8), %rax
+ adc %rdx, w3
+ adc R32(w1), R32(w0)
+ mul v1
+ add %rax, w3
+ mov w2, -8(rp,j,8)
+ adc %rdx, w0
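For readers skimming the patch: mpn_mullo_basecase computes only the low
n limbs of the 2n-limb product, i.e. {rp,n} = {up,n} * {vp,n} mod B^n,
where B = 2^64 on this target. Below is a minimal C sketch of those
semantics, using only the documented mpn_mul_1/mpn_addmul_1 entry points;
the name ref_mullo_basecase is ours for illustration and is not part of
the patch, and the asm above instead runs a mul_1/mul_2 pass followed by
addmul_2-based outer loops, as the NOTES comment explains:

  #include <gmp.h>

  /* Reference semantics only: write the low n limbs of {up,n} * {vp,n}
     to {rp,n}.  Requires n >= 1; carries out of limb n-1 (the return
     values of mpn_mul_1/mpn_addmul_1) are deliberately discarded.  */
  static void
  ref_mullo_basecase (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n)
  {
    mp_size_t i;
    mpn_mul_1 (rp, up, n, vp[0]);            /* full row for v[0] */
    for (i = 1; i < n; i++)                  /* shorter rows: limbs >= n dropped */
      mpn_addmul_1 (rp + i, up, n - i, vp[i]);
  }

Each later row for v[i] is truncated to n-i limbs, since its higher
limbs only affect the discarded upper half of the product.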