[Gmp-commit] /var/hg/gmp: 3 new changesets

mercurial at gmplib.org
Fri Sep 20 18:15:02 CEST 2013


details:   /var/hg/gmp/rev/712db07a7e22
changeset: 16004:712db07a7e22
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Fri Sep 20 14:16:16 2013 +0200
description:
Postpone pushes, short-circuit a branch.

details:   /var/hg/gmp/rev/d3d9c9ce63bc
changeset: 16005:d3d9c9ce63bc
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Fri Sep 20 14:17:11 2013 +0200
description:
Provide mullo_basecase for Conroe, Wolfdale, Nehalem, Westmere.

details:   /var/hg/gmp/rev/927fe93f3210
changeset: 16006:927fe93f3210
user:      Torbjorn Granlund <tege at gmplib.org>
date:      Fri Sep 20 14:31:22 2013 +0200
description:
Complete rewrite.

diffstat:

 mpn/x86_64/core2/mullo_basecase.asm    |  416 ++++++++++++++++++
 mpn/x86_64/coreisbr/mullo_basecase.asm |   37 +-
 mpn/x86_64/k8/redc_1.asm               |  754 +++++++++++++++++++++-----------
 3 files changed, 943 insertions(+), 264 deletions(-)

diffs (truncated from 1334 to 300 lines):

diff -r 18e377fa1fc7 -r 927fe93f3210 mpn/x86_64/core2/mullo_basecase.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mpn/x86_64/core2/mullo_basecase.asm	Fri Sep 20 14:31:22 2013 +0200
@@ -0,0 +1,416 @@
+dnl  AMD64 mpn_mullo_basecase optimised for Conroe/Wolfdale/Nehalem/Westmere.
+
+dnl  Contributed to the GNU project by Torbjörn Granlund.
+
+dnl  Copyright 2008, 2009, 2011, 2012, 2013 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of the GNU Lesser General Public License as published
+dnl  by the Free Software Foundation; either version 3 of the License, or (at
+dnl  your option) any later version.
+
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+dnl  License for more details.
+
+dnl  You should have received a copy of the GNU Lesser General Public License
+dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb	mul_2		addmul_2
+C AMD K8,K9
+C AMD K10
+C AMD bull
+C AMD pile
+C AMD steam
+C AMD bobcat
+C AMD jaguar
+C Intel P4
+C Intel core	 4.0		4.18-4.25
+C Intel NHM	 3.75		4.06-4.2
+C Intel SBR
+C Intel IBR
+C Intel HWL
+C Intel BWL
+C Intel atom
+C VIA nano
+
+C The inner loops of this code are the result of running a code generation and
+C optimisation tool suite written by David Harvey and Torbjörn Granlund.
+
+C TODO
+C   * Implement proper cor2, replacing current cor0.
+C   * Offset n by 2 in order to avoid the outer loop cmp.  (And sqr_basecase?)
+C   * Micro-optimise.
+
+C When playing with pointers, set this to $2 to fall back to conservative
+C indexing in wind-down code.
+define(`I',`$1')
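+C  With the definition above, I(a,b) expands to its first argument; redefined
+C  as `$2' it selects the second, fully indexed form instead (see its use in
+C  the wind-down code below, e.g. I(-8(rp),-8(rp,i,8))).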
+
+define(`rp',       `%rdi')
+define(`up',       `%rsi')
+define(`vp_param', `%rdx')
+define(`n_param',  `%rcx')
+
+define(`v0',       `%r10')
+define(`v1',       `%r11')
+define(`w0',       `%rbx')
+define(`w1',       `%rcx')
+define(`w2',       `%rbp')
+define(`w3',       `%r12')
+define(`n',        `%r9')
+define(`i',        `%r13')
+define(`vp',       `%r8')
+
+define(`X0',       `%r14')
+define(`X1',       `%r15')
+
+C rax rbx rcx rdx rdi rsi rbp r8 r9 r10 r11 r12 r13 r14 r15
+
+ABI_SUPPORT(DOS64)
+ABI_SUPPORT(STD64)
+
+define(`ALIGNx', `ALIGN(16)')
+
+define(`N', 85)
+ifdef(`N',,`define(`N',0)')
+define(`MOV', `ifelse(eval(N & $3),0,`mov	$1, $2',`lea	($1), $2')')
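+C  MOV(s,d,bit) emits a plain mov, or an equivalent lea when bit `bit' is set
+C  in the mask N above, presumably so the mov/lea mix can be tuned per call
+C  site for execution-port balance.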
+
+ASM_START()
+	TEXT
+	ALIGN(32)
+PROLOGUE(mpn_mullo_basecase)
+	FUNC_ENTRY(4)
+
+	mov	(up), %rax
+	mov	vp_param, vp
+
+	cmp	$4, n_param
+	jb	L(small)
+
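+C  Set up negative indexing: rp and up are pointed past the ends of their
+C  operands so the loops can walk a single index from -n up towards zero.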
+	mov	(vp_param), v0
+	push	%rbx
+	lea	(rp,n_param,8), rp	C point rp at R[un]
+	push	%rbp
+	lea	(up,n_param,8), up	C point up right after U's end
+	push	%r12
+	mov	$0, R32(n)		C FIXME
+	sub	n_param, n
+	push	%r13
+	mul	v0
+	mov	8(vp), v1
+
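+C  Dispatch on n mod 4 to enter the 4-way unrolled mul_2 loop in the right
+C  phase; each L(m2b*) block primes the carry registers before jumping in.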
+	test	$1, R8(n_param)
+	jnz	L(m2x1)
+
+L(m2x0):test	$2, R8(n_param)
+	jnz	L(m2b2)
+
+L(m2b0):lea	(n), i
+	mov	%rax, (rp,n,8)
+	mov	%rdx, w1
+	mov	(up,n,8), %rax
+	xor	R32(w2), R32(w2)
+	jmp	L(m2e0)
+
+L(m2b2):lea	-2(n), i
+	mov	%rax, w2
+	mov	(up,n,8), %rax
+	mov	%rdx, w3
+	xor	R32(w0), R32(w0)
+	jmp	L(m2e2)
+
+L(m2x1):test	$2, R8(n_param)
+	jnz	L(m2b3)
+
+L(m2b1):lea	1(n), i
+	mov	%rax, (rp,n,8)
+	mov	(up,n,8), %rax
+	mov	%rdx, w0
+	xor	R32(w1), R32(w1)
+	jmp	L(m2e1)
+
+L(m2b3):lea	-1(n), i
+	xor	R32(w3), R32(w3)
+	mov	%rax, w1
+	mov	%rdx, w2
+	mov	(up,n,8), %rax
+	jmp	L(m2e3)
+
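+C  mul_2 loop: compute the first two product rows, R = U * {v0,v1}, handling
+C  four limbs of U per iteration.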
+	ALIGNx
+L(m2tp):mul	v0
+	add	%rax, w3
+	mov	-8(up,i,8), %rax
+	mov	w3, -8(rp,i,8)
+	adc	%rdx, w0
+	adc	$0, R32(w1)
+L(m2e1):mul	v1
+	add	%rax, w0
+	adc	%rdx, w1
+	mov	$0, R32(w2)
+	mov	(up,i,8), %rax
+	mul	v0
+	add	%rax, w0
+	mov	w0, (rp,i,8)
+	adc	%rdx, w1
+	mov	(up,i,8), %rax
+	adc	$0, R32(w2)
+L(m2e0):mul	v1
+	add	%rax, w1
+	adc	%rdx, w2
+	mov	8(up,i,8), %rax
+	mul	v0
+	mov	$0, R32(w3)
+	add	%rax, w1
+	adc	%rdx, w2
+	adc	$0, R32(w3)
+	mov	8(up,i,8), %rax
+L(m2e3):mul	v1
+	add	%rax, w2
+	mov	w1, 8(rp,i,8)
+	adc	%rdx, w3
+	mov	$0, R32(w0)
+	mov	16(up,i,8), %rax
+	mul	v0
+	add	%rax, w2
+	mov	16(up,i,8), %rax
+	adc	%rdx, w3
+	adc	$0, R32(w0)
+L(m2e2):mul	v1
+	mov	$0, R32(w1)		C FIXME: dead in last iteration
+	add	%rax, w3
+	mov	24(up,i,8), %rax
+	mov	w2, 16(rp,i,8)
+	adc	%rdx, w0		C FIXME: dead in last iteration
+	add	$4, i
+	js	L(m2tp)
+
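+C  Wind-down: only the low limbs of the product are wanted (mullo), so the
+C  last cross product needs just its low half, hence the imul.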
+L(m2ed):imul	v0, %rax
+	add	w3, %rax
+	mov	%rax, I(-8(rp),-8(rp,i,8))
+
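+C  Advance to the next pair of V limbs; if at most two remain, finish in the
+C  correction code at L(cor1).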
+	add	$2, n
+	lea	16(vp), vp
+	lea	-16(up), up
+	cmp	$-2, n
+	jge	L(cor1)
+
+	push	%r14
+	push	%r15
+
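+C  Outer loop: each pass folds two more V limbs into the result, adding
+C  U * {v0,v1} to the limbs already stored at rp (addmul_2 style); since
+C  mullo needs only the low limbs, the updated region shrinks by two limbs
+C  per pass.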
+L(outer):
+	mov	(vp), v0
+	mov	8(vp), v1
+	mov	(up,n,8), %rax
+	mul	v0
+	test	$1, R8(n)
+	jnz	L(a1x1)
+
+L(a1x0):mov	%rax, X1
+	MOV(	%rdx, X0, 8)
+	mov	(up,n,8), %rax
+	mul	v1
+	test	$2, R8(n)
+	jnz	L(a110)
+
+L(a100):lea	(n), i
+	mov	(rp,n,8), w3
+	mov	%rax, w0
+	MOV(	%rdx, w1, 16)
+	jmp	L(lo0)
+
+L(a110):lea	2(n), i
+	mov	(rp,n,8), w1
+	mov	%rax, w2
+	mov	8(up,n,8), %rax
+	MOV(	%rdx, w3, 1)
+	jmp	L(lo2)
+
+L(a1x1):mov	%rax, X0
+	MOV(	%rdx, X1, 2)
+	mov	(up,n,8), %rax
+	mul	v1
+	test	$2, R8(n)
+	jz	L(a111)
+
+L(a101):lea	1(n), i
+	MOV(	%rdx, w0, 4)
+	mov	(rp,n,8), w2
+	mov	%rax, w3
+	jmp	L(lo1)
+
+L(a111):lea	-1(n), i
+	MOV(	%rdx, w2, 64)
+	mov	%rax, w1
+	mov	(rp,n,8), w0
+	mov	8(up,n,8), %rax
+	jmp	L(lo3)
+
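+C  addmul_2 loop: X0/X1 pipeline the v0 partial products between steps while
+C  w0-w3 accumulate the v1 products together with the old limbs read from rp.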
+	ALIGNx
+L(top):	mul	v1
+	add	w0, w1
+	adc	%rax, w2
+	mov	-8(up,i,8), %rax
+	MOV(	%rdx, w3, 1)
+	adc	$0, w3
+L(lo2):	mul	v0
+	add	w1, X1
+	mov	X1, -16(rp,i,8)
+	adc	%rax, X0
+	MOV(	%rdx, X1, 2)
+	adc	$0, X1
+	mov	-8(up,i,8), %rax
+	mul	v1
+	MOV(	%rdx, w0, 4)
+	mov	-8(rp,i,8), w1
+	add	w1, w2
+	adc	%rax, w3
+	adc	$0, w0
+L(lo1):	mov	(up,i,8), %rax
+	mul	v0
+	add	w2, X0
+	adc	%rax, X1
+	mov	X0, -8(rp,i,8)
+	MOV(	%rdx, X0, 8)
+	adc	$0, X0
+	mov	(up,i,8), %rax
+	mov	(rp,i,8), w2
+	mul	v1
+	add	w2, w3
+	adc	%rax, w0
+	MOV(	%rdx, w1, 16)
+	adc	$0, w1
+L(lo0):	mov	8(up,i,8), %rax
+	mul	v0
+	add	w3, X1
+	mov	X1, (rp,i,8)
+	adc	%rax, X0
+	MOV(	%rdx, X1, 32)
+	mov	8(rp,i,8), w3
+	adc	$0, X1
+	mov	8(up,i,8), %rax
+	mul	v1

