r117933: cross-bootstrap issue: cross-GMP/MPFR needed?
Jan-Benedict Glaw
jbglaw at lug-owl.de
Wed Oct 25 08:44:36 CEST 2006
On Sun, 2006-10-22 12:36:28 -0700, Andrew Pinski <pinskia at gmail.com> wrote:
> On Sun, 2006-10-22 at 21:00 +0200, Jan-Benedict Glaw wrote:
> > r117933 had a side-effect: when cross-compiling a C-only gcc to run on
> > a target machine, it seems that you now have to cross-build gmp/mpfr
> > before building the target-hosted compiler. Was this intentional,
> > given that this stuff seems to be not needed for the C frontend?
>
> You should only need a native (for the host) gmp and mpfr for building
> GCC now. It will be used soon by the middle-end when folding math
> functions.
I just played with my build-robot script. It now attempts to build
gmp, but that fails:
Making all in mpn
make[2]: Entering directory `/tmp/mybuildplace/src/gcc/gmp-4.2.1/mpn'
/bin/sh ../libtool --mode=compile --tag=CC vax-linux-uclibc-gcc -c -DHAVE_CONFIG_H -I. -I. -I.. -D__GMP_WITHIN_GMP -I.. -DOPERATION_`echo add_n | sed 's/_$//'` -static `test -f 'add_n.s' || echo './'`add_n.s
vax-linux-uclibc-gcc -c -DHAVE_CONFIG_H -I. -I. -I.. -D__GMP_WITHIN_GMP -I.. -DOPERATION_add_n add_n.s -fPIC -DPIC -o .libs/add_n.o
add_n.s: Assembler messages:
add_n.s:35: Fatal error: Junk at end of expression "(ap)"
make[2]: *** [add_n.lo] Error 1
add_n.s is linked from some vax directory. Unfortunately, they use old
assembly there which current GAS will not assemble:
30 .text
31 .align 1
32 .globl ___gmpn_add_n
33 ___gmpn_add_n:
34 .word 0x0
35 movl 16(ap),r0
36 movl 12(ap),r1
37 movl 8(ap),r2
38 movl 4(ap),r3
39 mnegl r0,r5
40 addl2 $3,r0
When accessing the AP register, the name should be prefixed with '%'.
The assembly functions' names had one '_' too many in front, and the
.type ..., @function directive was missing.
With this patch to gmp, I can build GCC (build=i686-linux,
host=target=vax-linux-uclibc). It's not yet tested at all, though...
--- gmp-4.2.1/mpn/vax/addmul_1.s~ 2006-10-24 21:53:39.000000000 +0200
+++ gmp-4.2.1/mpn/vax/addmul_1.s 2006-10-25 08:25:24.000000000 +0200
@@ -1,7 +1,7 @@
# VAX __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
# the result to a second limb vector.
-# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc.
+# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -29,98 +29,99 @@
.text
.align 1
-.globl ___gmpn_addmul_1
-___gmpn_addmul_1:
+.globl __gmpn_addmul_1
+.type __gmpn_addmul_1, @function
+__gmpn_addmul_1:
.word 0xfc0
- movl 12(ap),r4
- movl 8(ap),r8
- movl 4(ap),r9
- movl 16(ap),r6
+ movl 12(%ap),%r4
+ movl 8(%ap),%r8
+ movl 4(%ap),%r9
+ movl 16(%ap),%r6
jlss s2_big
- clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L1
- clrl r11
+ clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L1
+ clrl %r11
# Loop for S2_LIMB < 0x80000000
-Loop1: movl (r8)+,r1
+Loop1: movl (%r8)+,%r1
jlss L1n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc $0,r3
- addl2 r2,(r9)+
- adwc $0,r3
-L1: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc $0,%r3
+ addl2 %r2,(%r9)+
+ adwc $0,%r3
+L1: movl (%r8)+,%r1
jlss L1n1
-L1p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc $0,r11
- addl2 r10,(r9)+
- adwc $0,r11
+L1p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc $0,%r11
+ addl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-L1n0: emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r6,r3
- addl2 r2,(r9)+
- adwc $0,r3
- movl (r8)+,r1
+L1n0: emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ addl2 %r2,(%r9)+
+ adwc $0,%r3
+ movl (%r8)+,%r1
jgeq L1p1
-L1n1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r6,r11
- addl2 r10,(r9)+
- adwc $0,r11
+L1n1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ addl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-s2_big: clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L2
- clrl r11
+s2_big: clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L2
+ clrl %r11
# Loop for S2_LIMB >= 0x80000000
-Loop2: movl (r8)+,r1
+Loop2: movl (%r8)+,%r1
jlss L2n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r1,r3
- addl2 r2,(r9)+
- adwc $0,r3
-L2: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r1,%r3
+ addl2 %r2,(%r9)+
+ adwc $0,%r3
+L2: movl (%r8)+,%r1
jlss L2n1
-L2p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r1,r11
- addl2 r10,(r9)+
- adwc $0,r11
+L2p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r1,%r11
+ addl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
-L2n0: emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r6,r3
- addl2 r2,(r9)+
- adwc r1,r3
- movl (r8)+,r1
+L2n0: emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ addl2 %r2,(%r9)+
+ adwc %r1,%r3
+ movl (%r8)+,%r1
jgeq L2p1
-L2n1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r6,r11
- addl2 r10,(r9)+
- adwc r1,r11
+L2n1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ addl2 %r10,(%r9)+
+ adwc %r1,%r11
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
--- gmp-4.2.1/mpn/vax/add_n.s~ 2006-10-24 21:48:09.000000000 +0200
+++ gmp-4.2.1/mpn/vax/add_n.s 2006-10-25 08:33:00.000000000 +0200
@@ -1,7 +1,7 @@
# VAX __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
# sum in a third limb vector.
-# Copyright 1999, 2000 Free Software Foundation, Inc.
+# Copyright 1999, 2000, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -29,33 +29,34 @@
.text
.align 1
-.globl ___gmpn_add_n
-___gmpn_add_n:
+.globl __gmpn_add_n
+.type __gmpn_add_n, @function
+__gmpn_add_n:
.word 0x0
- movl 16(ap),r0
- movl 12(ap),r1
- movl 8(ap),r2
- movl 4(ap),r3
- mnegl r0,r5
- addl2 $3,r0
- ashl $-2,r0,r0 # unroll loop count
- bicl2 $-4,r5 # mask out low 2 bits
- movaq (r5)[r5],r5 # 9x
- jmp Loop(r5)
-
-Loop: movl (r2)+,r4
- adwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- adwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- adwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- adwc (r1)+,r4
- movl r4,(r3)+
- sobgtr r0,Loop
+ movl 16(%ap),%r0
+ movl 12(%ap),%r1
+ movl 8(%ap),%r2
+ movl 4(%ap),%r3
+ mnegl %r0,%r5
+ addl2 $3,%r0
+ ashl $-2,%r0,%r0 # unroll loop count
+ bicl2 $-4,%r5 # mask out low 2 bits
+ movaq (%r5)[%r5],%r5 # 9x
+ jmp Loop(%r5)
+
+Loop: movl (%r2)+,%r4
+ adwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ adwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ adwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ adwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ sobgtr %r0,Loop
- adwc r0,r0
+ adwc %r0,%r0
ret
--- gmp-4.2.1/mpn/vax/lshift.s~ 2006-10-24 21:59:29.000000000 +0200
+++ gmp-4.2.1/mpn/vax/lshift.s 2006-10-25 08:27:31.000000000 +0200
@@ -1,6 +1,6 @@
# VAX mpn_lshift -- left shift.
-# Copyright 1999, 2000, 2001 Free Software Foundation, Inc.
+# Copyright 1999, 2000, 2001, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -30,29 +30,30 @@
.text
.align 1
-.globl ___gmpn_lshift
-___gmpn_lshift:
+.globl __gmpn_lshift
+.type __gmpn_lshift, @function
+__gmpn_lshift:
.word 0x1c0
- movl 4(ap),r7
- movl 8(ap),r6
- movl 12(ap),r1
- movl 16(ap),r8
-
- moval (r6)[r1],r6
- moval (r7)[r1],r7
- clrl r3
- movl -(r6),r2
- ashq r8,r2,r4
- movl r5,r0
- movl r2,r3
- decl r1
+ movl 4(%ap),%r7
+ movl 8(%ap),%r6
+ movl 12(%ap),%r1
+ movl 16(%ap),%r8
+
+ moval (%r6)[%r1],%r6
+ moval (%r7)[%r1],%r7
+ clrl %r3
+ movl -(%r6),%r2
+ ashq %r8,%r2,%r4
+ movl %r5,%r0
+ movl %r2,%r3
+ decl %r1
jeql Lend
-Loop: movl -(r6),r2
- ashq r8,r2,r4
- movl r5,-(r7)
- movl r2,r3
- sobgtr r1,Loop
+Loop: movl -(%r6),%r2
+ ashq %r8,%r2,%r4
+ movl %r5,-(%r7)
+ movl %r2,%r3
+ sobgtr %r1,Loop
-Lend: movl r4,-4(r7)
+Lend: movl %r4,-4(%r7)
ret
--- gmp-4.2.1/mpn/vax/mul_1.s~ 2006-10-24 21:50:43.000000000 +0200
+++ gmp-4.2.1/mpn/vax/mul_1.s 2006-10-25 08:27:44.000000000 +0200
@@ -1,7 +1,7 @@
# VAX __gmpn_mul_1 -- Multiply a limb vector with a limb and store
# the result in a second limb vector.
-# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc.
+# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -29,95 +29,96 @@
.text
.align 1
-.globl ___gmpn_mul_1
-___gmpn_mul_1:
+.globl __gmpn_mul_1
+.type __gmpn_mul_1, @function
+__gmpn_mul_1:
.word 0xfc0
- movl 12(ap),r4
- movl 8(ap),r8
- movl 4(ap),r9
- movl 16(ap),r6
+ movl 12(%ap),%r4
+ movl 8(%ap),%r8
+ movl 4(%ap),%r9
+ movl 16(%ap),%r6
jlss s2_big
# One might want to combine the addl2 and the store below, but that
# is actually just slower according to my timing tests. (VAX 3600)
- clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L1
- clrl r11
+ clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L1
+ clrl %r11
# Loop for S2_LIMB < 0x80000000
-Loop1: movl (r8)+,r1
+Loop1: movl (%r8)+,%r1
jlss L1n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc $0,r3
- movl r2,(r9)+
-L1: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc $0,%r3
+ movl %r2,(%r9)+
+L1: movl (%r8)+,%r1
jlss L1n1
-L1p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc $0,r11
- movl r10,(r9)+
+L1p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc $0,%r11
+ movl %r10,(%r9)+
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-L1n0: emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r6,r3
- movl r2,(r9)+
- movl (r8)+,r1
+L1n0: emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ movl %r2,(%r9)+
+ movl (%r8)+,%r1
jgeq L1p1
-L1n1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r6,r11
- movl r10,(r9)+
+L1n1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ movl %r10,(%r9)+
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-s2_big: clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L2
- clrl r11
+s2_big: clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L2
+ clrl %r11
# Loop for S2_LIMB >= 0x80000000
-Loop2: movl (r8)+,r1
+Loop2: movl (%r8)+,%r1
jlss L2n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r1,r3
- movl r2,(r9)+
-L2: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r1,%r3
+ movl %r2,(%r9)+
+L2: movl (%r8)+,%r1
jlss L2n1
-L2p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r1,r11
- movl r10,(r9)+
+L2p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r1,%r11
+ movl %r10,(%r9)+
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
-L2n0: emul r1,r6,$0,r2
- addl2 r1,r3
- addl2 r11,r2
- adwc r6,r3
- movl r2,(r9)+
- movl (r8)+,r1
+L2n0: emul %r1,%r6,$0,%r2
+ addl2 %r1,%r3
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ movl %r2,(%r9)+
+ movl (%r8)+,%r1
jgeq L2p1
-L2n1: emul r1,r6,$0,r10
- addl2 r1,r11
- addl2 r3,r10
- adwc r6,r11
- movl r10,(r9)+
+L2n1: emul %r1,%r6,$0,%r10
+ addl2 %r1,%r11
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ movl %r10,(%r9)+
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
--- gmp-4.2.1/mpn/vax/rshift.s~ 2006-10-24 22:00:15.000000000 +0200
+++ gmp-4.2.1/mpn/vax/rshift.s 2006-10-25 08:26:29.000000000 +0200
@@ -1,6 +1,6 @@
# VAX mpn_rshift -- right shift.
-# Copyright 1999, 2000, 2001 Free Software Foundation, Inc.
+# Copyright 1999, 2000, 2001, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -30,27 +30,28 @@
.text
.align 1
-.globl ___gmpn_rshift
-___gmpn_rshift:
+.globl __gmpn_rshift
+.type __gmpn_rshift, @function
+__gmpn_rshift:
.word 0x1c0
- movl 4(ap),r7
- movl 8(ap),r6
- movl 12(ap),r1
- movl 16(ap),r8
-
- movl (r6)+,r2
- subl3 r8,$32,r8
- ashl r8,r2,r0
- decl r1
+ movl 4(%ap),%r7
+ movl 8(%ap),%r6
+ movl 12(%ap),%r1
+ movl 16(%ap),%r8
+
+ movl (%r6)+,%r2
+ subl3 %r8,$32,%r8
+ ashl %r8,%r2,%r0
+ decl %r1
jeql Lend
-Loop: movl (r6)+,r3
- ashq r8,r2,r4
- movl r5,(r7)+
- movl r3,r2
- sobgtr r1,Loop
-
-Lend: clrl r3
- ashq r8,r2,r4
- movl r5,(r7)
+Loop: movl (%r6)+,%r3
+ ashq %r8,%r2,%r4
+ movl %r5,(%r7)+
+ movl %r3,%r2
+ sobgtr %r1,Loop
+
+Lend: clrl %r3
+ ashq %r8,%r2,%r4
+ movl %r5,(%r7)
ret
--- gmp-4.2.1/mpn/vax/submul_1.s~ 2006-10-24 21:56:17.000000000 +0200
+++ gmp-4.2.1/mpn/vax/submul_1.s 2006-10-25 08:26:46.000000000 +0200
@@ -1,7 +1,7 @@
# VAX __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
# the result from a second limb vector.
-# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc.
+# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -29,98 +29,99 @@
.text
.align 1
-.globl ___gmpn_submul_1
-___gmpn_submul_1:
+.globl __gmpn_submul_1
+.type __gmpn_submul_1, @function
+__gmpn_submul_1:
.word 0xfc0
- movl 12(ap),r4
- movl 8(ap),r8
- movl 4(ap),r9
- movl 16(ap),r6
+ movl 12(%ap),%r4
+ movl 8(%ap),%r8
+ movl 4(%ap),%r9
+ movl 16(%ap),%r6
jlss s2_big
- clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L1
- clrl r11
+ clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L1
+ clrl %r11
# Loop for S2_LIMB < 0x80000000
-Loop1: movl (r8)+,r1
+Loop1: movl (%r8)+,%r1
jlss L1n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc $0,r3
- subl2 r2,(r9)+
- adwc $0,r3
-L1: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc $0,%r3
+ subl2 %r2,(%r9)+
+ adwc $0,%r3
+L1: movl (%r8)+,%r1
jlss L1n1
-L1p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc $0,r11
- subl2 r10,(r9)+
- adwc $0,r11
+L1p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc $0,%r11
+ subl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-L1n0: emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r6,r3
- subl2 r2,(r9)+
- adwc $0,r3
- movl (r8)+,r1
+L1n0: emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ subl2 %r2,(%r9)+
+ adwc $0,%r3
+ movl (%r8)+,%r1
jgeq L1p1
-L1n1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r6,r11
- subl2 r10,(r9)+
- adwc $0,r11
+L1n1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ subl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop1
- movl r11,r0
+ sobgtr %r7,Loop1
+ movl %r11,%r0
ret
-s2_big: clrl r3
- incl r4
- ashl $-1,r4,r7
- jlbc r4,L2
- clrl r11
+s2_big: clrl %r3
+ incl %r4
+ ashl $-1,%r4,%r7
+ jlbc %r4,L2
+ clrl %r11
# Loop for S2_LIMB >= 0x80000000
-Loop2: movl (r8)+,r1
+Loop2: movl (%r8)+,%r1
jlss L2n0
- emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r1,r3
- subl2 r2,(r9)+
- adwc $0,r3
-L2: movl (r8)+,r1
+ emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r1,%r3
+ subl2 %r2,(%r9)+
+ adwc $0,%r3
+L2: movl (%r8)+,%r1
jlss L2n1
-L2p1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r1,r11
- subl2 r10,(r9)+
- adwc $0,r11
+L2p1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r1,%r11
+ subl2 %r10,(%r9)+
+ adwc $0,%r11
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
-L2n0: emul r1,r6,$0,r2
- addl2 r11,r2
- adwc r6,r3
- subl2 r2,(r9)+
- adwc r1,r3
- movl (r8)+,r1
+L2n0: emul %r1,%r6,$0,%r2
+ addl2 %r11,%r2
+ adwc %r6,%r3
+ subl2 %r2,(%r9)+
+ adwc %r1,%r3
+ movl (%r8)+,%r1
jgeq L2p1
-L2n1: emul r1,r6,$0,r10
- addl2 r3,r10
- adwc r6,r11
- subl2 r10,(r9)+
- adwc r1,r11
+L2n1: emul %r1,%r6,$0,%r10
+ addl2 %r3,%r10
+ adwc %r6,%r11
+ subl2 %r10,(%r9)+
+ adwc %r1,%r11
- sobgtr r7,Loop2
- movl r11,r0
+ sobgtr %r7,Loop2
+ movl %r11,%r0
ret
--- gmp-4.2.1/mpn/vax/sub_n.s~ 2006-10-24 21:49:24.000000000 +0200
+++ gmp-4.2.1/mpn/vax/sub_n.s 2006-10-25 08:27:08.000000000 +0200
@@ -1,7 +1,7 @@
# VAX __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and store
# difference in a third limb vector.
-# Copyright 1999, 2000 Free Software Foundation, Inc.
+# Copyright 1999, 2000, 2006 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
@@ -29,33 +29,34 @@
.text
.align 1
-.globl ___gmpn_sub_n
-___gmpn_sub_n:
+.globl __gmpn_sub_n
+.type __gmpn_sub_n, @function
+__gmpn_sub_n:
.word 0x0
- movl 16(ap),r0
- movl 12(ap),r1
- movl 8(ap),r2
- movl 4(ap),r3
- mnegl r0,r5
- addl2 $3,r0
- ashl $-2,r0,r0 # unroll loop count
- bicl2 $-4,r5 # mask out low 2 bits
- movaq (r5)[r5],r5 # 9x
- jmp Loop(r5)
-
-Loop: movl (r2)+,r4
- sbwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- sbwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- sbwc (r1)+,r4
- movl r4,(r3)+
- movl (r2)+,r4
- sbwc (r1)+,r4
- movl r4,(r3)+
- sobgtr r0,Loop
+ movl 16(%ap),%r0
+ movl 12(%ap),%r1
+ movl 8(%ap),%r2
+ movl 4(%ap),%r3
+ mnegl %r0,%r5
+ addl2 $3,%r0
+ ashl $-2,%r0,%r0 # unroll loop count
+ bicl2 $-4,%r5 # mask out low 2 bits
+ movaq (%r5)[%r5],%r5 # 9x
+ jmp Loop(%r5)
+
+Loop: movl (%r2)+,%r4
+ sbwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ sbwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ sbwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ movl (%r2)+,%r4
+ sbwc (%r1)+,%r4
+ movl %r4,(%r3)+
+ sobgtr %r0,Loop
- adwc r0,r0
+ adwc %r0,%r0
ret
--
Jan-Benedict Glaw jbglaw at lug-owl.de +49-172-7608481
Signature of: Alles wird gut! ...und heute wirds schon ein bißchen besser.
the second :
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 189 bytes
Desc: Digital signature
Url : http://gmplib.org/list-archives/gmp-bugs/attachments/20061025/221eeacb/attachment.bin
More information about the gmp-bugs
mailing list